ZTWHHH commited on
Commit
2e717e6
·
verified ·
1 Parent(s): d26d294

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h +30 -0
  3. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad.h +39 -0
  4. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h +25 -0
  5. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ger.h +39 -0
  6. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h +25 -0
  7. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h +83 -0
  8. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h +25 -0
  9. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h +25 -0
  10. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_native.h +21 -0
  11. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h +36 -0
  12. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_native.h +26 -0
  13. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
  14. videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h +26 -0
  15. vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc +0 -0
  16. vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc +0 -0
  17. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py +0 -0
  18. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc +0 -0
  19. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py +0 -0
  20. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py +187 -0
  21. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc +0 -0
  22. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc +0 -0
  23. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc +0 -0
  24. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc +0 -0
  25. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-310.pyc +0 -0
  26. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-310.pyc +0 -0
  27. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc +0 -0
  28. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc +0 -0
  29. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc +0 -0
  30. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
  31. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py +30 -0
  32. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py +161 -0
  33. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py +301 -0
  34. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py +28 -0
  35. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +80 -0
  36. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py +36 -0
  37. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
  38. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +51 -0
  39. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +54 -0
  40. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py +101 -0
  41. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py +74 -0
  42. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py +41 -0
  43. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py +62 -0
  44. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py +42 -0
  45. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py +41 -0
  46. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py +168 -0
  47. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py +100 -0
  48. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_speech.py +100 -0
  49. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py +46 -0
  50. vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py +45 -0
.gitattributes CHANGED
@@ -1430,3 +1430,4 @@ parrot/lib/python3.10/site-packages/fontTools/pens/momentsPen.cpython-310-x86_64
1430
  parrot/lib/python3.10/site-packages/opencv_python.libs/libQt5Test-d435aae7.so.5.15.13 filter=lfs diff=lfs merge=lfs -text
1431
  vllm/lib/python3.10/site-packages/wandb/vendor/pynvml/__pycache__/pynvml.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1432
  vllm/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1430
  parrot/lib/python3.10/site-packages/opencv_python.libs/libQt5Test-d435aae7.so.5.15.13 filter=lfs diff=lfs merge=lfs -text
1431
  vllm/lib/python3.10/site-packages/wandb/vendor/pynvml/__pycache__/pynvml.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1432
  vllm/lib/python3.10/site-packages/wandb/vendor/pygments/lexers/__pycache__/_php_builtins.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1433
+ vllm/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_grid_sampler_2d_cpu_fallback_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
27
+ return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
28
+ }
29
+
30
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_standard_gamma_grad_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor
26
+ inline at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) {
27
+ return at::_ops::_standard_gamma_grad::call(self, output);
28
+ }
29
+
30
+ // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & _standard_gamma_grad_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & output) {
32
+ return at::_ops::_standard_gamma_grad_out::call(self, output, out);
33
+ }
34
+ // aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & _standard_gamma_grad_outf(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) {
36
+ return at::_ops::_standard_gamma_grad_out::call(self, output, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
21
+ TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices);
22
+ TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/ger.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/ger_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::ger(Tensor self, Tensor vec2) -> Tensor
26
+ inline at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) {
27
+ return at::_ops::ger::call(self, vec2);
28
+ }
29
+
30
+ // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) {
32
+ return at::_ops::ger_out::call(self, vec2, out);
33
+ }
34
+ // aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) {
36
+ return at::_ops::ger_out::call(self, vec2, out);
37
+ }
38
+
39
+ }
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/index_copy_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_index_copy_out : public at::meta::structured_index_copy {
20
+ void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Tensor & out);
21
+ };
22
+ TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
23
+ TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source);
24
+ } // namespace native
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API lerp__Scalar {
18
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)")
24
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
26
+ };
27
+
28
+ struct TORCH_API lerp__Tensor {
29
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
37
+ };
38
+
39
+ struct TORCH_API lerp_Scalar_out {
40
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out);
48
+ };
49
+
50
+ struct TORCH_API lerp_Tensor_out {
51
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp")
55
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
56
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)")
57
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
58
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out);
59
+ };
60
+
61
+ struct TORCH_API lerp_Scalar {
62
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
63
+ using ptr_schema = schema*;
64
+ // See Note [static constexpr char* members for windows NVCC]
65
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp")
66
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
67
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor")
68
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
69
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight);
70
+ };
71
+
72
+ struct TORCH_API lerp_Tensor {
73
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &);
74
+ using ptr_schema = schema*;
75
+ // See Note [static constexpr char* members for windows NVCC]
76
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp")
77
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
78
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor")
79
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
80
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight);
81
+ };
82
+
83
+ }} // namespace at::_ops
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other);
21
+ TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
22
+ TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
21
+ TORCH_API at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const c10::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean);
22
+ TORCH_API at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out);
23
+
24
+ } // namespace cpu
25
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt);
20
+ } // namespace native
21
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/norm_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype);
20
+ TORCH_API at::Tensor & norm_ScalarOpt_dtype_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out);
21
+ TORCH_API at::Tensor norm(const at::Tensor & self, const at::Scalar & p=2);
22
+ TORCH_API at::Tensor & norm_Scalar_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out);
23
+ struct TORCH_API structured_norm_dtype_out : public at::meta::structured_norm_ScalarOpt_dim_dtype {
24
+ void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, const at::Tensor & out);
25
+ };
26
+ TORCH_API at::Tensor sparse_dtype_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype);
27
+ struct TORCH_API structured_norm_out : public at::meta::structured_norm_ScalarOpt_dim {
28
+ void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, const at::Tensor & out);
29
+ };
30
+ TORCH_API at::Tensor sparse_norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false);
31
+ TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype);
32
+ TORCH_API at::Tensor & norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out);
33
+ TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false);
34
+ TORCH_API at::Tensor & norm_out(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out);
35
+ } // namespace native
36
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_native.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/replication_pad3d_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_replication_pad3d_out_cpu : public at::meta::structured_replication_pad3d {
20
+ void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
21
+ };
22
+ struct TORCH_API structured_replication_pad3d_out_cuda : public at::meta::structured_replication_pad3d {
23
+ void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out);
24
+ };
25
+ } // namespace native
26
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautogradnonfunctional {
19
+
20
+ TORCH_API at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other);
21
+
22
+ } // namespace compositeexplicitautogradnonfunctional
23
+ } // namespace at
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step);
21
+ TORCH_API at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out);
22
+ TORCH_API at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step);
23
+ TORCH_API at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out);
24
+
25
+ } // namespace compositeexplicitautograd
26
+ } // namespace at
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py ADDED
File without changes
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py ADDED
The diff for this file is too large to render. See raw diff
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is auto-generated by `utils/generate_inference_types.py`.
2
+ # Do not modify it manually.
3
+ #
4
+ # ruff: noqa: F401
5
+
6
+ from .audio_classification import (
7
+ AudioClassificationInput,
8
+ AudioClassificationOutputElement,
9
+ AudioClassificationOutputTransform,
10
+ AudioClassificationParameters,
11
+ )
12
+ from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement
13
+ from .automatic_speech_recognition import (
14
+ AutomaticSpeechRecognitionEarlyStoppingEnum,
15
+ AutomaticSpeechRecognitionGenerationParameters,
16
+ AutomaticSpeechRecognitionInput,
17
+ AutomaticSpeechRecognitionOutput,
18
+ AutomaticSpeechRecognitionOutputChunk,
19
+ AutomaticSpeechRecognitionParameters,
20
+ )
21
+ from .base import BaseInferenceType
22
+ from .chat_completion import (
23
+ ChatCompletionInput,
24
+ ChatCompletionInputFunctionDefinition,
25
+ ChatCompletionInputFunctionName,
26
+ ChatCompletionInputGrammarType,
27
+ ChatCompletionInputGrammarTypeType,
28
+ ChatCompletionInputMessage,
29
+ ChatCompletionInputMessageChunk,
30
+ ChatCompletionInputMessageChunkType,
31
+ ChatCompletionInputStreamOptions,
32
+ ChatCompletionInputTool,
33
+ ChatCompletionInputToolChoiceClass,
34
+ ChatCompletionInputToolChoiceEnum,
35
+ ChatCompletionInputURL,
36
+ ChatCompletionOutput,
37
+ ChatCompletionOutputComplete,
38
+ ChatCompletionOutputFunctionDefinition,
39
+ ChatCompletionOutputLogprob,
40
+ ChatCompletionOutputLogprobs,
41
+ ChatCompletionOutputMessage,
42
+ ChatCompletionOutputToolCall,
43
+ ChatCompletionOutputTopLogprob,
44
+ ChatCompletionOutputUsage,
45
+ ChatCompletionStreamOutput,
46
+ ChatCompletionStreamOutputChoice,
47
+ ChatCompletionStreamOutputDelta,
48
+ ChatCompletionStreamOutputDeltaToolCall,
49
+ ChatCompletionStreamOutputFunction,
50
+ ChatCompletionStreamOutputLogprob,
51
+ ChatCompletionStreamOutputLogprobs,
52
+ ChatCompletionStreamOutputTopLogprob,
53
+ ChatCompletionStreamOutputUsage,
54
+ )
55
+ from .depth_estimation import DepthEstimationInput, DepthEstimationOutput
56
+ from .document_question_answering import (
57
+ DocumentQuestionAnsweringInput,
58
+ DocumentQuestionAnsweringInputData,
59
+ DocumentQuestionAnsweringOutputElement,
60
+ DocumentQuestionAnsweringParameters,
61
+ )
62
+ from .feature_extraction import FeatureExtractionInput, FeatureExtractionInputTruncationDirection
63
+ from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters
64
+ from .image_classification import (
65
+ ImageClassificationInput,
66
+ ImageClassificationOutputElement,
67
+ ImageClassificationOutputTransform,
68
+ ImageClassificationParameters,
69
+ )
70
+ from .image_segmentation import (
71
+ ImageSegmentationInput,
72
+ ImageSegmentationOutputElement,
73
+ ImageSegmentationParameters,
74
+ ImageSegmentationSubtask,
75
+ )
76
+ from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize
77
+ from .image_to_text import (
78
+ ImageToTextEarlyStoppingEnum,
79
+ ImageToTextGenerationParameters,
80
+ ImageToTextInput,
81
+ ImageToTextOutput,
82
+ ImageToTextParameters,
83
+ )
84
+ from .object_detection import (
85
+ ObjectDetectionBoundingBox,
86
+ ObjectDetectionInput,
87
+ ObjectDetectionOutputElement,
88
+ ObjectDetectionParameters,
89
+ )
90
+ from .question_answering import (
91
+ QuestionAnsweringInput,
92
+ QuestionAnsweringInputData,
93
+ QuestionAnsweringOutputElement,
94
+ QuestionAnsweringParameters,
95
+ )
96
+ from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData
97
+ from .summarization import (
98
+ SummarizationInput,
99
+ SummarizationOutput,
100
+ SummarizationParameters,
101
+ SummarizationTruncationStrategy,
102
+ )
103
+ from .table_question_answering import (
104
+ Padding,
105
+ TableQuestionAnsweringInput,
106
+ TableQuestionAnsweringInputData,
107
+ TableQuestionAnsweringOutputElement,
108
+ TableQuestionAnsweringParameters,
109
+ )
110
+ from .text2text_generation import (
111
+ Text2TextGenerationInput,
112
+ Text2TextGenerationOutput,
113
+ Text2TextGenerationParameters,
114
+ Text2TextGenerationTruncationStrategy,
115
+ )
116
+ from .text_classification import (
117
+ TextClassificationInput,
118
+ TextClassificationOutputElement,
119
+ TextClassificationOutputTransform,
120
+ TextClassificationParameters,
121
+ )
122
+ from .text_generation import (
123
+ TextGenerationInput,
124
+ TextGenerationInputGenerateParameters,
125
+ TextGenerationInputGrammarType,
126
+ TextGenerationOutput,
127
+ TextGenerationOutputBestOfSequence,
128
+ TextGenerationOutputDetails,
129
+ TextGenerationOutputFinishReason,
130
+ TextGenerationOutputPrefillToken,
131
+ TextGenerationOutputToken,
132
+ TextGenerationStreamOutput,
133
+ TextGenerationStreamOutputStreamDetails,
134
+ TextGenerationStreamOutputToken,
135
+ TypeEnum,
136
+ )
137
+ from .text_to_audio import (
138
+ TextToAudioEarlyStoppingEnum,
139
+ TextToAudioGenerationParameters,
140
+ TextToAudioInput,
141
+ TextToAudioOutput,
142
+ TextToAudioParameters,
143
+ )
144
+ from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters
145
+ from .text_to_speech import (
146
+ TextToSpeechEarlyStoppingEnum,
147
+ TextToSpeechGenerationParameters,
148
+ TextToSpeechInput,
149
+ TextToSpeechOutput,
150
+ TextToSpeechParameters,
151
+ )
152
+ from .text_to_video import TextToVideoInput, TextToVideoOutput, TextToVideoParameters
153
+ from .token_classification import (
154
+ TokenClassificationAggregationStrategy,
155
+ TokenClassificationInput,
156
+ TokenClassificationOutputElement,
157
+ TokenClassificationParameters,
158
+ )
159
+ from .translation import TranslationInput, TranslationOutput, TranslationParameters, TranslationTruncationStrategy
160
+ from .video_classification import (
161
+ VideoClassificationInput,
162
+ VideoClassificationOutputElement,
163
+ VideoClassificationOutputTransform,
164
+ VideoClassificationParameters,
165
+ )
166
+ from .visual_question_answering import (
167
+ VisualQuestionAnsweringInput,
168
+ VisualQuestionAnsweringInputData,
169
+ VisualQuestionAnsweringOutputElement,
170
+ VisualQuestionAnsweringParameters,
171
+ )
172
+ from .zero_shot_classification import (
173
+ ZeroShotClassificationInput,
174
+ ZeroShotClassificationOutputElement,
175
+ ZeroShotClassificationParameters,
176
+ )
177
+ from .zero_shot_image_classification import (
178
+ ZeroShotImageClassificationInput,
179
+ ZeroShotImageClassificationOutputElement,
180
+ ZeroShotImageClassificationParameters,
181
+ )
182
+ from .zero_shot_object_detection import (
183
+ ZeroShotObjectDetectionBoundingBox,
184
+ ZeroShotObjectDetectionInput,
185
+ ZeroShotObjectDetectionOutputElement,
186
+ ZeroShotObjectDetectionParameters,
187
+ )
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-310.pyc ADDED
Binary file (934 Bytes). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-310.pyc ADDED
Binary file (1.82 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc ADDED
Binary file (1.5 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Post-processing function applied to the raw model outputs before scores are returned.
AudioClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass_with_extra
class AudioClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Audio Classification"""

    function_to_apply: Optional["AudioClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
22
+
23
+
24
# Request payload for the audio-classification task.
@dataclass_with_extra
class AudioClassificationInput(BaseInferenceType):
    """Inputs for Audio Classification inference"""

    inputs: str
    """The input audio data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the audio data as a raw bytes payload.
    """
    parameters: Optional[AudioClassificationParameters] = None
    """Additional inference parameters for Audio Classification"""
34
+
35
+
36
# One (label, score) prediction; named "Element" — presumably the endpoint
# returns a list of these. TODO(review): confirm against the task spec.
@dataclass_with_extra
class AudioClassificationOutputElement(BaseInferenceType):
    """Outputs for Audio Classification inference"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Request payload for the audio-to-audio task.
@dataclass_with_extra
class AudioToAudioInput(BaseInferenceType):
    """Inputs for Audio to Audio inference"""

    inputs: Any
    """The input audio data"""
17
+
18
+
19
# One generated audio file; named "Element" — presumably the endpoint returns
# a list of these. TODO(review): confirm against the task spec.
@dataclass_with_extra
class AudioToAudioOutputElement(BaseInferenceType):
    """Outputs of inference for the Audio To Audio task
    A generated audio file with its label.
    """

    blob: Any
    """The generated audio file."""
    content_type: str
    """The content type of audio file."""
    label: str
    """The label of the audio file."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains a base class for all inference types."""
15
+
16
+ import inspect
17
+ import json
18
+ from dataclasses import asdict, dataclass
19
+ from typing import Any, Dict, List, Type, TypeVar, Union, get_args
20
+
21
+
22
+ T = TypeVar("T", bound="BaseInferenceType")
23
+
24
+
25
+ def _repr_with_extra(self):
26
+ fields = list(self.__dataclass_fields__.keys())
27
+ other_fields = list(k for k in self.__dict__ if k not in fields)
28
+ return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"
29
+
30
+
31
def dataclass_with_extra(cls: Type[T]) -> Type[T]:
    """Decorator: make *cls* a dataclass whose __repr__ also shows extra attributes.

    Only meaningful for classes inheriting from `BaseInferenceType`, which stores
    unexpected server fields directly on the instance.
    """
    decorated = dataclass(cls)
    # Replace the generated __repr__ so extra (non-field) attributes are visible too.
    decorated.__repr__ = _repr_with_extra  # type: ignore[method-assign]
    return decorated
39
+
40
+
41
@dataclass
class BaseInferenceType(dict):
    """Base class for all inference types.

    Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future.

    Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields
    are made optional, and non-expected fields are added as dict attributes).
    """

    @classmethod
    def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
        """Alias to parse server response and return a single instance.

        See `parse_obj` for more details.

        Raises:
            ValueError: if the parsed payload is not a list.
        """
        output = cls.parse_obj(data)
        if not isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")
        return output

    @classmethod
    def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
        """Alias to parse server response and return a single instance.

        See `parse_obj` for more details.

        Raises:
            ValueError: if the parsed payload is a list instead of a single object.
        """
        output = cls.parse_obj(data)
        if isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")
        return output

    @classmethod
    def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
        """Parse server response as a dataclass or list of dataclasses.

        To enable future-compatibility, we want to handle cases where the server return more fields than expected.
        In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are
        added as dict attributes.
        """
        # Parse server response (from bytes)
        if isinstance(data, bytes):
            data = data.decode()
        if isinstance(data, str):
            data = json.loads(data)

        # If a list, parse each item individually
        # (isinstance against typing.List behaves like isinstance against list here)
        if isinstance(data, List):
            return [cls.parse_obj(d) for d in data]  # type: ignore [misc]

        # At this point, we expect a dict
        if not isinstance(data, dict):
            raise ValueError(f"Invalid data type: {type(data)}")

        # Split incoming keys into declared dataclass fields (init_values) and
        # unexpected extras (other_values) kept for forward compatibility.
        init_values = {}
        other_values = {}
        for key, value in data.items():
            key = normalize_key(key)
            if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:
                if isinstance(value, dict) or isinstance(value, list):
                    field_type = cls.__dataclass_fields__[key].type

                    # if `field_type` is a `BaseInferenceType`, parse it
                    if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):
                        value = field_type.parse_obj(value)

                    # otherwise, recursively parse nested dataclasses (if possible)
                    # `get_args` returns handle Union and Optional for us
                    else:
                        expected_types = get_args(field_type)
                        for expected_type in expected_types:
                            if getattr(expected_type, "_name", None) == "List":
                                expected_type = get_args(expected_type)[
                                    0
                                ]  # assume same type for all items in the list
                            if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):
                                value = expected_type.parse_obj(value)
                                break
                init_values[key] = value
            else:
                other_values[key] = value

        # Make all missing fields default to None
        # => ensure that dataclass initialization will never fail even if the server does not return all fields.
        for key in cls.__dataclass_fields__:
            if key not in init_values:
                init_values[key] = None

        # Initialize dataclass with expected values
        item = cls(**init_values)

        # Add remaining fields as dict attributes
        item.update(other_values)

        # Add remaining fields as extra dataclass fields.
        # They won't be part of the dataclass fields but will be accessible as attributes.
        # Use @dataclass_with_extra to show them in __repr__.
        item.__dict__.update(other_values)
        return item

    def __post_init__(self):
        # Seed the dict side with the dataclass field values so `dict(obj)` and
        # key access work immediately after construction.
        self.update(asdict(self))

    def __setitem__(self, __key: Any, __value: Any) -> None:
        # Hacky way to keep dataclass values in sync when dict is updated
        super().__setitem__(__key, __value)
        # Guard avoids infinite ping-pong with __setattr__ when values already match.
        if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:
            self.__setattr__(__key, __value)
        return

    def __setattr__(self, __name: str, __value: Any) -> None:
        # Hacky way to keep dict values in sync when dataclass is updated
        super().__setattr__(__name, __value)
        # Guard avoids infinite ping-pong with __setitem__ when values already match.
        if self.get(__name) != __value:
            self[__name] = __value
        return
157
+
158
+
159
def normalize_key(key: str) -> str:
    """Normalize a server-sent key to snake_case.

    e.g. "content-type" -> "content_type", "Accept" -> "accept"
    """
    # Map both hyphens and spaces to underscores in one pass, then lowercase.
    table = str.maketrans({"-": "_", " ": "_"})
    return key.translate(table).lower()
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, List, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Wrapper for an image reference inside a multimodal message chunk.
@dataclass_with_extra
class ChatCompletionInputURL(BaseInferenceType):
    url: str


# Discriminator for message chunks: plain text or an image URL.
ChatCompletionInputMessageChunkType = Literal["text", "image_url"]


# One chunk of a multimodal message; `type` selects which of the optional
# payload fields (`text` / `image_url`) is populated.
@dataclass_with_extra
class ChatCompletionInputMessageChunk(BaseInferenceType):
    type: "ChatCompletionInputMessageChunkType"
    image_url: Optional[ChatCompletionInputURL] = None
    text: Optional[str] = None


# A single conversation message: either a plain string or a list of
# multimodal chunks, tagged with the speaker `role`.
@dataclass_with_extra
class ChatCompletionInputMessage(BaseInferenceType):
    content: Union[List[ChatCompletionInputMessageChunk], str]
    role: str
    name: Optional[str] = None


# Grammar kind used to constrain generation: a JSON schema or a regex.
ChatCompletionInputGrammarTypeType = Literal["json", "regex"]


@dataclass_with_extra
class ChatCompletionInputGrammarType(BaseInferenceType):
    type: "ChatCompletionInputGrammarTypeType"
    value: Any
    """A string that represents a [JSON Schema](https://json-schema.org/).
    JSON Schema is a declarative language that allows to annotate JSON documents
    with types and descriptions.
    """


@dataclass_with_extra
class ChatCompletionInputStreamOptions(BaseInferenceType):
    include_usage: bool
    """If set, an additional chunk will be streamed before the data: [DONE] message. The usage
    field on this chunk shows the token usage statistics for the entire request, and the
    choices field will always be an empty array. All other chunks will also include a usage
    field, but with a null value.
    """


# Names a specific function for forced tool choice (see `tool_choice`).
@dataclass_with_extra
class ChatCompletionInputFunctionName(BaseInferenceType):
    name: str


# Object form of `tool_choice`: forces the model to call the named function.
@dataclass_with_extra
class ChatCompletionInputToolChoiceClass(BaseInferenceType):
    function: ChatCompletionInputFunctionName


# Enum form of `tool_choice`.
ChatCompletionInputToolChoiceEnum = Literal["auto", "none", "required"]


@dataclass_with_extra
class ChatCompletionInputFunctionDefinition(BaseInferenceType):
    arguments: Any
    name: str
    description: Optional[str] = None


# A callable tool exposed to the model; `type` is the tool kind
# (currently only functions are supported, per ChatCompletionInput.tools docs).
@dataclass_with_extra
class ChatCompletionInputTool(BaseInferenceType):
    function: ChatCompletionInputFunctionDefinition
    type: str
80
+
81
+
82
@dataclass_with_extra
class ChatCompletionInput(BaseInferenceType):
    """Chat Completion Input.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    messages: List[ChatCompletionInputMessage]
    """A list of messages comprising the conversation so far."""
    frequency_penalty: Optional[float] = None
    """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
    frequency in the text so far,
    decreasing the model's likelihood to repeat the same line verbatim.
    """
    logit_bias: Optional[List[float]] = None
    """UNUSED
    Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
    object that maps tokens
    (specified by their token ID in the tokenizer) to an associated bias value from -100 to
    100. Mathematically,
    the bias is added to the logits generated by the model prior to sampling. The exact
    effect will vary per model,
    but values between -1 and 1 should decrease or increase likelihood of selection; values
    like -100 or 100 should
    result in a ban or exclusive selection of the relevant token.
    """
    logprobs: Optional[bool] = None
    """Whether to return log probabilities of the output tokens or not. If true, returns the log
    probabilities of each
    output token returned in the content of message.
    """
    max_tokens: Optional[int] = None
    """The maximum number of tokens that can be generated in the chat completion."""
    model: Optional[str] = None
    """[UNUSED] ID of the model to use. See the model endpoint compatibility table for details
    on which models work with the Chat API.
    """
    n: Optional[int] = None
    """UNUSED
    How many chat completion choices to generate for each input message. Note that you will
    be charged based on the
    number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    """
    presence_penalty: Optional[float] = None
    """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
    appear in the text so far,
    increasing the model's likelihood to talk about new topics
    """
    # Constrains generation to a grammar (JSON schema or regex);
    # see ChatCompletionInputGrammarType.
    response_format: Optional[ChatCompletionInputGrammarType] = None
    # NOTE(review): undocumented in the spec — presumably a sampling seed for
    # reproducible generation; confirm against TGI docs.
    seed: Optional[int] = None
    stop: Optional[List[str]] = None
    """Up to 4 sequences where the API will stop generating further tokens."""
    # When true, the server streams ChatCompletionStreamOutput chunks instead of
    # returning a single ChatCompletionOutput.
    stream: Optional[bool] = None
    stream_options: Optional[ChatCompletionInputStreamOptions] = None
    temperature: Optional[float] = None
    """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the
    output more random, while
    lower values like 0.2 will make it more focused and deterministic.
    We generally recommend altering this or `top_p` but not both.
    """
    tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
    tool_prompt: Optional[str] = None
    """A prompt to be appended before the tools"""
    tools: Optional[List[ChatCompletionInputTool]] = None
    """A list of tools the model may call. Currently, only functions are supported as a tool.
    Use this to provide a list of
    functions the model may generate JSON inputs for.
    """
    top_logprobs: Optional[int] = None
    """An integer between 0 and 5 specifying the number of most likely tokens to return at each
    token position, each with
    an associated log probability. logprobs must be set to true if this parameter is used.
    """
    top_p: Optional[float] = None
    """An alternative to sampling with temperature, called nucleus sampling, where the model
    considers the results of the
    tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
    probability mass are considered.
    """
162
+
163
+
164
# --- Non-streaming output types -------------------------------------------

# One of the top-k alternative tokens at a position, with its log probability.
@dataclass_with_extra
class ChatCompletionOutputTopLogprob(BaseInferenceType):
    logprob: float
    token: str


# Log probability of one generated token plus its top-k alternatives.
@dataclass_with_extra
class ChatCompletionOutputLogprob(BaseInferenceType):
    logprob: float
    token: str
    top_logprobs: List[ChatCompletionOutputTopLogprob]


@dataclass_with_extra
class ChatCompletionOutputLogprobs(BaseInferenceType):
    content: List[ChatCompletionOutputLogprob]


@dataclass_with_extra
class ChatCompletionOutputFunctionDefinition(BaseInferenceType):
    arguments: Any
    name: str
    description: Optional[str] = None


# A tool invocation emitted by the model.
@dataclass_with_extra
class ChatCompletionOutputToolCall(BaseInferenceType):
    function: ChatCompletionOutputFunctionDefinition
    id: str
    type: str


# Assistant message: plain text content and/or tool calls.
@dataclass_with_extra
class ChatCompletionOutputMessage(BaseInferenceType):
    role: str
    content: Optional[str] = None
    tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None


# One completed choice of the response.
@dataclass_with_extra
class ChatCompletionOutputComplete(BaseInferenceType):
    finish_reason: str
    index: int
    message: ChatCompletionOutputMessage
    logprobs: Optional[ChatCompletionOutputLogprobs] = None


# Token accounting for the whole request.
@dataclass_with_extra
class ChatCompletionOutputUsage(BaseInferenceType):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int


@dataclass_with_extra
class ChatCompletionOutput(BaseInferenceType):
    """Chat Completion Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    choices: List[ChatCompletionOutputComplete]
    created: int
    id: str
    model: str
    system_fingerprint: str
    usage: ChatCompletionOutputUsage


# --- Streaming output types -----------------------------------------------

# Incremental function-call fragment inside a streamed delta.
@dataclass_with_extra
class ChatCompletionStreamOutputFunction(BaseInferenceType):
    arguments: str
    name: Optional[str] = None


@dataclass_with_extra
class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
    function: ChatCompletionStreamOutputFunction
    id: str
    index: int
    type: str


# Incremental message delta for one streamed chunk.
@dataclass_with_extra
class ChatCompletionStreamOutputDelta(BaseInferenceType):
    role: str
    content: Optional[str] = None
    # NOTE(review): unlike ChatCompletionOutputMessage.tool_calls this is a single
    # object, not a list — this matches the generated TGI spec; confirm before changing.
    tool_calls: Optional[ChatCompletionStreamOutputDeltaToolCall] = None


@dataclass_with_extra
class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
    logprob: float
    token: str


@dataclass_with_extra
class ChatCompletionStreamOutputLogprob(BaseInferenceType):
    logprob: float
    token: str
    top_logprobs: List[ChatCompletionStreamOutputTopLogprob]


@dataclass_with_extra
class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
    content: List[ChatCompletionStreamOutputLogprob]


# One choice within a streamed chunk; `finish_reason` is set on the final chunk.
@dataclass_with_extra
class ChatCompletionStreamOutputChoice(BaseInferenceType):
    delta: ChatCompletionStreamOutputDelta
    index: int
    finish_reason: Optional[str] = None
    logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None


@dataclass_with_extra
class ChatCompletionStreamOutputUsage(BaseInferenceType):
    completion_tokens: int
    prompt_tokens: int
    total_tokens: int


@dataclass_with_extra
class ChatCompletionStreamOutput(BaseInferenceType):
    """Chat Completion Stream Output.
    Auto-generated from TGI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
    """

    choices: List[ChatCompletionStreamOutputChoice]
    created: int
    id: str
    model: str
    system_fingerprint: str
    # Only populated when stream_options.include_usage was requested (see
    # ChatCompletionInputStreamOptions); otherwise None.
    usage: Optional[ChatCompletionStreamOutputUsage] = None
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Dict, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Request payload for the depth-estimation task.
@dataclass_with_extra
class DepthEstimationInput(BaseInferenceType):
    """Inputs for Depth Estimation inference"""

    inputs: Any
    """The input image data"""
    parameters: Optional[Dict[str, Any]] = None
    """Additional inference parameters for Depth Estimation"""
19
+
20
+
21
# Response payload: the same depth prediction in two representations.
@dataclass_with_extra
class DepthEstimationOutput(BaseInferenceType):
    """Outputs of inference for the Depth Estimation task"""

    depth: Any
    """The predicted depth as an image"""
    predicted_depth: Any
    """The predicted depth as a tensor"""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, List, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class DocumentQuestionAnsweringInputData(BaseInferenceType):
13
+ """One (document, question) pair to answer"""
14
+
15
+ image: Any
16
+ """The image on which the question is asked"""
17
+ question: str
18
+ """A question to ask of the document"""
19
+
20
+
21
+ @dataclass_with_extra
22
+ class DocumentQuestionAnsweringParameters(BaseInferenceType):
23
+ """Additional inference parameters for Document Question Answering"""
24
+
25
+ doc_stride: Optional[int] = None
26
+ """If the words in the document are too long to fit with the question for the model, it will
27
+ be split in several chunks with some overlap. This argument controls the size of that
28
+ overlap.
29
+ """
30
+ handle_impossible_answer: Optional[bool] = None
31
+ """Whether to accept impossible as an answer"""
32
+ lang: Optional[str] = None
33
+ """Language to use while running OCR. Defaults to english."""
34
+ max_answer_len: Optional[int] = None
35
+ """The maximum length of predicted answers (e.g., only answers with a shorter length are
36
+ considered).
37
+ """
38
+ max_question_len: Optional[int] = None
39
+ """The maximum length of the question after tokenization. It will be truncated if needed."""
40
+ max_seq_len: Optional[int] = None
41
+ """The maximum length of the total sentence (context + question) in tokens of each chunk
42
+ passed to the model. The context will be split in several chunks (using doc_stride as
43
+ overlap) if needed.
44
+ """
45
+ top_k: Optional[int] = None
46
+ """The number of answers to return (will be chosen by order of likelihood). Can return less
47
+ than top_k answers if there are not enough options available within the context.
48
+ """
49
+ word_boxes: Optional[List[Union[List[float], str]]] = None
50
+ """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
51
+ skip the OCR step and use the provided bounding boxes instead.
52
+ """
53
+
54
+
55
+ @dataclass_with_extra
56
+ class DocumentQuestionAnsweringInput(BaseInferenceType):
57
+ """Inputs for Document Question Answering inference"""
58
+
59
+ inputs: DocumentQuestionAnsweringInputData
60
+ """One (document, question) pair to answer"""
61
+ parameters: Optional[DocumentQuestionAnsweringParameters] = None
62
+ """Additional inference parameters for Document Question Answering"""
63
+
64
+
65
+ @dataclass_with_extra
66
+ class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
67
+ """Outputs of inference for the Document Question Answering task"""
68
+
69
+ answer: str
70
+ """The answer to the question."""
71
+ end: int
72
+ """The end word index of the answer (in the OCR’d version of the input or provided word
73
+ boxes).
74
+ """
75
+ score: float
76
+ """The probability associated to the answer."""
77
+ start: int
78
+ """The start word index of the answer (in the OCR’d version of the input or provided word
79
+ boxes).
80
+ """
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import List, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ FeatureExtractionInputTruncationDirection = Literal["Left", "Right"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class FeatureExtractionInput(BaseInferenceType):
16
+ """Feature Extraction Input.
17
+ Auto-generated from TEI specs.
18
+ For more details, check out
19
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
20
+ """
21
+
22
+ inputs: Union[List[str], str]
23
+ """The text or list of texts to embed."""
24
+ normalize: Optional[bool] = None
25
+ prompt_name: Optional[str] = None
26
+ """The name of the prompt that should be used by for encoding. If not set, no prompt
27
+ will be applied.
28
+ Must be a key in the `sentence-transformers` configuration `prompts` dictionary.
29
+ For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
30
+ ...},
31
+ then the sentence "What is the capital of France?" will be encoded as
32
+ "query: What is the capital of France?" because the prompt text will be prepended before
33
+ any text to encode.
34
+ """
35
+ truncate: Optional[bool] = None
36
+ truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageClassificationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Image Classification"""
17
+
18
+ function_to_apply: Optional["ImageClassificationOutputTransform"] = None
19
+ """The function to apply to the model outputs in order to retrieve the scores."""
20
+ top_k: Optional[int] = None
21
+ """When specified, limits the output to the top K most probable classes."""
22
+
23
+
24
+ @dataclass_with_extra
25
+ class ImageClassificationInput(BaseInferenceType):
26
+ """Inputs for Image Classification inference"""
27
+
28
+ inputs: str
29
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
30
+ also provide the image data as a raw bytes payload.
31
+ """
32
+ parameters: Optional[ImageClassificationParameters] = None
33
+ """Additional inference parameters for Image Classification"""
34
+
35
+
36
+ @dataclass_with_extra
37
+ class ImageClassificationOutputElement(BaseInferenceType):
38
+ """Outputs of inference for the Image Classification task"""
39
+
40
+ label: str
41
+ """The predicted class label."""
42
+ score: float
43
+ """The corresponding probability."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageSegmentationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Image Segmentation"""
17
+
18
+ mask_threshold: Optional[float] = None
19
+ """Threshold to use when turning the predicted masks into binary values."""
20
+ overlap_mask_area_threshold: Optional[float] = None
21
+ """Mask overlap threshold to eliminate small, disconnected segments."""
22
+ subtask: Optional["ImageSegmentationSubtask"] = None
23
+ """Segmentation task to be performed, depending on model capabilities."""
24
+ threshold: Optional[float] = None
25
+ """Probability threshold to filter out predicted masks."""
26
+
27
+
28
+ @dataclass_with_extra
29
+ class ImageSegmentationInput(BaseInferenceType):
30
+ """Inputs for Image Segmentation inference"""
31
+
32
+ inputs: str
33
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
34
+ also provide the image data as a raw bytes payload.
35
+ """
36
+ parameters: Optional[ImageSegmentationParameters] = None
37
+ """Additional inference parameters for Image Segmentation"""
38
+
39
+
40
+ @dataclass_with_extra
41
+ class ImageSegmentationOutputElement(BaseInferenceType):
42
+ """Outputs of inference for the Image Segmentation task
43
+ A predicted mask / segment
44
+ """
45
+
46
+ label: str
47
+ """The label of the predicted segment."""
48
+ mask: str
49
+ """The corresponding mask as a black-and-white image (base64-encoded)."""
50
+ score: Optional[float] = None
51
+ """The score or confidence degree the model has."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class ImageToImageTargetSize(BaseInferenceType):
13
+ """The size in pixel of the output image."""
14
+
15
+ height: int
16
+ width: int
17
+
18
+
19
+ @dataclass_with_extra
20
+ class ImageToImageParameters(BaseInferenceType):
21
+ """Additional inference parameters for Image To Image"""
22
+
23
+ guidance_scale: Optional[float] = None
24
+ """For diffusion models. A higher guidance scale value encourages the model to generate
25
+ images closely linked to the text prompt at the expense of lower image quality.
26
+ """
27
+ negative_prompt: Optional[str] = None
28
+ """One prompt to guide what NOT to include in image generation."""
29
+ num_inference_steps: Optional[int] = None
30
+ """For diffusion models. The number of denoising steps. More denoising steps usually lead to
31
+ a higher quality image at the expense of slower inference.
32
+ """
33
+ target_size: Optional[ImageToImageTargetSize] = None
34
+ """The size in pixel of the output image."""
35
+
36
+
37
+ @dataclass_with_extra
38
+ class ImageToImageInput(BaseInferenceType):
39
+ """Inputs for Image To Image inference"""
40
+
41
+ inputs: str
42
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
43
+ also provide the image data as a raw bytes payload.
44
+ """
45
+ parameters: Optional[ImageToImageParameters] = None
46
+ """Additional inference parameters for Image To Image"""
47
+
48
+
49
+ @dataclass_with_extra
50
+ class ImageToImageOutput(BaseInferenceType):
51
+ """Outputs of inference for the Image To Image task"""
52
+
53
+ image: Any
54
+ """The output image returned as raw bytes in the payload."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageToTextEarlyStoppingEnum = Literal["never"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageToTextGenerationParameters(BaseInferenceType):
16
+ """Parametrization of the text generation process"""
17
+
18
+ do_sample: Optional[bool] = None
19
+ """Whether to use sampling instead of greedy decoding when generating new tokens."""
20
+ early_stopping: Optional[Union[bool, "ImageToTextEarlyStoppingEnum"]] = None
21
+ """Controls the stopping condition for beam-based methods."""
22
+ epsilon_cutoff: Optional[float] = None
23
+ """If set to float strictly between 0 and 1, only tokens with a conditional probability
24
+ greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
25
+ 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
26
+ Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
27
+ """
28
+ eta_cutoff: Optional[float] = None
29
+ """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
30
+ float strictly between 0 and 1, a token is only considered if it is greater than either
31
+ eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
32
+ term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
33
+ the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
34
+ See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
35
+ for more details.
36
+ """
37
+ max_length: Optional[int] = None
38
+ """The maximum length (in tokens) of the generated text, including the input."""
39
+ max_new_tokens: Optional[int] = None
40
+ """The maximum number of tokens to generate. Takes precedence over max_length."""
41
+ min_length: Optional[int] = None
42
+ """The minimum length (in tokens) of the generated text, including the input."""
43
+ min_new_tokens: Optional[int] = None
44
+ """The minimum number of tokens to generate. Takes precedence over min_length."""
45
+ num_beam_groups: Optional[int] = None
46
+ """Number of groups to divide num_beams into in order to ensure diversity among different
47
+ groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
48
+ """
49
+ num_beams: Optional[int] = None
50
+ """Number of beams to use for beam search."""
51
+ penalty_alpha: Optional[float] = None
52
+ """The value balances the model confidence and the degeneration penalty in contrastive
53
+ search decoding.
54
+ """
55
+ temperature: Optional[float] = None
56
+ """The value used to modulate the next token probabilities."""
57
+ top_k: Optional[int] = None
58
+ """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
59
+ top_p: Optional[float] = None
60
+ """If set to float < 1, only the smallest set of most probable tokens with probabilities
61
+ that add up to top_p or higher are kept for generation.
62
+ """
63
+ typical_p: Optional[float] = None
64
+ """Local typicality measures how similar the conditional probability of predicting a target
65
+ token next is to the expected conditional probability of predicting a random token next,
66
+ given the partial text already generated. If set to float < 1, the smallest set of the
67
+ most locally typical tokens with probabilities that add up to typical_p or higher are
68
+ kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
69
+ """
70
+ use_cache: Optional[bool] = None
71
+ """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
+ @dataclass_with_extra
75
+ class ImageToTextParameters(BaseInferenceType):
76
+ """Additional inference parameters for Image To Text"""
77
+
78
+ max_new_tokens: Optional[int] = None
79
+ """The amount of maximum tokens to generate."""
80
+ # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
81
+ generate_kwargs: Optional[ImageToTextGenerationParameters] = None
82
+ """Parametrization of the text generation process"""
83
+
84
+
85
+ @dataclass_with_extra
86
+ class ImageToTextInput(BaseInferenceType):
87
+ """Inputs for Image To Text inference"""
88
+
89
+ inputs: Any
90
+ """The input image data"""
91
+ parameters: Optional[ImageToTextParameters] = None
92
+ """Additional inference parameters for Image To Text"""
93
+
94
+
95
+ @dataclass_with_extra
96
+ class ImageToTextOutput(BaseInferenceType):
97
+ """Outputs of inference for the Image To Text task"""
98
+
99
+ generated_text: Any
100
+ image_to_text_output_generated_text: Optional[str] = None
101
+ """The generated text."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class QuestionAnsweringInputData(BaseInferenceType):
13
+ """One (context, question) pair to answer"""
14
+
15
+ context: str
16
+ """The context to be used for answering the question"""
17
+ question: str
18
+ """The question to be answered"""
19
+
20
+
21
+ @dataclass_with_extra
22
+ class QuestionAnsweringParameters(BaseInferenceType):
23
+ """Additional inference parameters for Question Answering"""
24
+
25
+ align_to_words: Optional[bool] = None
26
+ """Attempts to align the answer to real words. Improves quality on space separated
27
+ languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
28
+ """
29
+ doc_stride: Optional[int] = None
30
+ """If the context is too long to fit with the question for the model, it will be split in
31
+ several chunks with some overlap. This argument controls the size of that overlap.
32
+ """
33
+ handle_impossible_answer: Optional[bool] = None
34
+ """Whether to accept impossible as an answer."""
35
+ max_answer_len: Optional[int] = None
36
+ """The maximum length of predicted answers (e.g., only answers with a shorter length are
37
+ considered).
38
+ """
39
+ max_question_len: Optional[int] = None
40
+ """The maximum length of the question after tokenization. It will be truncated if needed."""
41
+ max_seq_len: Optional[int] = None
42
+ """The maximum length of the total sentence (context + question) in tokens of each chunk
43
+ passed to the model. The context will be split in several chunks (using docStride as
44
+ overlap) if needed.
45
+ """
46
+ top_k: Optional[int] = None
47
+ """The number of answers to return (will be chosen by order of likelihood). Note that we
48
+ return less than topk answers if there are not enough options available within the
49
+ context.
50
+ """
51
+
52
+
53
+ @dataclass_with_extra
54
+ class QuestionAnsweringInput(BaseInferenceType):
55
+ """Inputs for Question Answering inference"""
56
+
57
+ inputs: QuestionAnsweringInputData
58
+ """One (context, question) pair to answer"""
59
+ parameters: Optional[QuestionAnsweringParameters] = None
60
+ """Additional inference parameters for Question Answering"""
61
+
62
+
63
+ @dataclass_with_extra
64
+ class QuestionAnsweringOutputElement(BaseInferenceType):
65
+ """Outputs of inference for the Question Answering task"""
66
+
67
+ answer: str
68
+ """The answer to the question."""
69
+ end: int
70
+ """The character position in the input where the answer ends."""
71
+ score: float
72
+ """The probability associated to the answer."""
73
+ start: int
74
+ """The character position in the input where the answer begins."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Dict, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ SummarizationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class SummarizationParameters(BaseInferenceType):
16
+ """Additional inference parameters for summarization."""
17
+
18
+ clean_up_tokenization_spaces: Optional[bool] = None
19
+ """Whether to clean up the potential extra spaces in the text output."""
20
+ generate_parameters: Optional[Dict[str, Any]] = None
21
+ """Additional parametrization of the text generation algorithm."""
22
+ truncation: Optional["SummarizationTruncationStrategy"] = None
23
+ """The truncation strategy to use."""
24
+
25
+
26
+ @dataclass_with_extra
27
+ class SummarizationInput(BaseInferenceType):
28
+ """Inputs for Summarization inference"""
29
+
30
+ inputs: str
31
+ """The input text to summarize."""
32
+ parameters: Optional[SummarizationParameters] = None
33
+ """Additional inference parameters for summarization."""
34
+
35
+
36
+ @dataclass_with_extra
37
+ class SummarizationOutput(BaseInferenceType):
38
+ """Outputs of inference for the Summarization task"""
39
+
40
+ summary_text: str
41
+ """The summarized text."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Dict, List, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class TableQuestionAnsweringInputData(BaseInferenceType):
13
+ """One (table, question) pair to answer"""
14
+
15
+ question: str
16
+ """The question to be answered about the table"""
17
+ table: Dict[str, List[str]]
18
+ """The table to serve as context for the questions"""
19
+
20
+
21
+ Padding = Literal["do_not_pad", "longest", "max_length"]
22
+
23
+
24
+ @dataclass_with_extra
25
+ class TableQuestionAnsweringParameters(BaseInferenceType):
26
+ """Additional inference parameters for Table Question Answering"""
27
+
28
+ padding: Optional["Padding"] = None
29
+ """Activates and controls padding."""
30
+ sequential: Optional[bool] = None
31
+ """Whether to do inference sequentially or as a batch. Batching is faster, but models like
32
+ SQA require the inference to be done sequentially to extract relations within sequences,
33
+ given their conversational nature.
34
+ """
35
+ truncation: Optional[bool] = None
36
+ """Activates and controls truncation."""
37
+
38
+
39
+ @dataclass_with_extra
40
+ class TableQuestionAnsweringInput(BaseInferenceType):
41
+ """Inputs for Table Question Answering inference"""
42
+
43
+ inputs: TableQuestionAnsweringInputData
44
+ """One (table, question) pair to answer"""
45
+ parameters: Optional[TableQuestionAnsweringParameters] = None
46
+ """Additional inference parameters for Table Question Answering"""
47
+
48
+
49
+ @dataclass_with_extra
50
+ class TableQuestionAnsweringOutputElement(BaseInferenceType):
51
+ """Outputs of inference for the Table Question Answering task"""
52
+
53
+ answer: str
54
+ """The answer of the question given the table. If there is an aggregator, the answer will be
55
+ preceded by `AGGREGATOR >`.
56
+ """
57
+ cells: List[str]
58
+ """List of strings made up of the answer cell values."""
59
+ coordinates: List[List[int]]
60
+ """Coordinates of the cells of the answers."""
61
+ aggregator: Optional[str] = None
62
+ """If the model has an aggregator, this returns the aggregator."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Dict, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class Text2TextGenerationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Text2text Generation"""
17
+
18
+ clean_up_tokenization_spaces: Optional[bool] = None
19
+ """Whether to clean up the potential extra spaces in the text output."""
20
+ generate_parameters: Optional[Dict[str, Any]] = None
21
+ """Additional parametrization of the text generation algorithm"""
22
+ truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
23
+ """The truncation strategy to use"""
24
+
25
+
26
+ @dataclass_with_extra
27
+ class Text2TextGenerationInput(BaseInferenceType):
28
+ """Inputs for Text2text Generation inference"""
29
+
30
+ inputs: str
31
+ """The input text data"""
32
+ parameters: Optional[Text2TextGenerationParameters] = None
33
+ """Additional inference parameters for Text2text Generation"""
34
+
35
+
36
+ @dataclass_with_extra
37
+ class Text2TextGenerationOutput(BaseInferenceType):
38
+ """Outputs of inference for the Text2text Generation task"""
39
+
40
+ generated_text: Any
41
+ text2_text_generation_output_generated_text: Optional[str] = None
42
+ """The generated text."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ TextClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class TextClassificationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Text Classification"""
17
+
18
+ function_to_apply: Optional["TextClassificationOutputTransform"] = None
19
+ """The function to apply to the model outputs in order to retrieve the scores."""
20
+ top_k: Optional[int] = None
21
+ """When specified, limits the output to the top K most probable classes."""
22
+
23
+
24
+ @dataclass_with_extra
25
+ class TextClassificationInput(BaseInferenceType):
26
+ """Inputs for Text Classification inference"""
27
+
28
+ inputs: str
29
+ """The text to classify"""
30
+ parameters: Optional[TextClassificationParameters] = None
31
+ """Additional inference parameters for Text Classification"""
32
+
33
+
34
+ @dataclass_with_extra
35
+ class TextClassificationOutputElement(BaseInferenceType):
36
+ """Outputs of inference for the Text Classification task"""
37
+
38
+ label: str
39
+ """The predicted class label."""
40
+ score: float
41
+ """The corresponding probability."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_generation.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, List, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ TypeEnum = Literal["json", "regex"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class TextGenerationInputGrammarType(BaseInferenceType):
16
+ type: "TypeEnum"
17
+ value: Any
18
+ """A string that represents a [JSON Schema](https://json-schema.org/).
19
+ JSON Schema is a declarative language that allows to annotate JSON documents
20
+ with types and descriptions.
21
+ """
22
+
23
+
24
+ @dataclass_with_extra
25
+ class TextGenerationInputGenerateParameters(BaseInferenceType):
26
+ adapter_id: Optional[str] = None
27
+ """Lora adapter id"""
28
+ best_of: Optional[int] = None
29
+ """Generate best_of sequences and return the one if the highest token logprobs."""
30
+ decoder_input_details: Optional[bool] = None
31
+ """Whether to return decoder input token logprobs and ids."""
32
+ details: Optional[bool] = None
33
+ """Whether to return generation details."""
34
+ do_sample: Optional[bool] = None
35
+ """Activate logits sampling."""
36
+ frequency_penalty: Optional[float] = None
37
+ """The parameter for frequency penalty. 1.0 means no penalty
38
+ Penalize new tokens based on their existing frequency in the text so far,
39
+ decreasing the model's likelihood to repeat the same line verbatim.
40
+ """
41
+ grammar: Optional[TextGenerationInputGrammarType] = None
42
+ max_new_tokens: Optional[int] = None
43
+ """Maximum number of tokens to generate."""
44
+ repetition_penalty: Optional[float] = None
45
+ """The parameter for repetition penalty. 1.0 means no penalty.
46
+ See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
47
+ """
48
+ return_full_text: Optional[bool] = None
49
+ """Whether to prepend the prompt to the generated text"""
50
+ seed: Optional[int] = None
51
+ """Random sampling seed."""
52
+ stop: Optional[List[str]] = None
53
+ """Stop generating tokens if a member of `stop` is generated."""
54
+ temperature: Optional[float] = None
55
+ """The value used to module the logits distribution."""
56
+ top_k: Optional[int] = None
57
+ """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
58
+ top_n_tokens: Optional[int] = None
59
+ """The number of highest probability vocabulary tokens to keep for top-n-filtering."""
60
+ top_p: Optional[float] = None
61
+ """Top-p value for nucleus sampling."""
62
+ truncate: Optional[int] = None
63
+ """Truncate inputs tokens to the given size."""
64
+ typical_p: Optional[float] = None
65
+ """Typical Decoding mass
66
+ See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666)
67
+ for more information.
68
+ """
69
+ watermark: Optional[bool] = None
70
+ """Watermarking with [A Watermark for Large Language
71
+ Models](https://arxiv.org/abs/2301.10226).
72
+ """
73
+
74
+
75
+ @dataclass_with_extra
76
+ class TextGenerationInput(BaseInferenceType):
77
+ """Text Generation Input.
78
+ Auto-generated from TGI specs.
79
+ For more details, check out
80
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
81
+ """
82
+
83
+ inputs: str
84
+ parameters: Optional[TextGenerationInputGenerateParameters] = None
85
+ stream: Optional[bool] = None
86
+
87
+
88
+ TextGenerationOutputFinishReason = Literal["length", "eos_token", "stop_sequence"]
89
+
90
+
91
+ @dataclass_with_extra
92
+ class TextGenerationOutputPrefillToken(BaseInferenceType):
93
+ id: int
94
+ logprob: float
95
+ text: str
96
+
97
+
98
+ @dataclass_with_extra
99
+ class TextGenerationOutputToken(BaseInferenceType):
100
+ id: int
101
+ logprob: float
102
+ special: bool
103
+ text: str
104
+
105
+
106
+ @dataclass_with_extra
107
+ class TextGenerationOutputBestOfSequence(BaseInferenceType):
108
+ finish_reason: "TextGenerationOutputFinishReason"
109
+ generated_text: str
110
+ generated_tokens: int
111
+ prefill: List[TextGenerationOutputPrefillToken]
112
+ tokens: List[TextGenerationOutputToken]
113
+ seed: Optional[int] = None
114
+ top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
115
+
116
+
117
+ @dataclass_with_extra
118
+ class TextGenerationOutputDetails(BaseInferenceType):
119
+ finish_reason: "TextGenerationOutputFinishReason"
120
+ generated_tokens: int
121
+ prefill: List[TextGenerationOutputPrefillToken]
122
+ tokens: List[TextGenerationOutputToken]
123
+ best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None
124
+ seed: Optional[int] = None
125
+ top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
126
+
127
+
128
+ @dataclass_with_extra
129
+ class TextGenerationOutput(BaseInferenceType):
130
+ """Text Generation Output.
131
+ Auto-generated from TGI specs.
132
+ For more details, check out
133
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
134
+ """
135
+
136
+ generated_text: str
137
+ details: Optional[TextGenerationOutputDetails] = None
138
+
139
+
140
+ @dataclass_with_extra
141
+ class TextGenerationStreamOutputStreamDetails(BaseInferenceType):
142
+ finish_reason: "TextGenerationOutputFinishReason"
143
+ generated_tokens: int
144
+ input_length: int
145
+ seed: Optional[int] = None
146
+
147
+
148
+ @dataclass_with_extra
149
+ class TextGenerationStreamOutputToken(BaseInferenceType):
150
+ id: int
151
+ logprob: float
152
+ special: bool
153
+ text: str
154
+
155
+
156
+ @dataclass_with_extra
157
+ class TextGenerationStreamOutput(BaseInferenceType):
158
+ """Text Generation Stream Output.
159
+ Auto-generated from TGI specs.
160
+ For more details, check out
161
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
162
+ """
163
+
164
+ index: int
165
+ token: TextGenerationStreamOutputToken
166
+ details: Optional[TextGenerationStreamOutputStreamDetails] = None
167
+ generated_text: Optional[str] = None
168
+ top_tokens: Optional[List[TextGenerationStreamOutputToken]] = None
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Allowed non-boolean value for `early_stopping` in TextToAudioGenerationParameters.
TextToAudioEarlyStoppingEnum = Literal["never"]
12
+
13
+
14
@dataclass_with_extra
class TextToAudioGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "TextToAudioEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
@dataclass_with_extra
class TextToAudioParameters(BaseInferenceType):
    """Additional inference parameters for Text To Audio"""

    # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
    generate_kwargs: Optional[TextToAudioGenerationParameters] = None
    """Parametrization of the text generation process"""
81
+
82
+
83
@dataclass_with_extra
class TextToAudioInput(BaseInferenceType):
    """Inputs for Text To Audio inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[TextToAudioParameters] = None
    """Additional inference parameters for Text To Audio"""
91
+
92
+
93
@dataclass_with_extra
class TextToAudioOutput(BaseInferenceType):
    """Outputs of inference for the Text To Audio task"""

    # Typed `Any`: the concrete waveform representation is backend-dependent.
    audio: Any
    """The generated audio waveform."""
    sampling_rate: float
    """The sampling rate of the generated audio waveform."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_speech.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Allowed non-boolean value for `early_stopping` in TextToSpeechGenerationParameters.
TextToSpeechEarlyStoppingEnum = Literal["never"]
12
+
13
+
14
@dataclass_with_extra
class TextToSpeechGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
@dataclass_with_extra
class TextToSpeechParameters(BaseInferenceType):
    """Additional inference parameters for Text To Speech"""

    # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
    generate_kwargs: Optional[TextToSpeechGenerationParameters] = None
    """Parametrization of the text generation process"""
81
+
82
+
83
@dataclass_with_extra
class TextToSpeechInput(BaseInferenceType):
    """Inputs for Text To Speech inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[TextToSpeechParameters] = None
    """Additional inference parameters for Text To Speech"""
91
+
92
+
93
@dataclass_with_extra
class TextToSpeechOutput(BaseInferenceType):
    """Outputs of inference for the Text To Speech task"""

    # Typed `Any`: the concrete audio representation is backend-dependent.
    audio: Any
    """The generated audio"""
    # Unlike TextToAudioOutput, the sampling rate is optional here.
    sampling_rate: Optional[float] = None
    """The sampling rate of the generated audio waveform."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, List, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class TextToVideoParameters(BaseInferenceType):
    """Additional inference parameters for Text To Video"""

    guidance_scale: Optional[float] = None
    """A higher guidance scale value encourages the model to generate videos closely linked to
    the text prompt, but values too high may cause saturation and other artifacts.
    """
    negative_prompt: Optional[List[str]] = None
    """One or several prompt to guide what NOT to include in video generation."""
    # NOTE(review): typed `float` in the spec even though a frame count is a
    # whole number — keep as-is to match the generated schema.
    num_frames: Optional[float] = None
    """The num_frames parameter determines how many video frames are generated."""
    num_inference_steps: Optional[int] = None
    """The number of denoising steps. More denoising steps usually lead to a higher quality
    video at the expense of slower inference.
    """
    seed: Optional[int] = None
    """Seed for the random number generator."""
29
+
30
+
31
@dataclass_with_extra
class TextToVideoInput(BaseInferenceType):
    """Inputs for Text To Video inference"""

    inputs: str
    """The input text data (sometimes called "prompt")"""
    parameters: Optional[TextToVideoParameters] = None
    """Additional inference parameters for Text To Video"""
39
+
40
+
41
@dataclass_with_extra
class TextToVideoOutput(BaseInferenceType):
    """Outputs of inference for the Text To Video task"""

    # Typed `Any`: carries raw bytes per the field docstring below.
    video: Any
    """The generated video returned as raw bytes in the payload."""
vllm/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# Allowed values for `function_to_apply` in VideoClassificationParameters.
VideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
12
+
13
+
14
@dataclass_with_extra
class VideoClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Video Classification"""

    frame_sampling_rate: Optional[int] = None
    """The sampling rate used to select frames from the video."""
    function_to_apply: Optional["VideoClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    num_frames: Optional[int] = None
    """The number of sampled frames to consider for classification."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""
26
+
27
+
28
@dataclass_with_extra
class VideoClassificationInput(BaseInferenceType):
    """Inputs for Video Classification inference"""

    # Typed `Any`: the concrete video representation is backend-dependent.
    inputs: Any
    """The input video data"""
    parameters: Optional[VideoClassificationParameters] = None
    """Additional inference parameters for Video Classification"""
36
+
37
+
38
@dataclass_with_extra
class VideoClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Video Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""