ZTWHHH commited on
Commit
f21b90e
·
verified ·
1 Parent(s): 01806e8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/libasan.so.6 +3 -0
  3. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh +47 -0
  4. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh +149 -0
  5. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h +91 -0
  6. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h +105 -0
  7. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h +23 -0
  8. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h +93 -0
  9. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h +75 -0
  10. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h +318 -0
  11. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h +37 -0
  12. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h +174 -0
  13. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h +11 -0
  14. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h +5 -0
  15. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh +4 -0
  16. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h +11 -0
  17. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh +78 -0
  18. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h +87 -0
  19. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh +53 -0
  20. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h +11 -0
  21. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh +43 -0
  22. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h +249 -0
  23. parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h +14 -0
  24. parrot/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h +41 -0
  25. videollama2/lib/python3.10/site-packages/latex2mathml/__init__.py +3 -0
  26. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/__init__.cpython-310.pyc +0 -0
  27. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/commands.cpython-310.pyc +0 -0
  28. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/converter.cpython-310.pyc +0 -0
  29. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/exceptions.cpython-310.pyc +0 -0
  30. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/symbols_parser.cpython-310.pyc +0 -0
  31. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/tokenizer.cpython-310.pyc +0 -0
  32. videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/walker.cpython-310.pyc +0 -0
  33. videollama2/lib/python3.10/site-packages/latex2mathml/commands.py +506 -0
  34. videollama2/lib/python3.10/site-packages/latex2mathml/converter.py +595 -0
  35. videollama2/lib/python3.10/site-packages/latex2mathml/exceptions.py +46 -0
  36. videollama2/lib/python3.10/site-packages/latex2mathml/py.typed +1 -0
  37. videollama2/lib/python3.10/site-packages/latex2mathml/symbols_parser.py +78 -0
  38. videollama2/lib/python3.10/site-packages/latex2mathml/tokenizer.py +55 -0
  39. videollama2/lib/python3.10/site-packages/latex2mathml/unimathsymbols.txt +0 -0
  40. videollama2/lib/python3.10/site-packages/latex2mathml/walker.py +457 -0
  41. videollama2/lib/python3.10/site-packages/shortuuid/__init__.py +18 -0
  42. videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/__init__.cpython-310.pyc +0 -0
  43. videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/cli.cpython-310.pyc +0 -0
  44. videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/django_fields.cpython-310.pyc +0 -0
  45. videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/main.cpython-310.pyc +0 -0
  46. videollama2/lib/python3.10/site-packages/shortuuid/cli.py +50 -0
  47. videollama2/lib/python3.10/site-packages/shortuuid/django_fields.py +39 -0
  48. videollama2/lib/python3.10/site-packages/shortuuid/main.py +137 -0
  49. videollama2/lib/python3.10/site-packages/shortuuid/py.typed +0 -0
  50. videollama2/lib/python3.10/site-packages/shortuuid/test_shortuuid.py +224 -0
.gitattributes CHANGED
@@ -489,3 +489,4 @@ videollama2/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSans-Oblique.ttf fil
489
  videollama2/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf filter=lfs diff=lfs merge=lfs -text
490
  videollama2/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed.ttf filter=lfs diff=lfs merge=lfs -text
491
  parrot/lib/python3.10/site-packages/xformers/_C.so filter=lfs diff=lfs merge=lfs -text
 
 
489
  videollama2/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf filter=lfs diff=lfs merge=lfs -text
490
  videollama2/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed.ttf filter=lfs diff=lfs merge=lfs -text
491
  parrot/lib/python3.10/site-packages/xformers/_C.so filter=lfs diff=lfs merge=lfs -text
492
+ parrot/lib/libasan.so.6 filter=lfs diff=lfs merge=lfs -text
parrot/lib/libasan.so.6 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a8a7995a4d84a8817af8d1604bef621e99d0622df4eda14f6fe5245735a952e
3
+ size 7575272
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/ApplyGridUtils.cuh ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ATen/cuda/CUDAContext.h>
2
+
3
+ #include <cuda_runtime.h>
4
+
5
+ namespace at::cuda {
6
+
7
+ /**
8
+ Computes ceil(a / b)
9
+ */
10
+ template <typename T>
11
+ __host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
12
+ return (a + b - 1) / b;
13
+ }
14
+
15
+ namespace {
16
+
17
+ // Threads per block for our apply kernel
18
+ // FIXME: use occupancy calculator instead
19
+ constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
20
+ constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
21
+
22
+ template <int step = 1>
23
+ inline bool getApplyGrid(uint64_t totalElements, dim3& grid, c10::DeviceIndex curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
24
+ if (curDevice == -1) return false;
25
+ uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
26
+ uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
27
+ uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
28
+ if (numBlocks > maxGridX)
29
+ numBlocks = maxGridX;
30
+ grid = dim3(numBlocks);
31
+ return true;
32
+ }
33
+
34
+ constexpr int getApplyBlocksPerSM() {
35
+ return AT_APPLY_BLOCKS_PER_SM;
36
+ }
37
+
38
+ constexpr int getApplyBlockSize() {
39
+ return AT_APPLY_THREADS_PER_BLOCK;
40
+ }
41
+
42
+ inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
43
+ return dim3(max_threads_per_block);
44
+ }
45
+
46
+ } // anonymous namespace
47
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/AsmUtils.cuh ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ // Collection of direct PTX functions
5
+
6
+ namespace at::cuda {
7
+
8
+ template <typename T>
9
+ struct Bitfield {};
10
+
11
+ template <>
12
+ struct Bitfield<unsigned int> {
13
+ static __device__ __host__ __forceinline__
14
+ unsigned int getBitfield(unsigned int val, int pos, int len) {
15
+ #if !defined(__CUDA_ARCH__)
16
+ pos &= 0xff;
17
+ len &= 0xff;
18
+
19
+ unsigned int m = (1u << len) - 1u;
20
+ return (val >> pos) & m;
21
+ #else
22
+ unsigned int ret;
23
+ asm("bfe.u32 %0, %1, %2, %3;" : "=r"(ret) : "r"(val), "r"(pos), "r"(len));
24
+ return ret;
25
+ #endif
26
+ }
27
+
28
+ static __device__ __host__ __forceinline__
29
+ unsigned int setBitfield(unsigned int val, unsigned int toInsert, int pos, int len) {
30
+ #if !defined(__CUDA_ARCH__)
31
+ pos &= 0xff;
32
+ len &= 0xff;
33
+
34
+ unsigned int m = (1u << len) - 1u;
35
+ toInsert &= m;
36
+ toInsert <<= pos;
37
+ m <<= pos;
38
+
39
+ return (val & ~m) | toInsert;
40
+ #else
41
+ unsigned int ret;
42
+ asm("bfi.b32 %0, %1, %2, %3, %4;" :
43
+ "=r"(ret) : "r"(toInsert), "r"(val), "r"(pos), "r"(len));
44
+ return ret;
45
+ #endif
46
+ }
47
+ };
48
+
49
+ template <>
50
+ struct Bitfield<uint64_t> {
51
+ static __device__ __host__ __forceinline__
52
+ uint64_t getBitfield(uint64_t val, int pos, int len) {
53
+ #if !defined(__CUDA_ARCH__)
54
+ pos &= 0xff;
55
+ len &= 0xff;
56
+
57
+ uint64_t m = (1u << len) - 1u;
58
+ return (val >> pos) & m;
59
+ #else
60
+ uint64_t ret;
61
+ asm("bfe.u64 %0, %1, %2, %3;" : "=l"(ret) : "l"(val), "r"(pos), "r"(len));
62
+ return ret;
63
+ #endif
64
+ }
65
+
66
+ static __device__ __host__ __forceinline__
67
+ uint64_t setBitfield(uint64_t val, uint64_t toInsert, int pos, int len) {
68
+ #if !defined(__CUDA_ARCH__)
69
+ pos &= 0xff;
70
+ len &= 0xff;
71
+
72
+ uint64_t m = (1u << len) - 1u;
73
+ toInsert &= m;
74
+ toInsert <<= pos;
75
+ m <<= pos;
76
+
77
+ return (val & ~m) | toInsert;
78
+ #else
79
+ uint64_t ret;
80
+ asm("bfi.b64 %0, %1, %2, %3, %4;" :
81
+ "=l"(ret) : "l"(toInsert), "l"(val), "r"(pos), "r"(len));
82
+ return ret;
83
+ #endif
84
+ }
85
+ };
86
+
87
+ __device__ __forceinline__ int getLaneId() {
88
+ #if defined(USE_ROCM)
89
+ return __lane_id();
90
+ #else
91
+ int laneId;
92
+ asm("mov.s32 %0, %%laneid;" : "=r"(laneId) );
93
+ return laneId;
94
+ #endif
95
+ }
96
+
97
+ #if defined(USE_ROCM)
98
+ __device__ __forceinline__ unsigned long long int getLaneMaskLt() {
99
+ const std::uint64_t m = (1ull << getLaneId()) - 1ull;
100
+ return m;
101
+ }
102
+ #else
103
+ __device__ __forceinline__ unsigned getLaneMaskLt() {
104
+ unsigned mask;
105
+ asm("mov.u32 %0, %%lanemask_lt;" : "=r"(mask));
106
+ return mask;
107
+ }
108
+ #endif
109
+
110
+ #if defined (USE_ROCM)
111
+ __device__ __forceinline__ unsigned long long int getLaneMaskLe() {
112
+ std::uint64_t m = UINT64_MAX >> (sizeof(std::uint64_t) * CHAR_BIT - (getLaneId() + 1));
113
+ return m;
114
+ }
115
+ #else
116
+ __device__ __forceinline__ unsigned getLaneMaskLe() {
117
+ unsigned mask;
118
+ asm("mov.u32 %0, %%lanemask_le;" : "=r"(mask));
119
+ return mask;
120
+ }
121
+ #endif
122
+
123
+ #if defined(USE_ROCM)
124
+ __device__ __forceinline__ unsigned long long int getLaneMaskGt() {
125
+ const std::uint64_t m = getLaneMaskLe();
126
+ return m ? ~m : m;
127
+ }
128
+ #else
129
+ __device__ __forceinline__ unsigned getLaneMaskGt() {
130
+ unsigned mask;
131
+ asm("mov.u32 %0, %%lanemask_gt;" : "=r"(mask));
132
+ return mask;
133
+ }
134
+ #endif
135
+
136
+ #if defined(USE_ROCM)
137
+ __device__ __forceinline__ unsigned long long int getLaneMaskGe() {
138
+ const std::uint64_t m = getLaneMaskLt();
139
+ return ~m;
140
+ }
141
+ #else
142
+ __device__ __forceinline__ unsigned getLaneMaskGe() {
143
+ unsigned mask;
144
+ asm("mov.u32 %0, %%lanemask_ge;" : "=r"(mask));
145
+ return mask;
146
+ }
147
+ #endif
148
+
149
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAContextLight.h ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // Light-weight version of CUDAContext.h with fewer transitive includes
3
+
4
+ #include <cstdint>
5
+
6
+ #include <cuda_runtime_api.h>
7
+ #include <cusparse.h>
8
+ #include <cublas_v2.h>
9
+
10
+ // cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also
11
+ // added bf16 support
12
+ #include <cublasLt.h>
13
+
14
+ #ifdef CUDART_VERSION
15
+ #include <cusolverDn.h>
16
+ #endif
17
+
18
+ #if defined(USE_ROCM)
19
+ #include <hipsolver/hipsolver.h>
20
+ #endif
21
+
22
+ #include <c10/core/Allocator.h>
23
+ #include <c10/cuda/CUDAFunctions.h>
24
+
25
+ namespace c10 {
26
+ struct Allocator;
27
+ }
28
+
29
+ namespace at::cuda {
30
+
31
+ /*
32
+ A common CUDA interface for ATen.
33
+
34
+ This interface is distinct from CUDAHooks, which defines an interface that links
35
+ to both CPU-only and CUDA builds. That interface is intended for runtime
36
+ dispatch and should be used from files that are included in both CPU-only and
37
+ CUDA builds.
38
+
39
+ CUDAContext, on the other hand, should be preferred by files only included in
40
+ CUDA builds. It is intended to expose CUDA functionality in a consistent
41
+ manner.
42
+
43
+ This means there is some overlap between the CUDAContext and CUDAHooks, but
44
+ the choice of which to use is simple: use CUDAContext when in a CUDA-only file,
45
+ use CUDAHooks otherwise.
46
+
47
+ Note that CUDAContext simply defines an interface with no associated class.
48
+ It is expected that the modules whose functions compose this interface will
49
+ manage their own state. There is only a single CUDA context/state.
50
+ */
51
+
52
+ /**
53
+ * DEPRECATED: use device_count() instead
54
+ */
55
+ inline int64_t getNumGPUs() {
56
+ return c10::cuda::device_count();
57
+ }
58
+
59
+ /**
60
+ * CUDA is available if we compiled with CUDA, and there are one or more
61
+ * devices. If we compiled with CUDA but there is a driver problem, etc.,
62
+ * this function will report CUDA is not available (rather than raise an error.)
63
+ */
64
+ inline bool is_available() {
65
+ return c10::cuda::device_count() > 0;
66
+ }
67
+
68
+ TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties();
69
+
70
+ TORCH_CUDA_CPP_API int warp_size();
71
+
72
+ TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(c10::DeviceIndex device);
73
+
74
+ TORCH_CUDA_CPP_API bool canDeviceAccessPeer(
75
+ c10::DeviceIndex device,
76
+ c10::DeviceIndex peer_device);
77
+
78
+ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
79
+
80
+ /* Handles */
81
+ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
82
+ TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
83
+ TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
84
+
85
+ TORCH_CUDA_CPP_API void clearCublasWorkspaces();
86
+
87
+ #if defined(CUDART_VERSION) || defined(USE_ROCM)
88
+ TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle();
89
+ #endif
90
+
91
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADataType.h ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/ScalarType.h>
4
+
5
+ #include <cuda.h>
6
+ #include <library_types.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ template <typename scalar_t>
11
+ cudaDataType getCudaDataType() {
12
+ static_assert(false && sizeof(scalar_t), "Cannot convert type to cudaDataType.");
13
+ return {};
14
+ }
15
+
16
+ template<> inline cudaDataType getCudaDataType<at::Half>() {
17
+ return CUDA_R_16F;
18
+ }
19
+ template<> inline cudaDataType getCudaDataType<float>() {
20
+ return CUDA_R_32F;
21
+ }
22
+ template<> inline cudaDataType getCudaDataType<double>() {
23
+ return CUDA_R_64F;
24
+ }
25
+ template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() {
26
+ return CUDA_C_16F;
27
+ }
28
+ template<> inline cudaDataType getCudaDataType<c10::complex<float>>() {
29
+ return CUDA_C_32F;
30
+ }
31
+ template<> inline cudaDataType getCudaDataType<c10::complex<double>>() {
32
+ return CUDA_C_64F;
33
+ }
34
+
35
+ template<> inline cudaDataType getCudaDataType<uint8_t>() {
36
+ return CUDA_R_8U;
37
+ }
38
+ template<> inline cudaDataType getCudaDataType<int8_t>() {
39
+ return CUDA_R_8I;
40
+ }
41
+ template<> inline cudaDataType getCudaDataType<int>() {
42
+ return CUDA_R_32I;
43
+ }
44
+
45
+ template<> inline cudaDataType getCudaDataType<int16_t>() {
46
+ return CUDA_R_16I;
47
+ }
48
+ template<> inline cudaDataType getCudaDataType<int64_t>() {
49
+ return CUDA_R_64I;
50
+ }
51
+ template<> inline cudaDataType getCudaDataType<at::BFloat16>() {
52
+ return CUDA_R_16BF;
53
+ }
54
+
55
+ inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) {
56
+ switch (scalar_type) {
57
+ case c10::ScalarType::Byte:
58
+ return CUDA_R_8U;
59
+ case c10::ScalarType::Char:
60
+ return CUDA_R_8I;
61
+ case c10::ScalarType::Int:
62
+ return CUDA_R_32I;
63
+ case c10::ScalarType::Half:
64
+ return CUDA_R_16F;
65
+ case c10::ScalarType::Float:
66
+ return CUDA_R_32F;
67
+ case c10::ScalarType::Double:
68
+ return CUDA_R_64F;
69
+ case c10::ScalarType::ComplexHalf:
70
+ return CUDA_C_16F;
71
+ case c10::ScalarType::ComplexFloat:
72
+ return CUDA_C_32F;
73
+ case c10::ScalarType::ComplexDouble:
74
+ return CUDA_C_64F;
75
+ case c10::ScalarType::Short:
76
+ return CUDA_R_16I;
77
+ case c10::ScalarType::Long:
78
+ return CUDA_R_64I;
79
+ case c10::ScalarType::BFloat16:
80
+ return CUDA_R_16BF;
81
+ #if defined(CUDA_VERSION) && CUDA_VERSION >= 11080
82
+ case c10::ScalarType::Float8_e4m3fn:
83
+ return CUDA_R_8F_E4M3;
84
+ case c10::ScalarType::Float8_e5m2:
85
+ return CUDA_R_8F_E5M2;
86
+ #endif
87
+ #if defined(USE_ROCM)
88
+ #if defined(HIP_NEW_TYPE_ENUMS)
89
+ case c10::ScalarType::Float8_e4m3fnuz:
90
+ return HIP_R_8F_E4M3_FNUZ;
91
+ case c10::ScalarType::Float8_e5m2fnuz:
92
+ return HIP_R_8F_E5M2_FNUZ;
93
+ #else
94
+ case c10::ScalarType::Float8_e4m3fnuz:
95
+ return static_cast<hipDataType>(1000);
96
+ case c10::ScalarType::Float8_e5m2fnuz:
97
+ return static_cast<hipDataType>(1001);
98
+ #endif
99
+ #endif
100
+ default:
101
+ TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.")
102
+ }
103
+ }
104
+
105
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDADevice.h ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/Exceptions.h>
4
+
5
+ #include <cuda.h>
6
+ #include <cuda_runtime.h>
7
+
8
+ namespace at::cuda {
9
+
10
+ inline Device getDeviceFromPtr(void* ptr) {
11
+ cudaPointerAttributes attr{};
12
+
13
+ AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr));
14
+
15
+ #if !defined(USE_ROCM)
16
+ TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered,
17
+ "The specified pointer resides on host memory and is not registered with any CUDA device.");
18
+ #endif
19
+
20
+ return {c10::DeviceType::CUDA, static_cast<DeviceIndex>(attr.device)};
21
+ }
22
+
23
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDAGraph.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/Tensor.h>
4
+ #include <c10/core/Device.h>
5
+ #include <c10/cuda/CUDAGraphsC10Utils.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <c10/util/flat_hash_map.h>
8
+
9
+ namespace at {
10
+
11
+ struct Generator;
12
+ struct CUDAGeneratorImpl;
13
+ struct CUDAGeneratorState;
14
+
15
+ namespace cuda {
16
+
17
+ // Standalone way to get a unique mempool id usable as a pool=... argument
18
+ // to CUDAGraph::capture_begin
19
+ TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle();
20
+
21
+ struct TORCH_CUDA_CPP_API CUDAGraph {
22
+ CUDAGraph();
23
+ ~CUDAGraph();
24
+
25
+ static void inc_pending_event_queries();
26
+ static void dec_pending_event_queries();
27
+ static int num_pending_event_queries();
28
+ // See Note [Explicit Registration of Generators to the CUDA Graph]
29
+ void register_generator_state(c10::intrusive_ptr<at::CUDAGeneratorState> state);
30
+ void register_generator_state(const at::Generator& generator);
31
+ void capture_begin(
32
+ MempoolId_t pool = {0, 0},
33
+ cudaStreamCaptureMode capture_mode = cudaStreamCaptureModeGlobal);
34
+ void capture_end();
35
+ void replay();
36
+ void reset();
37
+ MempoolId_t pool();
38
+ void enable_debug_mode();
39
+ void debug_dump(const std::string& debug_path);
40
+
41
+ protected:
42
+ cudaGraph_t graph_ = nullptr;
43
+ cudaGraphExec_t graph_exec_ = nullptr;
44
+
45
+ static std::atomic<int> pending_event_queries;
46
+
47
+ // internal states so reset() can do its best cleaning up
48
+ // Set to true in capture_end if cudaStreamEndCapture succeeded
49
+ // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate
50
+ // to create graph_exec_, then graph_ is deleted
51
+ bool has_graph_ = false;
52
+ // Set to true in capture_end if cudaGraphInstantiate succeeded
53
+ bool has_graph_exec_ = false;
54
+
55
+ // uuid of this instance's current capture, used to
56
+ // specify the pool.
57
+ CaptureId_t id_;
58
+
59
+ // the ID assigned by cuda during graph capture,
60
+ // used to identify when a stream is participating in capture
61
+ CaptureId_t capture_id_ = -1;
62
+
63
+ // uuid used to request a particular private mempool from CUDACachingAllocator.
64
+ // By default, this will be set to {id_, 0}.
65
+ //
66
+ // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_
67
+ // will be set to the other graph's mempool_id_, and therefore share a mempool with the
68
+ // other graph.
69
+ //
70
+ // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(),
71
+ // it will share a mempool with any other captures that used "pool=handle".
72
+ //
73
+ // Sharing a mempool across graphs saves memory, and it's safe if you
74
+ // know you'll replay those graphs in the same order you captured them.
75
+ MempoolId_t mempool_id_;
76
+
77
+ // Stream on which capture began
78
+ at::cuda::CUDAStream capture_stream_;
79
+
80
+ // multiple generator states and their wholegraph_increments in this graph
81
+ // that are managed by the CUDA Graph
82
+ ska::flat_hash_map<c10::intrusive_ptr<at::CUDAGeneratorState>, uint64_t>
83
+ captured_generator_states_;
84
+
85
+ // Device where capture occurred. Right now, for simplicity, we require all ops
86
+ // in a capture to run on the same device, but this is a limitation of CUDAGraph,
87
+ // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device
88
+ // captures if needed.
89
+ int capture_dev_;
90
+ };
91
+
92
+ } // namespace cuda
93
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparse.h ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #if defined(USE_ROCM)
5
+ #include <hipsparse/hipsparse-version.h>
6
+ #define HIPSPARSE_VERSION ((hipsparseVersionMajor*100000) + (hipsparseVersionMinor*100) + hipsparseVersionPatch)
7
+ #endif
8
+
9
+ // cuSparse Generic API added in CUDA 10.1
10
+ // Windows support added in CUDA 11.0
11
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && ((CUSPARSE_VERSION >= 10300) || (CUSPARSE_VERSION >= 11000 && defined(_WIN32)))
12
+ #define AT_USE_CUSPARSE_GENERIC_API() 1
13
+ #else
14
+ #define AT_USE_CUSPARSE_GENERIC_API() 0
15
+ #endif
16
+
17
+ // cuSparse Generic API descriptor pointers were changed to const in CUDA 12.0
18
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
19
+ (CUSPARSE_VERSION < 12000)
20
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 1
21
+ #else
22
+ #define AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS() 0
23
+ #endif
24
+
25
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && \
26
+ (CUSPARSE_VERSION >= 12000)
27
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 1
28
+ #else
29
+ #define AT_USE_CUSPARSE_CONST_DESCRIPTORS() 0
30
+ #endif
31
+
32
+ #if defined(USE_ROCM)
33
+ // hipSparse const API added in v2.4.0
34
+ #if HIPSPARSE_VERSION >= 200400
35
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 1
36
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
37
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
38
+ #else
39
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
40
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 1
41
+ #define AT_USE_HIPSPARSE_GENERIC_API() 1
42
+ #endif
43
+ #else // USE_ROCM
44
+ #define AT_USE_HIPSPARSE_CONST_DESCRIPTORS() 0
45
+ #define AT_USE_HIPSPARSE_NON_CONST_DESCRIPTORS() 0
46
+ #define AT_USE_HIPSPARSE_GENERIC_API() 0
47
+ #endif // USE_ROCM
48
+
49
+ // cuSparse Generic API spsv function was added in CUDA 11.3.0
50
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11500)
51
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 1
52
+ #else
53
+ #define AT_USE_CUSPARSE_GENERIC_SPSV() 0
54
+ #endif
55
+
56
+ // cuSparse Generic API spsm function was added in CUDA 11.3.1
57
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11600)
58
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 1
59
+ #else
60
+ #define AT_USE_CUSPARSE_GENERIC_SPSM() 0
61
+ #endif
62
+
63
+ // cuSparse Generic API sddmm function was added in CUDA 11.2.1 (cuSparse version 11400)
64
+ #if defined(CUDART_VERSION) && defined(CUSPARSE_VERSION) && (CUSPARSE_VERSION >= 11400)
65
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 1
66
+ #else
67
+ #define AT_USE_CUSPARSE_GENERIC_SDDMM() 0
68
+ #endif
69
+
70
+ // BSR triangular solve functions were added in hipSPARSE 1.11.2 (ROCm 4.5.0)
71
+ #if defined(CUDART_VERSION) || defined(USE_ROCM)
72
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 1
73
+ #else
74
+ #define AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() 0
75
+ #endif
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CUDASparseBlas.h ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ /*
4
+ Provides a subset of cuSPARSE functions as templates:
5
+
6
+ csrgeam2<scalar_t>(...)
7
+
8
+ where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
9
+ The functions are available in at::cuda::sparse namespace.
10
+ */
11
+
12
+ #include <ATen/cuda/CUDAContext.h>
13
+ #include <ATen/cuda/CUDASparse.h>
14
+
15
+ namespace at::cuda::sparse {
16
+
17
+ #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \
18
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
19
+ const cusparseMatDescr_t descrA, int nnzA, \
20
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
21
+ const int *csrSortedColIndA, const scalar_t *beta, \
22
+ const cusparseMatDescr_t descrB, int nnzB, \
23
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
24
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
25
+ const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \
26
+ const int *csrSortedColIndC, size_t *pBufferSizeInBytes
27
+
28
+ template <typename scalar_t>
29
+ inline void csrgeam2_bufferSizeExt(
30
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) {
31
+ TORCH_INTERNAL_ASSERT(
32
+ false,
33
+ "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ",
34
+ typeid(scalar_t).name());
35
+ }
36
+
37
+ template <>
38
+ void csrgeam2_bufferSizeExt<float>(
39
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float));
40
+ template <>
41
+ void csrgeam2_bufferSizeExt<double>(
42
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double));
43
+ template <>
44
+ void csrgeam2_bufferSizeExt<c10::complex<float>>(
45
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>));
46
+ template <>
47
+ void csrgeam2_bufferSizeExt<c10::complex<double>>(
48
+ CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>));
49
+
50
+ #define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \
51
+ cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \
52
+ int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \
53
+ const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \
54
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
55
+ int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace
56
+
57
+ template <typename scalar_t>
58
+ inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) {
59
+ TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz(
60
+ handle,
61
+ m,
62
+ n,
63
+ descrA,
64
+ nnzA,
65
+ csrSortedRowPtrA,
66
+ csrSortedColIndA,
67
+ descrB,
68
+ nnzB,
69
+ csrSortedRowPtrB,
70
+ csrSortedColIndB,
71
+ descrC,
72
+ csrSortedRowPtrC,
73
+ nnzTotalDevHostPtr,
74
+ workspace));
75
+ }
76
+
77
+ #define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \
78
+ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \
79
+ const cusparseMatDescr_t descrA, int nnzA, \
80
+ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \
81
+ const int *csrSortedColIndA, const scalar_t *beta, \
82
+ const cusparseMatDescr_t descrB, int nnzB, \
83
+ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \
84
+ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \
85
+ scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \
86
+ void *pBuffer
87
+
88
+ template <typename scalar_t>
89
+ inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) {
90
+ TORCH_INTERNAL_ASSERT(
91
+ false,
92
+ "at::cuda::sparse::csrgeam2: not implemented for ",
93
+ typeid(scalar_t).name());
94
+ }
95
+
96
+ template <>
97
+ void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float));
98
+ template <>
99
+ void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double));
100
+ template <>
101
+ void csrgeam2<c10::complex<float>>(
102
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>));
103
+ template <>
104
+ void csrgeam2<c10::complex<double>>(
105
+ CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>));
106
+
107
+ #define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \
108
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
109
+ cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \
110
+ int kb, int nnzb, const scalar_t *alpha, \
111
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
112
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
113
+ const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc
114
+
115
+ template <typename scalar_t>
116
+ inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) {
117
+ TORCH_INTERNAL_ASSERT(
118
+ false,
119
+ "at::cuda::sparse::bsrmm: not implemented for ",
120
+ typeid(scalar_t).name());
121
+ }
122
+
123
+ template <>
124
+ void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float));
125
+ template <>
126
+ void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double));
127
+ template <>
128
+ void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>));
129
+ template <>
130
+ void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>));
131
+
132
+ #define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \
133
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
134
+ cusparseOperation_t transA, int mb, int nb, int nnzb, \
135
+ const scalar_t *alpha, const cusparseMatDescr_t descrA, \
136
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
137
+ int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y
138
+
139
+ template <typename scalar_t>
140
+ inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) {
141
+ TORCH_INTERNAL_ASSERT(
142
+ false,
143
+ "at::cuda::sparse::bsrmv: not implemented for ",
144
+ typeid(scalar_t).name());
145
+ }
146
+
147
+ template <>
148
+ void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float));
149
+ template <>
150
+ void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double));
151
+ template <>
152
+ void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>));
153
+ template <>
154
+ void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>));
155
+
156
+ #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE()
157
+
158
+ #define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \
159
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
160
+ cusparseOperation_t transA, int mb, int nnzb, \
161
+ const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
162
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
163
+ bsrsv2Info_t info, int *pBufferSizeInBytes
164
+
165
+ template <typename scalar_t>
166
+ inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) {
167
+ TORCH_INTERNAL_ASSERT(
168
+ false,
169
+ "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ",
170
+ typeid(scalar_t).name());
171
+ }
172
+
173
+ template <>
174
+ void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float));
175
+ template <>
176
+ void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double));
177
+ template <>
178
+ void bsrsv2_bufferSize<c10::complex<float>>(
179
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>));
180
+ template <>
181
+ void bsrsv2_bufferSize<c10::complex<double>>(
182
+ CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>));
183
+
184
+ #define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \
185
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
186
+ cusparseOperation_t transA, int mb, int nnzb, \
187
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
188
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
189
+ bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
190
+
191
+ template <typename scalar_t>
192
+ inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) {
193
+ TORCH_INTERNAL_ASSERT(
194
+ false,
195
+ "at::cuda::sparse::bsrsv2_analysis: not implemented for ",
196
+ typeid(scalar_t).name());
197
+ }
198
+
199
+ template <>
200
+ void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float));
201
+ template <>
202
+ void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double));
203
+ template <>
204
+ void bsrsv2_analysis<c10::complex<float>>(
205
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>));
206
+ template <>
207
+ void bsrsv2_analysis<c10::complex<double>>(
208
+ CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>));
209
+
210
+ #define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) \
211
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
212
+ cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \
213
+ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
214
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
215
+ bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \
216
+ cusparseSolvePolicy_t policy, void *pBuffer
217
+
218
+ template <typename scalar_t>
219
+ inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) {
220
+ TORCH_INTERNAL_ASSERT(
221
+ false,
222
+ "at::cuda::sparse::bsrsv2_solve: not implemented for ",
223
+ typeid(scalar_t).name());
224
+ }
225
+
226
+ template <>
227
+ void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float));
228
+ template <>
229
+ void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double));
230
+ template <>
231
+ void bsrsv2_solve<c10::complex<float>>(
232
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>));
233
+ template <>
234
+ void bsrsv2_solve<c10::complex<double>>(
235
+ CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>));
236
+
237
+ #define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \
238
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
239
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
240
+ int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \
241
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
242
+ bsrsm2Info_t info, int *pBufferSizeInBytes
243
+
244
+ template <typename scalar_t>
245
+ inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) {
246
+ TORCH_INTERNAL_ASSERT(
247
+ false,
248
+ "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ",
249
+ typeid(scalar_t).name());
250
+ }
251
+
252
+ template <>
253
+ void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float));
254
+ template <>
255
+ void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double));
256
+ template <>
257
+ void bsrsm2_bufferSize<c10::complex<float>>(
258
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>));
259
+ template <>
260
+ void bsrsm2_bufferSize<c10::complex<double>>(
261
+ CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>));
262
+
263
+ #define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \
264
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
265
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
266
+ int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \
267
+ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \
268
+ bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer
269
+
270
+ template <typename scalar_t>
271
+ inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) {
272
+ TORCH_INTERNAL_ASSERT(
273
+ false,
274
+ "at::cuda::sparse::bsrsm2_analysis: not implemented for ",
275
+ typeid(scalar_t).name());
276
+ }
277
+
278
+ template <>
279
+ void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float));
280
+ template <>
281
+ void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double));
282
+ template <>
283
+ void bsrsm2_analysis<c10::complex<float>>(
284
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>));
285
+ template <>
286
+ void bsrsm2_analysis<c10::complex<double>>(
287
+ CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>));
288
+
289
+ #define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \
290
+ cusparseHandle_t handle, cusparseDirection_t dirA, \
291
+ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \
292
+ int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \
293
+ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \
294
+ int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \
295
+ scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer
296
+
297
+ template <typename scalar_t>
298
+ inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) {
299
+ TORCH_INTERNAL_ASSERT(
300
+ false,
301
+ "at::cuda::sparse::bsrsm2_solve: not implemented for ",
302
+ typeid(scalar_t).name());
303
+ }
304
+
305
+ template <>
306
+ void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float));
307
+ template <>
308
+ void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double));
309
+ template <>
310
+ void bsrsm2_solve<c10::complex<float>>(
311
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>));
312
+ template <>
313
+ void bsrsm2_solve<c10::complex<double>>(
314
+ CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>));
315
+
316
+ #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE
317
+
318
+ } // namespace at::cuda::sparse
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/CachingHostAllocator.h ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/CachingHostAllocator.h>
4
+ #include <c10/core/Allocator.h>
5
+ #include <c10/cuda/CUDAStream.h>
6
+
7
+ namespace at::cuda {
8
+
9
+ //
10
+ // A caching allocator for CUDA host allocations (pinned memory).
11
+ //
12
+ // This provides a drop-in replacement for THCudaHostAllocator, which re-uses
13
+ // freed pinned (page-locked) memory allocations. This avoids device
14
+ // synchronizations due to cudaFreeHost calls.
15
+ //
16
+ // To ensure correct behavior, THCCachingHostAllocator_recordEvent must be
17
+ // called anytime a pointer from this allocator is used in a cudaMemcpyAsync
18
+ // call between host and device, and passed the corresponding context from the
19
+ // allocation. This is currently invoked by at::native::copy_kernel_cuda.
20
+ //
21
+ TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();
22
+
23
+ // Records an event in the specified stream. The allocation corresponding to the
24
+ // input `ptr`/`ctx` will not be re-used until the event has occurred.
25
+ TORCH_CUDA_CPP_API bool CachingHostAllocator_recordEvent(
26
+ void* ptr,
27
+ void* ctx,
28
+ c10::cuda::CUDAStream stream);
29
+
30
+ // Releases cached pinned memory allocations via cudaHostFree
31
+ TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();
32
+
33
+ inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
34
+ return getCachingHostAllocator()->allocate(size);
35
+ }
36
+
37
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/Exceptions.h ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cublas_v2.h>
4
+ #include <cusparse.h>
5
+ #include <c10/macros/Export.h>
6
+
7
+ #ifdef CUDART_VERSION
8
+ #include <cusolver_common.h>
9
+ #endif
10
+
11
+ #include <ATen/Context.h>
12
+ #include <c10/util/Exception.h>
13
+ #include <c10/cuda/CUDAException.h>
14
+
15
+
16
+ namespace c10 {
17
+
18
+ class CuDNNError : public c10::Error {
19
+ using Error::Error;
20
+ };
21
+
22
+ } // namespace c10
23
+
24
+ #define AT_CUDNN_FRONTEND_CHECK(EXPR, ...) \
25
+ do { \
26
+ auto error_object = EXPR; \
27
+ if (!error_object.is_good()) { \
28
+ TORCH_CHECK_WITH(CuDNNError, false, \
29
+ "cuDNN Frontend error: ", error_object.get_message()); \
30
+ } \
31
+ } while (0) \
32
+
33
+ #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__)
34
+
35
+ // See Note [CHECK macro]
36
+ #define AT_CUDNN_CHECK(EXPR, ...) \
37
+ do { \
38
+ cudnnStatus_t status = EXPR; \
39
+ if (status != CUDNN_STATUS_SUCCESS) { \
40
+ if (status == CUDNN_STATUS_NOT_SUPPORTED) { \
41
+ TORCH_CHECK_WITH(CuDNNError, false, \
42
+ "cuDNN error: ", \
43
+ cudnnGetErrorString(status), \
44
+ ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \
45
+ } else { \
46
+ TORCH_CHECK_WITH(CuDNNError, false, \
47
+ "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \
48
+ } \
49
+ } \
50
+ } while (0)
51
+
52
+ namespace at::cuda::blas {
53
+ C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error);
54
+ } // namespace at::cuda::blas
55
+
56
+ #define TORCH_CUDABLAS_CHECK(EXPR) \
57
+ do { \
58
+ cublasStatus_t __err = EXPR; \
59
+ TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \
60
+ "CUDA error: ", \
61
+ at::cuda::blas::_cublasGetErrorEnum(__err), \
62
+ " when calling `" #EXPR "`"); \
63
+ } while (0)
64
+
65
+ const char *cusparseGetErrorString(cusparseStatus_t status);
66
+
67
+ #define TORCH_CUDASPARSE_CHECK(EXPR) \
68
+ do { \
69
+ cusparseStatus_t __err = EXPR; \
70
+ TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \
71
+ "CUDA error: ", \
72
+ cusparseGetErrorString(__err), \
73
+ " when calling `" #EXPR "`"); \
74
+ } while (0)
75
+
76
+ // cusolver related headers are only supported on cuda now
77
+ #ifdef CUDART_VERSION
78
+
79
+ namespace at::cuda::solver {
80
+ C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status);
81
+
82
+ constexpr const char* _cusolver_backend_suggestion = \
83
+ "If you keep seeing this error, you may use " \
84
+ "`torch.backends.cuda.preferred_linalg_library()` to try " \
85
+ "linear algebra operators with other supported backends. " \
86
+ "See https://pytorch.org/docs/stable/backends.html#torch.backends.cuda.preferred_linalg_library";
87
+
88
+ } // namespace at::cuda::solver
89
+
90
+ // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
91
+ // When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
92
+ #define TORCH_CUSOLVER_CHECK(EXPR) \
93
+ do { \
94
+ cusolverStatus_t __err = EXPR; \
95
+ if ((CUDA_VERSION < 11500 && \
96
+ __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \
97
+ (CUDA_VERSION >= 11500 && \
98
+ __err == CUSOLVER_STATUS_INVALID_VALUE)) { \
99
+ TORCH_CHECK_LINALG( \
100
+ false, \
101
+ "cusolver error: ", \
102
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
103
+ ", when calling `" #EXPR "`", \
104
+ ". This error may appear if the input matrix contains NaN. ", \
105
+ at::cuda::solver::_cusolver_backend_suggestion); \
106
+ } else { \
107
+ TORCH_CHECK( \
108
+ __err == CUSOLVER_STATUS_SUCCESS, \
109
+ "cusolver error: ", \
110
+ at::cuda::solver::cusolverGetErrorMessage(__err), \
111
+ ", when calling `" #EXPR "`. ", \
112
+ at::cuda::solver::_cusolver_backend_suggestion); \
113
+ } \
114
+ } while (0)
115
+
116
+ #else
117
+ #define TORCH_CUSOLVER_CHECK(EXPR) EXPR
118
+ #endif
119
+
120
+ #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR)
121
+
122
+ // For CUDA Driver API
123
+ //
124
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
125
+ // in ATen, and we need to use its nvrtcGetErrorString.
126
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
127
+ #if !defined(USE_ROCM)
128
+
129
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
130
+ do { \
131
+ CUresult __err = EXPR; \
132
+ if (__err != CUDA_SUCCESS) { \
133
+ const char* err_str; \
134
+ CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \
135
+ if (get_error_str_err != CUDA_SUCCESS) { \
136
+ AT_ERROR("CUDA driver error: unknown error"); \
137
+ } else { \
138
+ AT_ERROR("CUDA driver error: ", err_str); \
139
+ } \
140
+ } \
141
+ } while (0)
142
+
143
+ #else
144
+
145
+ #define AT_CUDA_DRIVER_CHECK(EXPR) \
146
+ do { \
147
+ CUresult __err = EXPR; \
148
+ if (__err != CUDA_SUCCESS) { \
149
+ AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \
150
+ } \
151
+ } while (0)
152
+
153
+ #endif
154
+
155
+ // For CUDA NVRTC
156
+ //
157
+ // Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE,
158
+ // incorrectly produces the error string "NVRTC unknown error."
159
+ // The following maps it correctly.
160
+ //
161
+ // This is here instead of in c10 because NVRTC is loaded dynamically via a stub
162
+ // in ATen, and we need to use its nvrtcGetErrorString.
163
+ // See NOTE [ USE OF NVRTC AND DRIVER API ].
164
+ #define AT_CUDA_NVRTC_CHECK(EXPR) \
165
+ do { \
166
+ nvrtcResult __err = EXPR; \
167
+ if (__err != NVRTC_SUCCESS) { \
168
+ if (static_cast<int>(__err) != 7) { \
169
+ AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \
170
+ } else { \
171
+ AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \
172
+ } \
173
+ } \
174
+ } while (0)
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PeerToPeerAccess.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <c10/macros/Macros.h>
2
+ #include <cstdint>
3
+
4
+ namespace at::cuda {
5
+ namespace detail {
6
+ void init_p2p_access_cache(int64_t num_devices);
7
+ }
8
+
9
+ TORCH_CUDA_CPP_API bool get_p2p_access(int source_dev, int dest_dev);
10
+
11
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxCudaState.h ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <cstdint>
4
+
5
+ #include <ATen/cuda/detail/PhiloxCudaStateRaw.cuh>
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PhiloxUtils.cuh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/cuda/PhiloxCudaState.h>
4
+ #include <ATen/cuda/detail/UnpackRaw.cuh>
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/PinnedMemoryAllocator.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <ATen/cuda/CachingHostAllocator.h>
5
+
6
+ namespace at::cuda {
7
+
8
+ inline TORCH_CUDA_CPP_API at::Allocator* getPinnedMemoryAllocator() {
9
+ return getCachingHostAllocator();
10
+ }
11
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/ScanUtils.cuh ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/ceil_div.h>
4
+ #include <ATen/cuda/DeviceUtils.cuh>
5
+ #include <ATen/cuda/AsmUtils.cuh>
6
+ #include <c10/macros/Macros.h>
7
+
8
+ // Collection of in-kernel scan / prefix sum utilities
9
+
10
+ namespace at::cuda {
11
+
12
+ // Inclusive prefix sum for binary vars using intra-warp voting +
13
+ // shared memory
14
+ template <typename T, bool KillWARDependency, class BinaryFunction>
15
+ __device__ void inclusiveBinaryPrefixScan(T* smem, bool in, T* out, BinaryFunction binop) {
16
+ // Within-warp, we use warp voting.
17
+ #if defined (USE_ROCM)
18
+ unsigned long long int vote = WARP_BALLOT(in);
19
+ T index = __popcll(getLaneMaskLe() & vote);
20
+ T carry = __popcll(vote);
21
+ #else
22
+ T vote = WARP_BALLOT(in);
23
+ T index = __popc(getLaneMaskLe() & vote);
24
+ T carry = __popc(vote);
25
+ #endif
26
+
27
+ int warp = threadIdx.x / C10_WARP_SIZE;
28
+
29
+ // Per each warp, write out a value
30
+ if (getLaneId() == 0) {
31
+ smem[warp] = carry;
32
+ }
33
+
34
+ __syncthreads();
35
+
36
+ // Sum across warps in one thread. This appears to be faster than a
37
+ // warp shuffle scan for CC 3.0+
38
+ if (threadIdx.x == 0) {
39
+ int current = 0;
40
+ for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
41
+ T v = smem[i];
42
+ smem[i] = binop(smem[i], current);
43
+ current = binop(current, v);
44
+ }
45
+ }
46
+
47
+ __syncthreads();
48
+
49
+ // load the carry from the preceding warp
50
+ if (warp >= 1) {
51
+ index = binop(index, smem[warp - 1]);
52
+ }
53
+
54
+ *out = index;
55
+
56
+ if (KillWARDependency) {
57
+ __syncthreads();
58
+ }
59
+ }
60
+
61
+ // Exclusive prefix sum for binary vars using intra-warp voting +
62
+ // shared memory
63
+ template <typename T, bool KillWARDependency, class BinaryFunction>
64
+ __device__ void exclusiveBinaryPrefixScan(T* smem, bool in, T* out, T* carry, BinaryFunction binop) {
65
+ inclusiveBinaryPrefixScan<T, false, BinaryFunction>(smem, in, out, binop);
66
+
67
+ // Inclusive to exclusive
68
+ *out -= (T) in;
69
+
70
+ // The outgoing carry for all threads is the last warp's sum
71
+ *carry = smem[at::ceil_div<int>(blockDim.x, C10_WARP_SIZE) - 1];
72
+
73
+ if (KillWARDependency) {
74
+ __syncthreads();
75
+ }
76
+ }
77
+
78
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/cub.h ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <cstdint>
3
+ #include <c10/core/ScalarType.h>
4
+ #include <ATen/cuda/CUDAConfig.h>
5
+
6
+ // NOTE: These templates are intentionally not defined in this header,
7
+ // which aviods re-compiling them for each translation unit. If you get
8
+ // a link error, you need to add an explicit instantiation for your
9
+ // types in cub.cu
10
+
11
+ namespace at::cuda::cub {
12
+
13
+ inline int get_num_bits(uint64_t max_key) {
14
+ int num_bits = 1;
15
+ while (max_key > 1) {
16
+ max_key >>= 1;
17
+ num_bits++;
18
+ }
19
+ return num_bits;
20
+ }
21
+
22
+ namespace detail {
23
+
24
+ // radix_sort_pairs doesn't interact with value_t other than to copy
25
+ // the data, so we can save template instantiations by reinterpreting
26
+ // it as an opaque type.
27
+ template <int N> struct alignas(N) OpaqueType { char data[N]; };
28
+
29
+ template<typename key_t, int value_size>
30
+ void radix_sort_pairs_impl(
31
+ const key_t *keys_in, key_t *keys_out,
32
+ const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
33
+ int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);
34
+
35
+ } // namespace detail
36
+
37
+ template<typename key_t, typename value_t>
38
+ void radix_sort_pairs(
39
+ const key_t *keys_in, key_t *keys_out,
40
+ const value_t *values_in, value_t *values_out,
41
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8) {
42
+ static_assert(std::is_trivially_copyable_v<value_t> ||
43
+ AT_ROCM_ENABLED(), // ROCm incorrectly fails this check for vector types
44
+ "radix_sort_pairs value type must be trivially copyable");
45
+ // Make value type opaque, so all inputs of a certain size use the same template instantiation
46
+ using opaque_t = detail::OpaqueType<sizeof(value_t)>;
47
+ static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0,
48
+ "This size of value_t is not instantiated. Please instantiate it in cub.cu"
49
+ " and modify this check.");
50
+ static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned");
51
+ detail::radix_sort_pairs_impl(
52
+ keys_in, keys_out,
53
+ reinterpret_cast<const opaque_t*>(values_in),
54
+ reinterpret_cast<opaque_t*>(values_out),
55
+ n, descending, begin_bit, end_bit);
56
+ }
57
+
58
+ template<typename key_t>
59
+ void radix_sort_keys(
60
+ const key_t *keys_in, key_t *keys_out,
61
+ int64_t n, bool descending=false, int64_t begin_bit=0, int64_t end_bit=sizeof(key_t)*8);
62
+
63
+ // NOTE: Intermediate sums will be truncated to input_t precision
64
+ template <typename input_t, typename output_t>
65
+ void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n);
66
+
67
+ template <typename scalar_t>
68
+ void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
69
+ return inclusive_sum_truncating(input, output, n);
70
+ }
71
+
72
+ // NOTE: Sums are done is common_type<input_t, output_t>
73
+ template <typename input_t, typename output_t>
74
+ void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n);
75
+
76
+ template <typename scalar_t>
77
+ void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
78
+ return exclusive_sum_in_common_type(input, output, n);
79
+ }
80
+
81
+ void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n);
82
+ inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) {
83
+ return mask_exclusive_sum(
84
+ reinterpret_cast<const uint8_t*>(mask), output_idx, n);
85
+ }
86
+
87
+ } // namespace at::cuda::cub
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/cub_definitions.cuh ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #if !defined(USE_ROCM)
4
+ #include <cuda.h> // for CUDA_VERSION
5
+ #endif
6
+
7
+ #if !defined(USE_ROCM)
8
+ #include <cub/version.cuh>
9
+ #else
10
+ #define CUB_VERSION 0
11
+ #endif
12
+
13
+ // cub sort support for __nv_bfloat16 is added to cub 1.13 in:
14
+ // https://github.com/NVIDIA/cub/pull/306
15
+ #if CUB_VERSION >= 101300
16
+ #define CUB_SUPPORTS_NV_BFLOAT16() true
17
+ #else
18
+ #define CUB_SUPPORTS_NV_BFLOAT16() false
19
+ #endif
20
+
21
+ // cub support for CUB_WRAPPED_NAMESPACE is added to cub 1.13.1 in:
22
+ // https://github.com/NVIDIA/cub/pull/326
23
+ // CUB_WRAPPED_NAMESPACE is defined globally in cmake/Dependencies.cmake
24
+ // starting from CUDA 11.5
25
+ #if defined(CUB_WRAPPED_NAMESPACE) || defined(THRUST_CUB_WRAPPED_NAMESPACE)
26
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() true
27
+ #else
28
+ #define USE_GLOBAL_CUB_WRAPPED_NAMESPACE() false
29
+ #endif
30
+
31
+ // cub support for UniqueByKey is added to cub 1.16 in:
32
+ // https://github.com/NVIDIA/cub/pull/405
33
+ #if CUB_VERSION >= 101600
34
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() true
35
+ #else
36
+ #define CUB_SUPPORTS_UNIQUE_BY_KEY() false
37
+ #endif
38
+
39
+ // cub support for scan by key is added to cub 1.15
40
+ // in https://github.com/NVIDIA/cub/pull/376
41
+ #if CUB_VERSION >= 101500
42
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 1
43
+ #else
44
+ #define CUB_SUPPORTS_SCAN_BY_KEY() 0
45
+ #endif
46
+
47
+ // cub support for cub::FutureValue is added to cub 1.15 in:
48
+ // https://github.com/NVIDIA/cub/pull/305
49
+ #if CUB_VERSION >= 101500
50
+ #define CUB_SUPPORTS_FUTURE_VALUE() true
51
+ #else
52
+ #define CUB_SUPPORTS_FUTURE_VALUE() false
53
+ #endif
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/LazyNVRTC.h ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/detail/CUDAHooksInterface.h>
3
+ namespace at::cuda {
4
+ // Forward-declares at::cuda::NVRTC
5
+ struct NVRTC;
6
+
7
+ namespace detail {
8
+ extern NVRTC lazyNVRTC;
9
+ } // namespace detail
10
+
11
+ } // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/PhiloxCudaStateRaw.cuh ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // No "#pragma once" because this is a raw definition that can be copied by jit codegen.
2
+ // Eager mode clients should not include this file directly, instead,
3
+ // they should #include <ATen/cuda/PhiloxCudaState.h>, which has a #pragma once.
4
+
5
+ // Stores RNG state values. Passed as a kernel argument.
6
+ // See Note [CUDA Graph-safe RNG states].
7
+ //
8
+ // The raw definition lives in its own file so jit codegen can easily copy it.
9
+ namespace at {
10
+
11
+ struct PhiloxCudaState {
12
+ PhiloxCudaState() = default;
13
+ // Called if graph capture is not underway
14
+ PhiloxCudaState(uint64_t seed,
15
+ uint64_t offset) {
16
+ seed_.val = seed;
17
+ offset_.val = offset;
18
+ }
19
+ // Called if graph capture is underway
20
+ PhiloxCudaState(int64_t* seed,
21
+ int64_t* offset_extragraph,
22
+ uint32_t offset_intragraph) {
23
+ seed_.ptr = seed;
24
+ offset_.ptr = offset_extragraph;
25
+ offset_intragraph_ = offset_intragraph;
26
+ captured_ = true;
27
+ }
28
+
29
+ // Public members, directly accessible by at::cuda::philox::unpack.
30
+ // If we made them private with getters/setters, the getters/setters
31
+ // would have to be __device__, and we can't declare __device__ in ATen.
32
+ union Payload {
33
+ uint64_t val;
34
+ int64_t* ptr;
35
+ };
36
+
37
+ Payload seed_{};
38
+ Payload offset_{};
39
+ uint32_t offset_intragraph_ = 0;
40
+ bool captured_ = false;
41
+ };
42
+
43
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/jiterator_impl.h ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ #include <ATen/jit_macros.h>
3
+
4
+ #if AT_USE_JITERATOR()
5
+
6
+ #include <ATen/native/TensorIterator.h>
7
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
8
+ #include <ATen/native/cuda/jit_utils.h>
9
+ #include <ATen/native/cuda/MemoryAccess.cuh>
10
+ #include <ATen/native/cuda/JitLoops.cuh>
11
+
12
+ #include <string>
13
+ #include <variant>
14
+ #include <vector>
15
+
16
+ namespace at::native {
17
+
18
+
19
+ #define AT_FOR_8_CASES(_) \
20
+ _(1) \
21
+ _(2) \
22
+ _(3) \
23
+ _(4) \
24
+ _(5) \
25
+ _(6) \
26
+ _(7) \
27
+ _(8)
28
+
29
+ #define AT_FOR_8_CASES_WITH_COMMA(_) \
30
+ _(1) , \
31
+ _(2) , \
32
+ _(3) , \
33
+ _(4) , \
34
+ _(5) , \
35
+ _(6) , \
36
+ _(7) , \
37
+ _(8)
38
+
39
+ c10::SmallVector<std::string> get_extra_args_typenames(const c10::SmallVector<at::Scalar>& extra_args) {
40
+ c10::SmallVector<std::string> args_typenames(extra_args.size());
41
+ for (const auto i : c10::irange(extra_args.size())) {
42
+ args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type());
43
+ }
44
+ return args_typenames;
45
+ }
46
+
47
+ int can_vectorize_up_to(at::ScalarType type, char* pointer) {
48
+ switch(type) {
49
+ #define DEFINE_CASE(ctype, scalartype) \
50
+ case ScalarType::scalartype : return memory::can_vectorize_up_to<ctype>(pointer);
51
+
52
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE)
53
+ #undef DEFINE_CASE
54
+
55
+ default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type);
56
+ }
57
+ }
58
+
59
+ // jitted version of the above
60
+ // See Note [Jiterator], this relies on the assumptions enumerated there
61
+ int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) {
62
+ const at::ScalarType common_dtype = iter.common_dtype();
63
+ const at::ScalarType result_dtype = common_dtype;
64
+
65
+ // Deals with output
66
+ int result = can_vectorize_up_to(result_dtype, static_cast<char*>(iter.data_ptr(0)));
67
+
68
+ // Incorporates input(s)
69
+ for (auto i = 1; i < iter.ntensors(); ++i) {
70
+ result = std::min<int>(result, can_vectorize_up_to(common_dtype, static_cast<char*>(iter.data_ptr(i))));
71
+ }
72
+
73
+ return result;
74
+ }
75
+
76
+ template<bool IS_INPUT, int N>
77
+ static std::unique_ptr<OffsetCalculator<N>> make_unique_offset_calculator(
78
+ const TensorIteratorBase& iter) {
79
+ // array size can not be 0, this happens when N == 0
80
+ constexpr int array_size = std::max<int>(N, 1);
81
+ TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs()));
82
+
83
+ std::array<const int64_t*, array_size> strides;
84
+ int64_t element_sizes[array_size];
85
+ for (int i = 0; i < N; i++) {
86
+ int index = IS_INPUT ? i + iter.noutputs() : i;
87
+ strides[i] = iter.strides(index).data();
88
+ element_sizes[i] = iter.element_size(index);
89
+ }
90
+ return std::make_unique<OffsetCalculator<N>>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes);
91
+ }
92
+
93
+ template <bool IS_INPUT>
94
+ struct OffsetCalculatorVariant {
95
+ #define DEFINE_CASE(index) std::unique_ptr<OffsetCalculator<index>>
96
+ using OffsetCalculatorTypes = std::variant<
97
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
98
+ >;
99
+ #undef DEFINE_CASE
100
+
101
+ OffsetCalculatorVariant(const TensorIteratorBase& iter) {
102
+ int num = IS_INPUT ? iter.ninputs() : iter.noutputs();
103
+
104
+ switch(num) {
105
+ #define DEFINE_CASE(index) \
106
+ case index : v = make_unique_offset_calculator<IS_INPUT, index>(iter); break;
107
+
108
+ AT_FOR_8_CASES(DEFINE_CASE)
109
+ #undef DEFINE_CASE
110
+ default:
111
+ TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num);
112
+ }
113
+ }
114
+
115
+ void* data_ptr() {
116
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
117
+ }
118
+
119
+ private:
120
+ OffsetCalculatorTypes v{};
121
+ };
122
+
123
+ struct ArrayVariant {
124
+ // works for up to 8 input + 8 outputs
125
+ #define DEFINE_CASE(index) at::detail::Array<char*, index>, at::detail::Array<char*, index+8>
126
+ using ArrayTypes = std::variant<
127
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
128
+ >;
129
+ #undef DEFINE_CASE
130
+
131
+ ArrayVariant(const TensorIteratorBase& iter) {
132
+ int ntensors = iter.ntensors();
133
+ switch(ntensors) {
134
+ #define DEFINE_CASE(index) \
135
+ case index: array = at::detail::Array<char*, index>{}; break; \
136
+ case index+8: array = at::detail::Array<char*, index+8>{}; break;
137
+
138
+ AT_FOR_8_CASES(DEFINE_CASE)
139
+ #undef DEFINE_CASE
140
+
141
+ default:
142
+ TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors);
143
+ }
144
+
145
+ std::visit([&](auto& a) {
146
+ for (auto i = 0; i < ntensors; ++i) {
147
+ a[i] = (char*)iter.data_ptr(i);
148
+ }
149
+ }, array);
150
+ }
151
+
152
+ void* data_ptr() {
153
+ return std::visit([](auto & a){ return static_cast<void*>(&a); }, array);
154
+ }
155
+
156
+ private:
157
+ ArrayTypes array;
158
+ };
159
+
160
+ struct TrivialOffsetCalculatorVariant {
161
+ #define DEFINE_CASE(index) TrivialOffsetCalculator<index>
162
+ using TrivialOffsetCalculatorTypes = std::variant<
163
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
164
+ >;
165
+ #undef DEFINE_CASE
166
+
167
+ TrivialOffsetCalculatorVariant(int num) {
168
+ switch(num) {
169
+ #define DEFINE_CASE(index) \
170
+ case index: v = TrivialOffsetCalculator<index>(); break;
171
+
172
+ AT_FOR_8_CASES(DEFINE_CASE)
173
+ #undef DEFINE_CASE
174
+
175
+ default:
176
+ TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num);
177
+ }
178
+ }
179
+
180
+ void* data_ptr() {
181
+ return std::visit([](auto & v){ return static_cast<void*>(&v); }, v);
182
+ }
183
+
184
+ private:
185
+ TrivialOffsetCalculatorTypes v{};
186
+ };
187
+
188
+ struct LoadWithCastVariant {
189
+ #define DEFINE_CASE(index) std::unique_ptr<memory::LoadWithCast<index>>
190
+ using LoadWithCastPtr = std::variant<
191
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
192
+ >;
193
+ #undef DEFINE_CASE
194
+
195
+ LoadWithCastVariant(const TensorIteratorBase& iter) {
196
+ int arity = iter.ninputs();
197
+ switch(arity) {
198
+ #define DEFINE_CASE(index) \
199
+ case index: v = std::make_unique<memory::LoadWithCast<index>>(iter); break;
200
+
201
+ AT_FOR_8_CASES(DEFINE_CASE)
202
+ #undef DEFINE_CASE
203
+
204
+ default:
205
+ TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity);
206
+ }
207
+ }
208
+
209
+ void* data_ptr() {
210
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
211
+ }
212
+
213
+ private:
214
+ LoadWithCastPtr v{};
215
+ };
216
+
217
+ struct StoreWithCastVariant {
218
+ #define DEFINE_CASE(index) std::unique_ptr<memory::StoreWithCast<index>>
219
+ using StoreWithCastPtr = std::variant<
220
+ AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE)
221
+ >;
222
+ #undef DEFINE_CASE
223
+
224
+ StoreWithCastVariant(const TensorIteratorBase& iter) {
225
+ int num = iter.noutputs();
226
+ switch(num) {
227
+ #define DEFINE_CASE(index) \
228
+ case index: v = std::make_unique<memory::StoreWithCast<index>>(iter); break;
229
+
230
+ AT_FOR_8_CASES(DEFINE_CASE)
231
+ #undef DEFINE_CASE
232
+
233
+ default:
234
+ TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num);
235
+ }
236
+ }
237
+
238
+ void* data_ptr() {
239
+ return std::visit([](auto & v){ return static_cast<void*>(v.get()); }, v);
240
+ }
241
+
242
+ private:
243
+ StoreWithCastPtr v{};
244
+ };
245
+
246
+ } // namespace at::native
247
+
248
+
249
+ #endif // AT_USE_JITERATOR()
parrot/lib/python3.10/site-packages/torch/include/ATen/cuda/llvm_jit_strings.h ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#pragma once

#include <string>
#include <c10/macros/Export.h>

namespace at::cuda {

// Accessors for lazily-built source strings used when JIT-compiling CUDA
// kernels.  Each returns a reference to a cached std::string containing a
// chunk of C++/CUDA source (definitions live in the corresponding .cpp).

// Type-trait helper definitions.
TORCH_CUDA_CPP_API const std::string &get_traits_string();
// <cmath>-style math function definitions.
TORCH_CUDA_CPP_API const std::string &get_cmath_string();
// Complex-number type body.
TORCH_CUDA_CPP_API const std::string &get_complex_body_string();
// Half-precision complex type body.
TORCH_CUDA_CPP_API const std::string &get_complex_half_body_string();
// Complex math function definitions.
TORCH_CUDA_CPP_API const std::string &get_complex_math_string();

} // namespace at::cuda
parrot/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/miopen/miopen-wrapper.h>
4
+ #include <string>
5
+ #include <stdexcept>
6
+ #include <sstream>
7
+
8
+ namespace at { namespace native {
9
+
10
// Exception thrown on MIOpen API failures.  Carries the raw miopenStatus_t
// alongside the human-readable message so callers can inspect the original
// status code.
class miopen_exception : public std::runtime_error {
 public:
  miopenStatus_t status;  // the failing MIOpen status code
  miopen_exception(miopenStatus_t status, const char* msg)
      : std::runtime_error(msg)
      , status(status) {}
  miopen_exception(miopenStatus_t status, const std::string& msg)
      : std::runtime_error(msg)
      , status(status) {}
};
20
+
21
+ inline void MIOPEN_CHECK(miopenStatus_t status)
22
+ {
23
+ if (status != miopenStatusSuccess) {
24
+ if (status == miopenStatusNotImplemented) {
25
+ throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
26
+ ". This error may appear if you passed in a non-contiguous input.");
27
+ }
28
+ throw miopen_exception(status, miopenGetErrorString(status));
29
+ }
30
+ }
31
+
32
+ inline void HIP_CHECK(hipError_t error)
33
+ {
34
+ if (error != hipSuccess) {
35
+ std::string msg("HIP error: ");
36
+ msg += hipGetErrorString(error);
37
+ throw std::runtime_error(msg);
38
+ }
39
+ }
40
+
41
+ }} // namespace at::native
videollama2/lib/python3.10/site-packages/latex2mathml/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from importlib import metadata
2
+
3
+ __version__ = metadata.version("latex2mathml")
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (259 Bytes). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/commands.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/converter.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (1.74 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/symbols_parser.cpython-310.pyc ADDED
Binary file (2.39 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/tokenizer.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/__pycache__/walker.cpython-310.pyc ADDED
Binary file (9.58 kB). View file
 
videollama2/lib/python3.10/site-packages/latex2mathml/commands.py ADDED
@@ -0,0 +1,506 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import OrderedDict, defaultdict
2
+ from typing import DefaultDict, Dict, Optional, Tuple
3
+
4
+ OPENING_BRACE = "{"
5
+ CLOSING_BRACE = "}"
6
+ BRACES = "{}"
7
+
8
+ OPENING_BRACKET = "["
9
+ CLOSING_BRACKET = "]"
10
+ BRACKETS = "[]"
11
+
12
+ OPENING_PARENTHESIS = "("
13
+ CLOSING_PARENTHESIS = ")"
14
+ PARENTHESES = "()"
15
+
16
+ SUBSUP = "_^"
17
+ SUBSCRIPT = "_"
18
+ SUPERSCRIPT = "^"
19
+ APOSTROPHE = "'"
20
+ PRIME = r"\prime"
21
+ DPRIME = r"\dprime"
22
+
23
+ LEFT = r"\left"
24
+ MIDDLE = r"\middle"
25
+ RIGHT = r"\right"
26
+
27
+ ABOVE = r"\above"
28
+ ABOVEWITHDELIMS = r"\abovewithdelims"
29
+ ATOP = r"\atop"
30
+ ATOPWITHDELIMS = r"\atopwithdelims"
31
+ BINOM = r"\binom"
32
+ BRACE = r"\brace"
33
+ BRACK = r"\brack"
34
+ CFRAC = r"\cfrac"
35
+ CHOOSE = r"\choose"
36
+ DBINOM = r"\dbinom"
37
+ DFRAC = r"\dfrac"
38
+ FRAC = r"\frac"
39
+ GENFRAC = r"\genfrac"
40
+ OVER = r"\over"
41
+ TBINOM = r"\tbinom"
42
+ TFRAC = r"\tfrac"
43
+
44
+ ROOT = r"\root"
45
+ SQRT = r"\sqrt"
46
+
47
+ OVERSET = r"\overset"
48
+ UNDERSET = r"\underset"
49
+
50
+ ACUTE = r"\acute"
51
+ BAR = r"\bar"
52
+ BREVE = r"\breve"
53
+ CHECK = r"\check"
54
+ DOT = r"\dot"
55
+ DDOT = r"\ddot"
56
+ DDDOT = r"\dddot"
57
+ DDDDOT = r"\ddddot"
58
+ GRAVE = r"\grave"
59
+ HAT = r"\hat"
60
+ MATHRING = r"\mathring"
61
+ OVERBRACE = r"\overbrace"
62
+ OVERLEFTARROW = r"\overleftarrow"
63
+ OVERLEFTRIGHTARROW = r"\overleftrightarrow"
64
+ OVERLINE = r"\overline"
65
+ OVERPAREN = r"\overparen"
66
+ OVERRIGHTARROW = r"\overrightarrow"
67
+ TILDE = r"\tilde"
68
+ UNDERBRACE = r"\underbrace"
69
+ UNDERLEFTARROW = r"\underleftarrow"
70
+ UNDERLINE = r"\underline"
71
+ UNDERPAREN = r"\underparen"
72
+ UNDERRIGHTARROW = r"\underrightarrow"
73
+ UNDERLEFTRIGHTARROW = r"\underleftrightarrow"
74
+ VEC = r"\vec"
75
+ WIDEHAT = r"\widehat"
76
+ WIDETILDE = r"\widetilde"
77
+ XLEFTARROW = r"\xleftarrow"
78
+ XRIGHTARROW = r"\xrightarrow"
79
+
80
+ HREF = r"\href"
81
+ TEXT = r"\text"
82
+ TEXTBF = r"\textbf"
83
+ TEXTIT = r"\textit"
84
+ TEXTRM = r"\textrm"
85
+ TEXTSF = r"\textsf"
86
+ TEXTTT = r"\texttt"
87
+
88
+ BEGIN = r"\begin"
89
+ END = r"\end"
90
+
91
+ LIMITS = r"\limits"
92
+ INTEGRAL = r"\int"
93
+ SUMMATION = r"\sum"
94
+ LIMIT = (r"\lim", r"\sup", r"\inf", r"\max", r"\min")
95
+
96
+ OPERATORNAME = r"\operatorname"
97
+
98
+ LBRACE = r"\{"
99
+
100
+ FUNCTIONS = (
101
+ r"\arccos",
102
+ r"\arcsin",
103
+ r"\arctan",
104
+ r"\cos",
105
+ r"\cosh",
106
+ r"\cot",
107
+ r"\coth",
108
+ r"\csc",
109
+ r"\deg",
110
+ r"\dim",
111
+ r"\exp",
112
+ r"\hom",
113
+ r"\ker",
114
+ r"\ln",
115
+ r"\lg",
116
+ r"\log",
117
+ r"\sec",
118
+ r"\sin",
119
+ r"\sinh",
120
+ r"\tan",
121
+ r"\tanh",
122
+ )
123
+ DETERMINANT = r"\det"
124
+ GCD = r"\gcd"
125
+ INTOP = r"\intop"
126
+ INJLIM = r"\injlim"
127
+ LIMINF = r"\liminf"
128
+ LIMSUP = r"\limsup"
129
+ PR = r"\Pr"
130
+ PROJLIM = r"\projlim"
131
+ MOD = r"\mod"
132
+ PMOD = r"\pmod"
133
+ BMOD = r"\bmod"
134
+
135
+ HDASHLINE = r"\hdashline"
136
+ HLINE = r"\hline"
137
+ HFIL = r"\hfil"
138
+
139
+ CASES = r"\cases"
140
+ DISPLAYLINES = r"\displaylines"
141
+ SMALLMATRIX = r"\smallmatrix"
142
+ SUBSTACK = r"\substack"
143
+ SPLIT = r"\split"
144
+ ALIGN = r"\align*"
145
+ MATRICES = (
146
+ r"\matrix",
147
+ r"\matrix*",
148
+ r"\pmatrix",
149
+ r"\pmatrix*",
150
+ r"\bmatrix",
151
+ r"\bmatrix*",
152
+ r"\Bmatrix",
153
+ r"\Bmatrix*",
154
+ r"\vmatrix",
155
+ r"\vmatrix*",
156
+ r"\Vmatrix",
157
+ r"\Vmatrix*",
158
+ r"\array",
159
+ SUBSTACK,
160
+ CASES,
161
+ DISPLAYLINES,
162
+ SMALLMATRIX,
163
+ SPLIT,
164
+ ALIGN,
165
+ )
166
+
167
+ BACKSLASH = "\\"
168
+ CARRIAGE_RETURN = r"\cr"
169
+
170
+ COLON = r"\:"
171
+ COMMA = r"\,"
172
+ DOUBLEBACKSLASH = r"\\"
173
+ ENSPACE = r"\enspace"
174
+ EXCLAMATION = r"\!"
175
+ GREATER_THAN = r"\>"
176
+ HSKIP = r"\hskip"
177
+ HSPACE = r"\hspace"
178
+ KERN = r"\kern"
179
+ MKERN = r"\mkern"
180
+ MSKIP = r"\mskip"
181
+ MSPACE = r"\mspace"
182
+ NEGTHINSPACE = r"\negthinspace"
183
+ NEGMEDSPACE = r"\negmedspace"
184
+ NEGTHICKSPACE = r"\negthickspace"
185
+ NOBREAKSPACE = r"\nobreakspace"
186
+ SPACE = r"\space"
187
+ THINSPACE = r"\thinspace"
188
+ QQUAD = r"\qquad"
189
+ QUAD = r"\quad"
190
+ SEMICOLON = r"\;"
191
+
192
+ BLACKBOARD_BOLD = r"\Bbb"
193
+ BOLD_SYMBOL = r"\boldsymbol"
194
+ MIT = r"\mit"
195
+ OLDSTYLE = r"\oldstyle"
196
+ SCR = r"\scr"
197
+ TT = r"\tt"
198
+
199
+ MATH = r"\math"
200
+ MATHBB = r"\mathbb"
201
+ MATHBF = r"\mathbf"
202
+ MATHCAL = r"\mathcal"
203
+ MATHFRAK = r"\mathfrak"
204
+ MATHIT = r"\mathit"
205
+ MATHRM = r"\mathrm"
206
+ MATHSCR = r"\mathscr"
207
+ MATHSF = r"\mathsf"
208
+ MATHTT = r"\mathtt"
209
+
210
+ BOXED = r"\boxed"
211
+ FBOX = r"\fbox"
212
+ HBOX = r"\hbox"
213
+ MBOX = r"\mbox"
214
+
215
+ COLOR = r"\color"
216
+ DISPLAYSTYLE = r"\displaystyle"
217
+ TEXTSTYLE = r"\textstyle"
218
+ SCRIPTSTYLE = r"\scriptstyle"
219
+ SCRIPTSCRIPTSTYLE = r"\scriptscriptstyle"
220
+ STYLE = r"\style"
221
+
222
+ HPHANTOM = r"\hphantom"
223
+ PHANTOM = r"\phantom"
224
+ VPHANTOM = r"\vphantom"
225
+
226
+ IDOTSINT = r"\idotsint"
227
+ LATEX = r"\LaTeX"
228
+ TEX = r"\TeX"
229
+
230
+ SIDESET = r"\sideset"
231
+
232
+ SKEW = r"\skew"
233
+ NOT = r"\not"
234
+
235
+
236
def font_factory(default: Optional[str], replacement: Dict[str, Optional[str]]) -> DefaultDict[str, Optional[str]]:
    """Build a font-variant lookup table.

    Keys present in *replacement* map to their explicit mathvariant value
    (possibly ``None`` meaning "no variant"); every other key falls back to
    *default* via the defaultdict factory.
    """
    return defaultdict(lambda: default, replacement)
239
+
240
+
241
+ LOCAL_FONTS: Dict[str, DefaultDict[str, Optional[str]]] = {
242
+ BLACKBOARD_BOLD: font_factory("double-struck", {"fence": None}),
243
+ BOLD_SYMBOL: font_factory("bold", {"mi": "bold-italic", "mtext": None}),
244
+ MATHBB: font_factory("double-struck", {"fence": None}),
245
+ MATHBF: font_factory("bold", {"fence": None}),
246
+ MATHCAL: font_factory("script", {"fence": None}),
247
+ MATHFRAK: font_factory("fraktur", {"fence": None}),
248
+ MATHIT: font_factory("italic", {"fence": None}),
249
+ MATHRM: font_factory(None, {"mi": "normal"}),
250
+ MATHSCR: font_factory("script", {"fence": None}),
251
+ MATHSF: font_factory(None, {"mi": "sans-serif"}),
252
+ MATHTT: font_factory("monospace", {"fence": None}),
253
+ MIT: font_factory("italic", {"fence": None, "mi": None}),
254
+ OLDSTYLE: font_factory("normal", {"fence": None}),
255
+ SCR: font_factory("script", {"fence": None}),
256
+ TT: font_factory("monospace", {"fence": None}),
257
+ }
258
+
259
+ OLD_STYLE_FONTS: Dict[str, DefaultDict[str, Optional[str]]] = {
260
+ r"\rm": font_factory(None, {"mi": "normal"}),
261
+ r"\bf": font_factory(None, {"mi": "bold"}),
262
+ r"\it": font_factory(None, {"mi": "italic"}),
263
+ r"\sf": font_factory(None, {"mi": "sans-serif"}),
264
+ r"\tt": font_factory(None, {"mi": "monospace"}),
265
+ }
266
+
267
+ GLOBAL_FONTS = {
268
+ **OLD_STYLE_FONTS,
269
+ r"\cal": font_factory("script", {"fence": None}),
270
+ r"\frak": font_factory("fraktur", {"fence": None}),
271
+ }
272
+
273
+ COMMANDS_WITH_ONE_PARAMETER = (
274
+ ACUTE,
275
+ BAR,
276
+ BLACKBOARD_BOLD,
277
+ BOLD_SYMBOL,
278
+ BOXED,
279
+ BREVE,
280
+ CHECK,
281
+ DOT,
282
+ DDOT,
283
+ DDDOT,
284
+ DDDDOT,
285
+ GRAVE,
286
+ HAT,
287
+ HPHANTOM,
288
+ MATHRING,
289
+ MIT,
290
+ MOD,
291
+ OLDSTYLE,
292
+ OVERBRACE,
293
+ OVERLEFTARROW,
294
+ OVERLEFTRIGHTARROW,
295
+ OVERLINE,
296
+ OVERPAREN,
297
+ OVERRIGHTARROW,
298
+ PHANTOM,
299
+ PMOD,
300
+ SCR,
301
+ TILDE,
302
+ TT,
303
+ UNDERBRACE,
304
+ UNDERLEFTARROW,
305
+ UNDERLINE,
306
+ UNDERPAREN,
307
+ UNDERRIGHTARROW,
308
+ UNDERLEFTRIGHTARROW,
309
+ VEC,
310
+ VPHANTOM,
311
+ WIDEHAT,
312
+ WIDETILDE,
313
+ )
314
+ COMMANDS_WITH_TWO_PARAMETERS = (
315
+ BINOM,
316
+ CFRAC,
317
+ DBINOM,
318
+ DFRAC,
319
+ FRAC,
320
+ OVERSET,
321
+ TBINOM,
322
+ TFRAC,
323
+ UNDERSET,
324
+ )
325
+
326
+ BIG: Dict[str, Tuple[str, dict]] = {
327
+ # command: (mathml_equivalent, attributes)
328
+ r"\Bigg": ("mo", OrderedDict([("minsize", "2.470em"), ("maxsize", "2.470em")])),
329
+ r"\bigg": ("mo", OrderedDict([("minsize", "2.047em"), ("maxsize", "2.047em")])),
330
+ r"\Big": ("mo", OrderedDict([("minsize", "1.623em"), ("maxsize", "1.623em")])),
331
+ r"\big": ("mo", OrderedDict([("minsize", "1.2em"), ("maxsize", "1.2em")])),
332
+ }
333
+
334
+ BIG_OPEN_CLOSE = {
335
+ command + postfix: (tag, OrderedDict([("stretchy", "true"), ("fence", "true"), *attrib.items()]))
336
+ for command, (tag, attrib) in BIG.items()
337
+ for postfix in "lmr"
338
+ }
339
+
340
+ MSTYLE_SIZES: Dict[str, Tuple[str, dict]] = {
341
+ # command: (mathml_equivalent, attributes)
342
+ r"\Huge": ("mstyle", {"mathsize": "2.49em"}),
343
+ r"\huge": ("mstyle", {"mathsize": "2.07em"}),
344
+ r"\LARGE": ("mstyle", {"mathsize": "1.73em"}),
345
+ r"\Large": ("mstyle", {"mathsize": "1.44em"}),
346
+ r"\large": ("mstyle", {"mathsize": "1.2em"}),
347
+ r"\normalsize": ("mstyle", {"mathsize": "1em"}),
348
+ r"\scriptsize": ("mstyle", {"mathsize": "0.7em"}),
349
+ r"\small": ("mstyle", {"mathsize": "0.85em"}),
350
+ r"\tiny": ("mstyle", {"mathsize": "0.5em"}),
351
+ r"\Tiny": ("mstyle", {"mathsize": "0.6em"}),
352
+ }
353
+
354
+ STYLES: Dict[str, Tuple[str, dict]] = {
355
+ DISPLAYSTYLE: ("mstyle", {"displaystyle": "true", "scriptlevel": "0"}),
356
+ TEXTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "0"}),
357
+ SCRIPTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "1"}),
358
+ SCRIPTSCRIPTSTYLE: ("mstyle", {"displaystyle": "false", "scriptlevel": "2"}),
359
+ }
360
+
361
+ CONVERSION_MAP: Dict[str, Tuple[str, dict]] = {
362
+ # command: (mathml_equivalent, attributes)
363
+ # tables
364
+ **{matrix: ("mtable", {}) for matrix in MATRICES},
365
+ DISPLAYLINES: ("mtable", {"rowspacing": "0.5em", "columnspacing": "1em", "displaystyle": "true"}),
366
+ SMALLMATRIX: ("mtable", {"rowspacing": "0.1em", "columnspacing": "0.2778em"}),
367
+ SPLIT: (
368
+ "mtable",
369
+ {"displaystyle": "true", "columnspacing": "0em", "rowspacing": "3pt"},
370
+ ),
371
+ ALIGN: (
372
+ "mtable",
373
+ {"displaystyle": "true", "rowspacing": "3pt"},
374
+ ),
375
+ # subscripts/superscripts
376
+ SUBSCRIPT: ("msub", {}),
377
+ SUPERSCRIPT: ("msup", {}),
378
+ SUBSUP: ("msubsup", {}),
379
+ # fractions
380
+ BINOM: ("mfrac", {"linethickness": "0"}),
381
+ CFRAC: ("mfrac", {}),
382
+ DBINOM: ("mfrac", {"linethickness": "0"}),
383
+ DFRAC: ("mfrac", {}),
384
+ FRAC: ("mfrac", {}),
385
+ GENFRAC: ("mfrac", {}),
386
+ TBINOM: ("mfrac", {"linethickness": "0"}),
387
+ TFRAC: ("mfrac", {}),
388
+ # over/under
389
+ ACUTE: ("mover", {}),
390
+ BAR: ("mover", {}),
391
+ BREVE: ("mover", {}),
392
+ CHECK: ("mover", {}),
393
+ DOT: ("mover", {}),
394
+ DDOT: ("mover", {}),
395
+ DDDOT: ("mover", {}),
396
+ DDDDOT: ("mover", {}),
397
+ GRAVE: ("mover", {}),
398
+ HAT: ("mover", {}),
399
+ LIMITS: ("munderover", {}),
400
+ MATHRING: ("mover", {}),
401
+ OVERBRACE: ("mover", {}),
402
+ OVERLEFTARROW: ("mover", {}),
403
+ OVERLEFTRIGHTARROW: ("mover", {}),
404
+ OVERLINE: ("mover", {}),
405
+ OVERPAREN: ("mover", {}),
406
+ OVERRIGHTARROW: ("mover", {}),
407
+ TILDE: ("mover", {}),
408
+ OVERSET: ("mover", {}),
409
+ UNDERBRACE: ("munder", {}),
410
+ UNDERLEFTARROW: ("munder", {}),
411
+ UNDERLINE: ("munder", {}),
412
+ UNDERPAREN: ("munder", {}),
413
+ UNDERRIGHTARROW: ("munder", {}),
414
+ UNDERLEFTRIGHTARROW: ("munder", {}),
415
+ UNDERSET: ("munder", {}),
416
+ VEC: ("mover", {}),
417
+ WIDEHAT: ("mover", {}),
418
+ WIDETILDE: ("mover", {}),
419
+ # spaces
420
+ COLON: ("mspace", {"width": "0.222em"}),
421
+ COMMA: ("mspace", {"width": "0.167em"}),
422
+ DOUBLEBACKSLASH: ("mspace", {"linebreak": "newline"}),
423
+ ENSPACE: ("mspace", {"width": "0.5em"}),
424
+ EXCLAMATION: ("mspace", {"width": "negativethinmathspace"}),
425
+ GREATER_THAN: ("mspace", {"width": "0.222em"}),
426
+ HSKIP: ("mspace", {}),
427
+ HSPACE: ("mspace", {}),
428
+ KERN: ("mspace", {}),
429
+ MKERN: ("mspace", {}),
430
+ MSKIP: ("mspace", {}),
431
+ MSPACE: ("mspace", {}),
432
+ NEGTHINSPACE: ("mspace", {"width": "negativethinmathspace"}),
433
+ NEGMEDSPACE: ("mspace", {"width": "negativemediummathspace"}),
434
+ NEGTHICKSPACE: ("mspace", {"width": "negativethickmathspace"}),
435
+ THINSPACE: ("mspace", {"width": "thinmathspace"}),
436
+ QQUAD: ("mspace", {"width": "2em"}),
437
+ QUAD: ("mspace", {"width": "1em"}),
438
+ SEMICOLON: ("mspace", {"width": "0.278em"}),
439
+ # enclose
440
+ BOXED: ("menclose", {"notation": "box"}),
441
+ FBOX: ("menclose", {"notation": "box"}),
442
+ # operators
443
+ **BIG,
444
+ **BIG_OPEN_CLOSE,
445
+ **MSTYLE_SIZES,
446
+ **{limit: ("mo", {}) for limit in LIMIT},
447
+ LEFT: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "prefix")])),
448
+ MIDDLE: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("lspace", "0.05em"), ("rspace", "0.05em")])),
449
+ RIGHT: ("mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "postfix")])),
450
+ # styles
451
+ COLOR: ("mstyle", {}),
452
+ **STYLES,
453
+ # others
454
+ SQRT: ("msqrt", {}),
455
+ ROOT: ("mroot", {}),
456
+ HREF: ("mtext", {}),
457
+ TEXT: ("mtext", {}),
458
+ TEXTBF: ("mtext", {"mathvariant": "bold"}),
459
+ TEXTIT: ("mtext", {"mathvariant": "italic"}),
460
+ TEXTRM: ("mtext", {}),
461
+ TEXTSF: ("mtext", {"mathvariant": "sans-serif"}),
462
+ TEXTTT: ("mtext", {"mathvariant": "monospace"}),
463
+ HBOX: ("mtext", {}),
464
+ MBOX: ("mtext", {}),
465
+ HPHANTOM: ("mphantom", {}),
466
+ PHANTOM: ("mphantom", {}),
467
+ VPHANTOM: ("mphantom", {}),
468
+ SIDESET: ("mrow", {}),
469
+ SKEW: ("mrow", {}),
470
+ MOD: ("mi", {}),
471
+ PMOD: ("mi", {}),
472
+ BMOD: ("mo", {}),
473
+ XLEFTARROW: ("mover", {}),
474
+ XRIGHTARROW: ("mover", {}),
475
+ }
476
+
477
+
478
+ DIACRITICS: Dict[str, Tuple[str, Dict[str, str]]] = {
479
+ ACUTE: ("&#x000B4;", {}),
480
+ BAR: ("&#x000AF;", {"stretchy": "true"}),
481
+ BREVE: ("&#x002D8;", {}),
482
+ CHECK: ("&#x002C7;", {}),
483
+ DOT: ("&#x002D9;", {}),
484
+ DDOT: ("&#x000A8;", {}),
485
+ DDDOT: ("&#x020DB;", {}),
486
+ DDDDOT: ("&#x020DC;", {}),
487
+ GRAVE: ("&#x00060;", {}),
488
+ HAT: ("&#x0005E;", {"stretchy": "false"}),
489
+ MATHRING: ("&#x002DA;", {}),
490
+ OVERBRACE: ("&#x23DE;", {}),
491
+ OVERLEFTARROW: ("&#x02190;", {}),
492
+ OVERLEFTRIGHTARROW: ("&#x02194;", {}),
493
+ OVERLINE: ("&#x02015;", {"accent": "true"}),
494
+ OVERPAREN: ("&#x023DC;", {}),
495
+ OVERRIGHTARROW: ("&#x02192;", {}),
496
+ TILDE: ("&#x0007E;", {"stretchy": "false"}),
497
+ UNDERBRACE: ("&#x23DF;", {}),
498
+ UNDERLEFTARROW: ("&#x02190;", {}),
499
+ UNDERLEFTRIGHTARROW: ("&#x02194;", {}),
500
+ UNDERLINE: ("&#x02015;", {"accent": "true"}),
501
+ UNDERPAREN: ("&#x023DD;", {}),
502
+ UNDERRIGHTARROW: ("&#x02192;", {}),
503
+ VEC: ("&#x02192;", {"stretchy": "true"}),
504
+ WIDEHAT: ("&#x0005E;", {}),
505
+ WIDETILDE: ("&#x0007E;", {}),
506
+ }
videollama2/lib/python3.10/site-packages/latex2mathml/converter.py ADDED
@@ -0,0 +1,595 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import enum
3
+ import re
4
+ from collections import OrderedDict
5
+ from typing import Dict, Iterable, Iterator, List, Optional, Tuple
6
+ from xml.etree.cElementTree import Element, SubElement, tostring
7
+ from xml.sax.saxutils import unescape
8
+
9
+ from latex2mathml import commands
10
+ from latex2mathml.symbols_parser import convert_symbol
11
+ from latex2mathml.walker import Node, walk
12
+
13
+ COLUMN_ALIGNMENT_MAP = {"r": "right", "l": "left", "c": "center"}
14
+ OPERATORS = (
15
+ "+",
16
+ "-",
17
+ "*",
18
+ "/",
19
+ "(",
20
+ ")",
21
+ "=",
22
+ ",",
23
+ "?",
24
+ "[",
25
+ "]",
26
+ "|",
27
+ r"\|",
28
+ "!",
29
+ r"\{",
30
+ r"\}",
31
+ r">",
32
+ r"<",
33
+ r".",
34
+ r"\bigotimes",
35
+ r"\centerdot",
36
+ r"\dots",
37
+ r"\dotsc",
38
+ r"\dotso",
39
+ r"\gt",
40
+ r"\ldotp",
41
+ r"\lt",
42
+ r"\lvert",
43
+ r"\lVert",
44
+ r"\lvertneqq",
45
+ r"\ngeqq",
46
+ r"\omicron",
47
+ r"\rvert",
48
+ r"\rVert",
49
+ r"\S",
50
+ r"\smallfrown",
51
+ r"\smallint",
52
+ r"\smallsmile",
53
+ r"\surd",
54
+ r"\varsubsetneqq",
55
+ r"\varsupsetneqq",
56
+ )
57
+ MATH_MODE_PATTERN = re.compile(r"\\\$|\$|\\?[^\\$]+")
58
+
59
+
60
class Mode(enum.Enum):
    # Segment modes yielded by separate_by_mode(): TEXT for plain text, MATH
    # for content that appeared between $...$ delimiters.
    TEXT = enum.auto()
    MATH = enum.auto()
63
+
64
+
65
def convert(
    latex: str,
    xmlns: str = "http://www.w3.org/1998/Math/MathML",
    display: str = "inline",
    parent: Optional[Element] = None,
) -> str:
    """Convert a LaTeX string into serialized MathML markup.

    Builds the element tree with :func:`convert_to_element` and serializes it
    to a unicode string.
    """
    return _convert(convert_to_element(latex, xmlns, display, parent))
73
+
74
+
75
def convert_to_element(
    latex: str,
    xmlns: str = "http://www.w3.org/1998/Math/MathML",
    display: str = "inline",
    parent: Optional[Element] = None,
) -> Element:
    """Convert a LaTeX string into a ``<math>`` Element tree.

    The tree is created standalone when *parent* is ``None``, otherwise it is
    attached to *parent*.  All converted content goes into a single ``<mrow>``
    child of the ``<math>`` root.
    """
    attributes = {"xmlns": xmlns, "display": display}
    if parent is None:
        math = Element("math", attributes)
    else:
        math = SubElement(parent, "math", attributes)
    _convert_group(iter(walk(latex)), SubElement(math, "mrow"))
    return math
87
+
88
+
89
+ def _convert(tree: Element) -> str:
90
+ return unescape(tostring(tree, encoding="unicode"))
91
+
92
+
93
def _convert_matrix(nodes: Iterator[Node], parent: Element, command: str, alignment: Optional[str] = None) -> None:
    """Convert the children of a matrix-like environment into <mtr>/<mtd> rows.

    Walks *nodes* once, splitting cells on "&" and rows on \\ / \\cr, and
    appends the resulting table rows directly to *parent* (expected to be an
    <mtable>).  *alignment* is a column spec such as "rl"; it is cycled when
    a row has more columns than the spec.
    """
    row = None   # current <mtr>, created lazily on the first node
    cell = None  # current <mtd>, created lazily per cell

    col_index = 0
    col_alignment = None

    max_col_size = 0  # widest row seen, used for \align column spacing

    row_index = 0
    row_lines: List[str] = []  # per-row line style ("solid"/"dashed"/"none")

    # Tracks \hfil occurrences inside the current cell; used to derive the
    # cell's columnalign when the row ends.
    hfil_indexes: List[bool] = []

    for node in nodes:
        if row is None:
            row = SubElement(parent, "mtr")

        if cell is None:
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            cell = _make_matrix_cell(row, col_alignment)

        if node.token == commands.BRACES:
            # Braced group: convert it as-is into the current cell.
            _convert_group(iter([node]), cell)
        elif node.token == "&":
            # Column separator: finalize the current cell, start the next one.
            _set_cell_alignment(cell, hfil_indexes)
            hfil_indexes = []
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            cell = _make_matrix_cell(row, col_alignment)
            if command in (commands.SPLIT, commands.ALIGN) and col_index % 2 == 0:
                # Alignment environments get an empty <mi> spacer in every
                # even column.
                SubElement(cell, "mi")
        elif node.token in (commands.DOUBLEBACKSLASH, commands.CARRIAGE_RETURN):
            # Row separator: finalize the cell, record the row width, and
            # open a fresh row + cell.
            _set_cell_alignment(cell, hfil_indexes)
            hfil_indexes = []
            row_index += 1
            if col_index > max_col_size:
                max_col_size = col_index
            col_index = 0
            col_alignment, col_index = _get_column_alignment(alignment, col_alignment, col_index)
            row = SubElement(parent, "mtr")
            cell = _make_matrix_cell(row, col_alignment)
        elif node.token == commands.HLINE:
            row_lines.append("solid")
        elif node.token == commands.HDASHLINE:
            row_lines.append("dashed")
        elif node.token == commands.HFIL:
            hfil_indexes.append(True)
        else:
            # Ordinary content: pad row_lines so indices stay in sync, then
            # convert the node into the current cell.
            if row_index > len(row_lines):
                row_lines.append("none")
            hfil_indexes.append(False)
            _convert_group(iter([node]), cell)

    if col_index > max_col_size:
        max_col_size = col_index

    # Only emit rowlines if at least one \hline was present.
    if any(r == "solid" for r in row_lines):
        parent.set("rowlines", " ".join(row_lines))

    if row is not None and cell is not None and len(cell) == 0:
        # Remove last row if it does not contain anything
        parent.remove(row)

    if max_col_size and command == commands.ALIGN:
        # \align* alternates tight/wide column gaps across the widest row.
        spacing = ("0em", "2em")
        multiplier = max_col_size // len(spacing)
        parent.set("columnspacing", " ".join(spacing * multiplier))
160
+
161
+
162
+ def _set_cell_alignment(cell: Element, hfil_indexes: List[bool]) -> None:
163
+ if cell is not None and any(hfil_indexes) and len(hfil_indexes) > 1:
164
+ if hfil_indexes[0] and not hfil_indexes[-1]:
165
+ cell.attrib["columnalign"] = "right"
166
+ elif not hfil_indexes[0] and hfil_indexes[-1]:
167
+ cell.attrib["columnalign"] = "left"
168
+
169
+
170
def _get_column_alignment(
    alignment: Optional[str], column_alignment: Optional[str], column_index: int
) -> Tuple[Optional[str], int]:
    """Resolve the alignment for the next column and advance the index.

    When *alignment* is falsy, both values pass through unchanged.  Otherwise
    the spec character at *column_index* (wrapping around when the spec is
    shorter than the row) is mapped via COLUMN_ALIGNMENT_MAP.
    """
    if alignment:
        try:
            spec_char = alignment[column_index]
        except IndexError:
            # Row is wider than the spec: cycle the spec characters.
            spec_char = alignment[column_index % len(alignment)]
        column_alignment = COLUMN_ALIGNMENT_MAP.get(spec_char)
        column_index += 1
    return column_alignment, column_index
180
+
181
+
182
+ def _make_matrix_cell(row: Element, column_alignment: Optional[str]) -> Element:
183
+ if column_alignment:
184
+ return SubElement(row, "mtd", columnalign=column_alignment)
185
+ return SubElement(row, "mtd")
186
+
187
+
188
def _convert_group(nodes: Iterable[Node], parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a sequence of parsed nodes into children of *parent*.

    Dispatches each node to command conversion, font handling, symbol
    conversion, or a recursive <mrow> group.  *font* is the active font table
    (see commands.LOCAL_FONTS / GLOBAL_FONTS); old-style global fonts mutate
    it for all subsequent siblings.
    """
    _font = font
    for node in nodes:
        token = node.token
        if token in (*commands.MSTYLE_SIZES, *commands.STYLES):
            # Size/style commands apply to everything after them: consume the
            # REST of the iterator as this node's children.
            node = Node(token=token, children=tuple(n for n in nodes))
            _convert_command(node, parent, _font)
        elif token in commands.CONVERSION_MAP or token in (commands.MOD, commands.PMOD):
            _convert_command(node, parent, _font)
        elif token in commands.LOCAL_FONTS and node.children is not None:
            # Local fonts only affect their own children.
            _convert_group(iter(node.children), parent, commands.LOCAL_FONTS[token])
        elif token.startswith(commands.MATH) and node.children is not None:
            # \math... wrappers not in LOCAL_FONTS: transparent pass-through.
            _convert_group(iter(node.children), parent, _font)
        elif token in commands.GLOBAL_FONTS.keys():
            # Old-style fonts (\rm, \bf, ...) change the font for all
            # following siblings in this group.
            _font = commands.GLOBAL_FONTS.get(token)
        elif node.children is None:
            # Leaf node: a symbol, identifier, number, or operator.
            _convert_symbol(node, parent, _font)
        elif node.children is not None:
            # Plain braced group: wrap in an <mrow>.
            attributes = node.attributes or {}
            _row = SubElement(parent, "mrow", attrib=attributes)
            _convert_group(iter(node.children), _row, _font)
209
+
210
+
211
+ def _get_alignment_and_column_lines(alignment: Optional[str] = None) -> Tuple[Optional[str], Optional[str]]:
212
+ if alignment is None:
213
+ return None, None
214
+ if "|" not in alignment:
215
+ return alignment, None
216
+ _alignment = ""
217
+ column_lines = []
218
+ for c in alignment:
219
+ if c == "|":
220
+ column_lines.append("solid")
221
+ else:
222
+ _alignment += c
223
+ if len(_alignment) - len(column_lines) == 2:
224
+ column_lines.append("none")
225
+ return _alignment, " ".join(column_lines)
226
+
227
+
228
def separate_by_mode(text: str) -> Iterator[Tuple[str, Mode]]:
    """Yield (segment, mode) pairs, toggling math mode at each unescaped $.

    Segments between ``$`` delimiters are tagged ``Mode.MATH``; everything
    else (including escaped ``\\$``) is ``Mode.TEXT``.
    """
    buffer = ""
    in_math = False
    for piece in MATH_MODE_PATTERN.findall(text):
        if piece == "$":  # should match both $ and $$
            yield buffer, Mode.MATH if in_math else Mode.TEXT
            buffer = ""
            in_math = not in_math
        else:
            buffer += piece
    if buffer:
        yield buffer, Mode.MATH if in_math else Mode.TEXT
    # TODO: if stays in math mode, means not terminated properly, raise error
241
+
242
+
243
def _convert_command(node: Node, parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a single command node into MathML under *parent*.

    Looks up the command's MathML tag/attributes in commands.CONVERSION_MAP,
    creates the element (sometimes wrapped in an extra mstyle/mrow/mpadded),
    fills in its text, and recursively converts its children.  The exact
    statement order matters: wrapper elements must be created before the main
    element, and prefix/postfix decorations before/after children.
    """
    command = node.token
    modifier = node.modifier

    # --- wrapper elements: some commands re-parent into an extra element ---
    if command in (commands.SUBSTACK, commands.SMALLMATRIX):
        parent = SubElement(parent, "mstyle", scriptlevel="1")
    elif command == commands.CASES:
        # \cases gets an explicit stretchy left brace before the table.
        parent = SubElement(parent, "mrow")
        lbrace = SubElement(parent, "mo", OrderedDict([("stretchy", "true"), ("fence", "true"), ("form", "prefix")]))
        lbrace.text = "&#x{};".format(convert_symbol(commands.LBRACE))
    elif command in (commands.DBINOM, commands.DFRAC):
        parent = SubElement(parent, "mstyle", displaystyle="true", scriptlevel="0")
    elif command == commands.HPHANTOM:
        parent = SubElement(parent, "mpadded", height="0", depth="0")
    elif command == commands.VPHANTOM:
        parent = SubElement(parent, "mpadded", width="0")
    elif command in (commands.TBINOM, commands.HBOX, commands.MBOX, commands.TFRAC):
        parent = SubElement(parent, "mstyle", displaystyle="false", scriptlevel="0")
    elif command in (commands.MOD, commands.PMOD):
        SubElement(parent, "mspace", width="1em")

    # Deep-copy so per-node attribute updates don't mutate the shared map.
    tag, attributes = copy.deepcopy(commands.CONVERSION_MAP[command])

    if node.attributes is not None and node.token != commands.SKEW:
        attributes.update(node.attributes)

    if command == commands.LEFT:
        # \left...\right content is grouped in its own <mrow>.
        parent = SubElement(parent, "mrow")

    _append_prefix_element(node, parent)

    alignment, column_lines = _get_alignment_and_column_lines(node.alignment)

    if column_lines:
        attributes["columnlines"] = column_lines

    # --- tag overrides: scripts render over/under instead of sub/sup ---
    if command == commands.SUBSUP and node.children is not None and node.children[0].token == commands.GCD:
        tag = "munderover"
    elif command == commands.SUPERSCRIPT and modifier in (commands.LIMITS, commands.OVERBRACE):
        tag = "mover"
    elif command == commands.SUBSCRIPT and modifier in (commands.LIMITS, commands.UNDERBRACE):
        tag = "munder"
    elif command == commands.SUBSUP and modifier in (commands.LIMITS, commands.OVERBRACE, commands.UNDERBRACE):
        tag = "munderover"
    elif (
        command in (commands.XLEFTARROW, commands.XRIGHTARROW) and node.children is not None and len(node.children) == 2
    ):
        tag = "munderover"

    element = SubElement(parent, tag, attributes)

    # --- element text ---
    if command in commands.LIMIT:
        element.text = command[1:]  # "\lim" -> "lim", etc.
    elif command in (commands.MOD, commands.PMOD):
        element.text = "mod"
        SubElement(parent, "mspace", width="0.333em")
    elif command == commands.BMOD:
        element.text = "mod"
    elif command in (commands.XLEFTARROW, commands.XRIGHTARROW):
        style = SubElement(element, "mstyle", scriptlevel="0")
        arrow = SubElement(style, "mo")
        if command == commands.XLEFTARROW:
            arrow.text = "&#x2190;"
        elif command == commands.XRIGHTARROW:
            arrow.text = "&#x2192;"
    elif node.text is not None:
        if command == commands.MIDDLE:
            element.text = "&#x{};".format(convert_symbol(node.text))
        elif command == commands.HBOX:
            # \hbox text may embed $...$ math segments; alternate between
            # <mtext> chunks and recursively converted <mrow>s.
            mtext: Optional[Element] = element
            for text, mode in separate_by_mode(node.text):
                if mode == Mode.TEXT:
                    if mtext is None:
                        mtext = SubElement(parent, tag, attributes)
                    mtext.text = text.replace(" ", "&#x000A0;")
                    _set_font(mtext, "mtext", font)
                    mtext = None
                else:
                    _row = SubElement(parent, "mrow")
                    _convert_group(iter(walk(text)), _row)
        else:
            if command == commands.FBOX:
                # \fbox wraps its text in an inner <mtext> inside <menclose>.
                element = SubElement(element, "mtext")
            element.text = node.text.replace(" ", "&#x000A0;")
            _set_font(element, "mtext", font)
    elif node.delimiter is not None and command not in (commands.FRAC, commands.GENFRAC):
        # "." is the invisible delimiter (\left. / \right.): emit no text.
        if node.delimiter != ".":
            symbol = convert_symbol(node.delimiter)
            element.text = node.delimiter if symbol is None else "&#x{};".format(symbol)

    # --- children ---
    if node.children is not None:
        _parent = element
        if command in (commands.LEFT, commands.MOD, commands.PMOD):
            # These keep their children as siblings, not nested inside.
            _parent = parent
        if command in commands.MATRICES:
            if command == commands.CASES:
                alignment = "l"
            elif command in (commands.SPLIT, commands.ALIGN):
                alignment = "rl"
            _convert_matrix(iter(node.children), _parent, command, alignment=alignment)
        elif command == commands.CFRAC:
            # Each \cfrac operand is forced into text style.
            for child in node.children:
                p = SubElement(_parent, "mstyle", displaystyle="false", scriptlevel="0")
                _convert_group(iter([child]), p, font)
        elif command == commands.SIDESET:
            # NOTE(review): the following Node(...) expression builds a tuple
            # that is never used — dead code left in place to preserve
            # behavior; the actual spacer is created below via SubElement.
            Node(
                r"\style",
                children=(Node(r"\mspace", attributes={"width": "-0.167em"}),),
                attributes={"scriptlevel": "0"},
            ),
            left, right = node.children
            _convert_group(iter([left]), _parent, font)
            fill = SubElement(_parent, "mstyle", scriptlevel="0")
            SubElement(fill, "mspace", width="-0.167em")
            _convert_group(iter([right]), _parent, font)
        elif command == commands.SKEW:
            # Rewrite \skew{n}{cmd}{x} as cmd{{x \mkern attrs}} and recurse.
            child = node.children[0]
            new_node = Node(
                token=child.token,
                children=(
                    Node(
                        token=commands.BRACES,
                        children=(*child.children, Node(token=commands.MKERN, attributes=node.attributes)),
                    ),
                ),
            )
            _convert_group(iter([new_node]), _parent, font)
        elif command in (commands.XLEFTARROW, commands.XRIGHTARROW):
            # Labels above/below extensible arrows get padding around them.
            for child in node.children:
                padded = SubElement(
                    _parent,
                    "mpadded",
                    OrderedDict(
                        [("width", "+0.833em"), ("lspace", "0.556em"), ("voffset", "-.2em"), ("height", "-.2em")]
                    ),
                )
                _convert_group(iter([child]), padded, font)
                SubElement(padded, "mspace", depth=".25em")
        else:
            _convert_group(iter(node.children), _parent, font)

    _add_diacritic(command, element)

    _append_postfix_element(node, parent)
387
+
388
+
389
def _add_diacritic(command: str, parent: Element) -> None:
    """Append the combining-accent <mo> for *command*, when it is a diacritic."""
    diacritic = commands.DIACRITICS.get(command)
    if diacritic is None:
        return
    # Deep-copy so mutations of the element's attrib dict never leak back
    # into the shared DIACRITICS table.
    text, attributes = copy.deepcopy(diacritic)
    mo = SubElement(parent, "mo", attributes)
    mo.text = text
394
+
395
+
396
def _convert_and_append_command(command: str, parent: Element, attributes: Optional[Dict[str, str]] = None) -> None:
    """Append an <mo> for *command*, preferring its Unicode entity when one is known."""
    code_point = convert_symbol(command)
    attrib = {} if attributes is None else attributes
    mo = SubElement(parent, "mo", attrib)
    if code_point:
        mo.text = "&#x{};".format(code_point)
    else:
        mo.text = command
400
+
401
+
402
def _append_prefix_element(node: Node, parent: Element) -> None:
    """Emit the opening fence for *node* (matrix, binom, pmod, delimited frac), if any."""
    # Inline (non-display) rendering and \tbinom use the smaller fence size.
    if parent.attrib.get("displaystyle") == "false" or node.token == commands.TBINOM:
        size = "1.2em"
    else:
        size = "2.047em"
    token = node.token
    fixed_fences = {
        r"\pmatrix": r"\lparen",
        commands.PMOD: r"\lparen",
        r"\bmatrix": r"\lbrack",
        r"\Bmatrix": r"\lbrace",
        r"\vmatrix": r"\vert",
        r"\Vmatrix": r"\Vert",
    }
    if token in fixed_fences:
        _convert_and_append_command(fixed_fences[token], parent)
    elif token in (commands.BINOM, commands.DBINOM, commands.TBINOM):
        _convert_and_append_command(r"\lparen", parent, {"minsize": size, "maxsize": size})
    elif token in (commands.FRAC, commands.GENFRAC) and node.delimiter is not None and node.delimiter[0] != ".":
        # TODO: use 1.2em if inline
        _convert_and_append_command(node.delimiter[0], parent, {"minsize": size, "maxsize": size})
421
+
422
+
423
def _append_postfix_element(node: Node, parent: Element) -> None:
    """Emit the closing fence (or trailing \\skew correction) for *node*, if any."""
    # Inline (non-display) rendering and \tbinom use the smaller fence size.
    if parent.attrib.get("displaystyle") == "false" or node.token == commands.TBINOM:
        size = "1.2em"
    else:
        size = "2.047em"
    token = node.token
    fixed_fences = {
        r"\pmatrix": r"\rparen",
        commands.PMOD: r"\rparen",
        r"\bmatrix": r"\rbrack",
        r"\Bmatrix": r"\rbrace",
        r"\vmatrix": r"\vert",
        r"\Vmatrix": r"\Vert",
    }
    if token in fixed_fences:
        _convert_and_append_command(fixed_fences[token], parent)
    elif token in (commands.BINOM, commands.DBINOM, commands.TBINOM):
        _convert_and_append_command(r"\rparen", parent, {"minsize": size, "maxsize": size})
    elif token in (commands.FRAC, commands.GENFRAC) and node.delimiter is not None and node.delimiter[1] != ".":
        # TODO: use 1.2em if inline
        _convert_and_append_command(node.delimiter[1], parent, {"minsize": size, "maxsize": size})
    elif token == commands.SKEW and node.attributes is not None:
        # \skew is balanced with a negative space equal to the shift width.
        SubElement(parent, "mspace", width="-" + node.attributes["width"])
444
+
445
+
446
def _convert_symbol(node: Node, parent: Element, font: Optional[Dict[str, Optional[str]]] = None) -> None:
    """Convert a leaf token into the matching MathML element under *parent*.

    Picks <mn> for numbers, <mo> for operators/relations/arrows/named operators,
    <mtext> for non-breaking spaces, and <mi> otherwise. *font* (if given) maps
    element kinds to ``mathvariant`` values via ``_set_font``.
    """
    token = node.token
    attributes = node.attributes or {}
    symbol = convert_symbol(token)
    # Fix: the dot must be escaped; the unescaped `.` matched any character.
    # Behavior is unchanged for this prefix match (the group is optional),
    # but the pattern now states the intent: digits with an optional decimal part.
    if re.match(r"\d+(\.\d+)?", token):
        element = SubElement(parent, "mn", attrib=attributes)
        element.text = token
        _set_font(element, element.tag, font)
    elif token in OPERATORS:
        element = SubElement(parent, "mo", attrib=attributes)
        element.text = token if symbol is None else "&#x{};".format(symbol)
        if token == r"\|":
            element.attrib["fence"] = "false"
        if token == r"\smallint":
            element.attrib["largeop"] = "false"
        # Plain fence characters must not stretch and take the fence font.
        if token in ("(", ")", "[", "]", "|", r"\|", r"\{", r"\}", r"\surd"):
            element.attrib["stretchy"] = "false"
            _set_font(element, "fence", font)
        else:
            _set_font(element, element.tag, font)
    elif (
        symbol
        and (
            # U+2200-U+22FF: Mathematical Operators; U+2190-U+21FF: Arrows.
            int(symbol, 16) in range(int("2200", 16), int("22FF", 16) + 1)
            or int(symbol, 16) in range(int("2190", 16), int("21FF", 16) + 1)
        )
        or symbol == "."
    ):
        element = SubElement(parent, "mo", attrib=attributes)
        element.text = "&#x{};".format(symbol)
        _set_font(element, element.tag, font)
    elif token in (r"\ ", "~", commands.NOBREAKSPACE, commands.SPACE):
        element = SubElement(parent, "mtext", attrib=attributes)
        element.text = "&#x000A0;"  # no-break space
        _set_font(element, "mtext", font)
    elif token == commands.NOT:
        # Zero-width overlay so the negation slash combines with what follows.
        mpadded = SubElement(parent, "mpadded", width="0")
        element = SubElement(mpadded, "mtext")
        element.text = "&#x029F8;"
    elif token in (
        commands.DETERMINANT,
        commands.GCD,
        commands.INTOP,
        commands.INJLIM,
        commands.LIMINF,
        commands.LIMSUP,
        commands.PR,
        commands.PROJLIM,
    ):
        # Named operators whose limits may move above/below in display style.
        element = SubElement(parent, "mo", attrib={"movablelimits": "true", **attributes})
        texts = {
            commands.INJLIM: "inj&#x02006;lim",
            commands.INTOP: "&#x0222B;",
            commands.LIMINF: "lim&#x02006;inf",
            commands.LIMSUP: "lim&#x02006;sup",
            commands.PROJLIM: "proj&#x02006;lim",
        }
        # Default: command name without the leading backslash (e.g. \gcd -> "gcd").
        element.text = texts.get(token, token[1:])
        _set_font(element, element.tag, font)
    elif token == commands.IDOTSINT:
        _parent = SubElement(parent, "mrow", attrib=attributes)
        for s in ("&#x0222B;", "&#x022EF;", "&#x0222B;"):  # integral, cdots, integral
            element = SubElement(_parent, "mo")
            element.text = s
    elif token in (commands.LATEX, commands.TEX):
        # Hand-built kerning/raising to typeset the LaTeX / TeX logos.
        _parent = SubElement(parent, "mrow", attrib=attributes)
        if token == commands.LATEX:
            mi_l = SubElement(_parent, "mi")
            mi_l.text = "L"
            SubElement(_parent, "mspace", width="-.325em")
            mpadded = SubElement(_parent, "mpadded", height="+.21ex", depth="-.21ex", voffset="+.21ex")
            mstyle = SubElement(mpadded, "mstyle", displaystyle="false", scriptlevel="1")
            mrow = SubElement(mstyle, "mrow")
            mi_a = SubElement(mrow, "mi")
            mi_a.text = "A"
            SubElement(_parent, "mspace", width="-.17em")
            _set_font(mi_l, mi_l.tag, font)
            _set_font(mi_a, mi_a.tag, font)
        mi_t = SubElement(_parent, "mi")
        mi_t.text = "T"
        SubElement(_parent, "mspace", width="-.14em")
        mpadded = SubElement(_parent, "mpadded", height="-.5ex", depth="+.5ex", voffset="-.5ex")
        mrow = SubElement(mpadded, "mrow")
        mi_e = SubElement(mrow, "mi")
        mi_e.text = "E"
        SubElement(_parent, "mspace", width="-.115em")
        mi_x = SubElement(_parent, "mi")
        mi_x.text = "X"
        _set_font(mi_t, mi_t.tag, font)
        _set_font(mi_e, mi_e.tag, font)
        _set_font(mi_x, mi_x.tag, font)
    elif token.startswith(commands.OPERATORNAME):
        element = SubElement(parent, "mo", attrib=attributes)
        # Strip the wrapper: len(r"\operatorname{") == 14, trailing "}" dropped.
        element.text = token[14:-1]
    elif token.startswith(commands.BACKSLASH):
        element = SubElement(parent, "mi", attrib=attributes)
        if symbol:
            element.text = "&#x{};".format(symbol)
        elif token in commands.FUNCTIONS:
            element.text = token[1:]
        else:
            element.text = token
        _set_font(element, element.tag, font)
    else:
        element = SubElement(parent, "mi", attrib=attributes)
        element.text = token
        _set_font(element, element.tag, font)
553
+
554
+
555
def _set_font(element: Element, key: str, font: Optional[Dict[str, Optional[str]]]) -> None:
    """Apply the mathvariant configured for *key* in *font* (if any) to *element*."""
    if font is None:
        return
    variant = font[key]
    if variant is None:
        return
    element.attrib["mathvariant"] = variant
561
+
562
+
563
def main() -> None:  # pragma: no cover
    """CLI entry point: convert LaTeX (from -t/-f/-s) to MathML, or print the version."""
    import argparse
    import sys

    parser = argparse.ArgumentParser(description="Pure Python library for LaTeX to MathML conversion")
    parser.add_argument("-V", "--version", dest="version", action="store_true", required=False, help="Show version")
    parser.add_argument("-b", "--block", dest="block", action="store_true", required=False, help="Display block")

    required = parser.add_argument_group("required arguments")

    # Exactly one input source may be chosen; none at all is also accepted.
    group = required.add_mutually_exclusive_group(required=False)
    group.add_argument("-t", "--text", dest="text", type=str, required=False, help="Text")
    group.add_argument("-f", "--file", dest="file", type=str, required=False, help="File")
    group.add_argument("-s", "--stdin", dest="stdin", action="store_true", required=False, help="Stdin")

    args = parser.parse_args()
    display = "block" if args.block else "inline"

    if args.version:
        import latex2mathml

        print("latex2mathml", latex2mathml.__version__)
    elif args.text:
        print(convert(args.text, display=display))
    elif args.file:
        with open(args.file) as f:
            print(convert(f.read(), display=display))
    elif args.stdin:
        print(convert(sys.stdin.read(), display=display))
592
+
593
+
594
+ if __name__ == "__main__": # pragma: no cover
595
+ main()
videollama2/lib/python3.10/site-packages/latex2mathml/exceptions.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class NumeratorNotFoundError(Exception):
    """Raised when an \\over/\\atop-style command has no preceding tokens to use as numerator."""

    pass


class DenominatorNotFoundError(Exception):
    """Raised when an \\over/\\atop-style command has no following tokens to use as denominator."""

    pass


class ExtraLeftOrMissingRightError(Exception):
    """Raised on unbalanced \\left/\\right delimiters (or a stray \\middle)."""

    pass


class MissingSuperScriptOrSubscriptError(Exception):
    """Raised when ``_`` or ``^`` has no operand following it."""

    pass


class DoubleSubscriptsError(Exception):
    """Raised for consecutive subscripts on the same base (e.g. ``x_1_2``)."""

    pass


class DoubleSuperscriptsError(Exception):
    """Raised for consecutive superscripts on the same base (e.g. ``x^1^2``)."""

    pass


class NoAvailableTokensError(Exception):
    """Raised when the walker is asked to parse an empty token stream."""

    pass


class InvalidStyleForGenfracError(Exception):
    """Raised when the \\genfrac style argument is not one of the digits 0-3."""

    pass


class MissingEndError(Exception):
    """Raised when a \\begin{...} environment lacks its matching \\end{...}."""

    pass


class InvalidAlignmentError(Exception):
    """Raised when an environment alignment spec contains characters outside ``lcr|``."""

    pass


class InvalidWidthError(Exception):
    """Raised when \\skew is given a width that is missing or not a plain integer."""

    pass


class LimitsMustFollowMathOperatorError(Exception):
    """Raised when \\limits does not immediately follow a math operator."""

    pass
videollama2/lib/python3.10/site-packages/latex2mathml/py.typed ADDED
@@ -0,0 +1 @@
 
 
1
+ # PEP 561
videollama2/lib/python3.10/site-packages/latex2mathml/symbols_parser.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import codecs
2
+ import os
3
+ import re
4
+ from typing import Dict, Optional, Union
5
+
6
+ SYMBOLS_FILE: str = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unimathsymbols.txt")
7
+ SYMBOLS: Optional[Dict[str, str]] = None
8
+
9
+
10
def convert_symbol(symbol: str) -> Union[str, None]:
    """Return the Unicode code point (hex string) for a LaTeX symbol, or None.

    The symbol table is loaded lazily on first use and cached in the
    module-level ``SYMBOLS`` dict.
    """
    global SYMBOLS
    if not SYMBOLS:
        SYMBOLS = parse_symbols()
    return SYMBOLS.get(symbol)
15
+
16
+
17
def parse_symbols() -> Dict[str, str]:
    """Parse ``unimathsymbols.txt`` into a mapping of LaTeX names to code points.

    Each data row is caret-separated; this reads the code point (column 0),
    the LaTeX name (column 2), the unicode-math name (column 3), and any
    ``= \\alias`` / ``# \\alias`` entries from the comments column. First
    occurrence wins. A set of hand-maintained aliases is layered on at the end.
    """
    _symbols: Dict[str, str] = {}
    with codecs.open(SYMBOLS_FILE, encoding="utf-8") as f:
        for line in f:
            if line.startswith("#"):  # header/comment rows
                continue
            columns = line.strip().split("^")
            _unicode = columns[0]
            latex = columns[2]
            unicode_math = columns[3]
            if latex and latex not in _symbols:
                _symbols[latex] = _unicode
            if unicode_math and unicode_math not in _symbols:
                _symbols[unicode_math] = _unicode
            # Aliases in the last column, written as "= \alias" or "# \alias".
            for equivalent in re.findall(r"[=#]\s*(\\[^,^ ]+),?", columns[-1]):
                if equivalent not in _symbols:
                    _symbols[equivalent] = _unicode
    # Hand-maintained aliases and overrides not derivable from the data file.
    _symbols.update(
        {
            r"\And": _symbols[r"\ampersand"],
            r"\bigcirc": _symbols[r"\lgwhtcircle"],
            r"\Box": _symbols[r"\square"],
            r"\circledS": "024C8",
            r"\diagdown": "02572",
            r"\diagup": "02571",
            r"\dots": "02026",
            r"\dotsb": _symbols[r"\cdots"],
            r"\dotsc": "02026",
            r"\dotsi": _symbols[r"\cdots"],
            r"\dotsm": _symbols[r"\cdots"],
            r"\dotso": "02026",
            r"\emptyset": "02205",
            r"\gggtr": "022D9",
            r"\gvertneqq": "02269",
            r"\gt": _symbols[r"\greater"],
            r"\ldotp": _symbols[r"\period"],
            r"\llless": _symbols[r"\lll"],
            r"\lt": _symbols[r"\less"],
            r"\lvert": _symbols[r"\vert"],
            r"\lVert": _symbols[r"\Vert"],
            r"\lvertneqq": _symbols[r"\lneqq"],
            r"\ngeqq": _symbols[r"\ngeq"],
            r"\nshortmid": _symbols[r"\nmid"],
            r"\nshortparallel": _symbols[r"\nparallel"],
            r"\nsubseteqq": _symbols[r"\nsubseteq"],
            r"\omicron": _symbols[r"\upomicron"],
            r"\rvert": _symbols[r"\vert"],
            r"\rVert": _symbols[r"\Vert"],
            r"\shortmid": _symbols[r"\mid"],
            r"\smallfrown": _symbols[r"\frown"],
            r"\smallint": "0222B",
            r"\smallsmile": _symbols[r"\smile"],
            r"\surd": _symbols[r"\sqrt"],
            r"\thicksim": "0223C",
            r"\thickapprox": _symbols[r"\approx"],
            r"\varsubsetneqq": _symbols[r"\subsetneqq"],
            r"\varsupsetneq": "0228B",
            r"\varsupsetneqq": _symbols[r"\supsetneqq"],
        }
    )
    del _symbols[r"\mathring"]  # FIXME: improve tokenizer without removing this
    return _symbols
videollama2/lib/python3.10/site-packages/latex2mathml/tokenizer.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from typing import Iterator
3
+
4
+ from latex2mathml import commands
5
+ from latex2mathml.symbols_parser import convert_symbol
6
+
7
+ UNITS = ("in", "mm", "cm", "pt", "em", "ex", "pc", "bp", "dd", "cc", "sp", "mu")
8
+
9
+ PATTERN = re.compile(
10
+ rf"""
11
+ (%[^\n]+) | # comment
12
+ (a-zA-Z) | # letter
13
+ ([_^])(\d) | # number succeeding an underscore or a caret
14
+ (-?\d+(?:\.\d+)?\s*(?:{'|'.join(UNITS)})) | # dimension
15
+ (\d+(?:\.\d+)?) | # integer/decimal
16
+ (\.\d*) | # dot (.) or decimal can start with just a dot
17
+ (\\[\\\[\]{{}}\s!,:>;|_%#$&]) | # escaped characters
18
+ (\\(?:begin|end)\s*{{[a-zA-Z]+\*?}}) | # begin or end
19
+ (\\operatorname\s*{{[a-zA-Z\s*]+\*?\s*}}) | # operatorname
20
+ # color, fbox, href, hbox, mbox, style, text, textbf, textit, textrm, textsf, texttt
21
+ (\\(?:color|fbox|hbox|href|mbox|style|text|textbf|textit|textrm|textsf|texttt))\s*{{([^}}]*)}} |
22
+ (\\[cdt]?frac)\s*([.\d])\s*([.\d])? | # fractions
23
+ (\\math[a-z]+)({{)([a-zA-Z])(}}) | # commands starting with math
24
+ (\\[a-zA-Z]+) | # other commands
25
+ (\S) # non-space character
26
+ """,
27
+ re.VERBOSE,
28
+ )
29
+
30
+
31
def tokenize(latex_string: str, skip_comments: bool = True) -> Iterator[str]:
    """
    Converts Latex string into tokens.

    :param latex_string: Latex string.
    :param skip_comments: Flag to skip comments (default=True).
    """
    for match in PATTERN.finditer(latex_string):
        # Every PATTERN alternative is a capture group, so at least one
        # group is non-None for any match.
        tokens = tuple(filter(lambda x: x is not None, match.groups()))
        # A \math... command with a single-letter argument may map to a
        # single Unicode symbol; if so, emit the entity and skip the parts.
        if tokens[0].startswith(commands.MATH):
            full_math = "".join(tokens)
            symbol = convert_symbol(full_math)
            if symbol:
                yield f"&#x{symbol};"
                continue
        for captured in tokens:
            if skip_comments and captured.startswith("%"):
                break
            # Normalize dimensions like "2 em" -> "2em".
            # NOTE(review): endswith(UNITS) is also true for commands that merely
            # end in a unit suffix; harmless here since only spaces are stripped.
            if captured.endswith(UNITS):
                yield captured.replace(" ", "")
                continue
            # Strip internal spaces from \begin{...}, \end{...}, \operatorname{...}.
            if captured.startswith((commands.BEGIN, commands.END, commands.OPERATORNAME)):
                yield "".join(captured.split(" "))
                continue
            yield captured
videollama2/lib/python3.10/site-packages/latex2mathml/unimathsymbols.txt ADDED
The diff for this file is too large to render. See raw diff
 
videollama2/lib/python3.10/site-packages/latex2mathml/walker.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Tuple
2
+
3
+ from latex2mathml import commands
4
+ from latex2mathml.exceptions import (
5
+ DenominatorNotFoundError,
6
+ DoubleSubscriptsError,
7
+ DoubleSuperscriptsError,
8
+ ExtraLeftOrMissingRightError,
9
+ InvalidAlignmentError,
10
+ InvalidStyleForGenfracError,
11
+ InvalidWidthError,
12
+ LimitsMustFollowMathOperatorError,
13
+ MissingEndError,
14
+ MissingSuperScriptOrSubscriptError,
15
+ NoAvailableTokensError,
16
+ NumeratorNotFoundError,
17
+ )
18
+ from latex2mathml.symbols_parser import convert_symbol
19
+ from latex2mathml.tokenizer import tokenize
20
+
21
+
22
class Node(NamedTuple):
    """One parsed LaTeX element: a token plus optional structural metadata."""

    token: str  # the LaTeX token/command this node represents
    children: Optional[Tuple[Any, ...]] = None  # nested Nodes (arguments / grouped content)
    delimiter: Optional[str] = None  # fence delimiter(s), e.g. for \left or \genfrac
    alignment: Optional[str] = None  # column alignment string for matrix-like environments
    text: Optional[str] = None  # raw text payload (e.g. for \text/\hbox/\fbox)
    attributes: Optional[Dict[str, str]] = None  # extra attributes (width, mathcolor, ...)
    modifier: Optional[str] = None  # \limits / \overbrace / \underbrace attached to scripts
30
+
31
+
32
def walk(data: str) -> List[Node]:
    """Tokenize the LaTeX string *data* and parse it into a list of Nodes."""
    return _walk(tokenize(data))
35
+
36
+
37
def _walk(tokens: Iterator[str], terminator: Optional[str] = None, limit: int = 0) -> List[Node]:
    """Recursively group tokens from the stream into a list of Nodes.

    :param tokens: token iterator produced by ``tokenize``.
    :param terminator: token that closes the current group (e.g. ``}`` or
        ``\\right``); when consumed, the terminator Node is appended last.
    :param limit: stop after collecting this many nodes (0 = unlimited).
    :raises NoAvailableTokensError: if the stream yields nothing at all.
    """
    group: List[Node] = []
    token: str
    has_available_tokens = False
    for token in tokens:
        has_available_tokens = True
        if token == terminator:
            delimiter = None
            if terminator == commands.RIGHT:
                delimiter = next(tokens)  # \right is always followed by its delimiter
            group.append(Node(token=token, delimiter=delimiter))
            break
        elif (token == commands.RIGHT != terminator) or (token == commands.MIDDLE and terminator != commands.RIGHT):
            raise ExtraLeftOrMissingRightError
        elif token == commands.LEFT:
            delimiter = next(tokens)
            children = tuple(_walk(tokens, terminator=commands.RIGHT))  # make \right as a child of \left
            if len(children) == 0 or children[-1].token != commands.RIGHT:
                raise ExtraLeftOrMissingRightError
            node = Node(token=token, children=children if len(children) else None, delimiter=delimiter)
        elif token == commands.OPENING_BRACE:
            # Parse "{...}" into a BRACES node; drop the trailing "}" sentinel.
            children = tuple(_walk(tokens, terminator=commands.CLOSING_BRACE))
            if len(children) and children[-1].token == commands.CLOSING_BRACE:
                children = children[:-1]
            node = Node(token=commands.BRACES, children=children)
        elif token in (commands.SUBSCRIPT, commands.SUPERSCRIPT):
            # Scripts bind to the previously collected node (the base).
            try:
                previous = group.pop()
            except IndexError:
                previous = Node(token="")  # left operand can be empty if not present

            if token == previous.token == commands.SUBSCRIPT:
                raise DoubleSubscriptsError
            if (token == previous.token == commands.SUPERSCRIPT) and (
                previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token != commands.PRIME
            ):
                raise DoubleSuperscriptsError

            modifier = None
            if previous.token == commands.LIMITS:
                # \limits consumes one more node back: the operator it modifies.
                modifier = commands.LIMITS
                try:
                    previous = group.pop()
                    if not previous.token.startswith("\\"):  # TODO: Complete list of operators
                        raise LimitsMustFollowMathOperatorError
                except IndexError:
                    raise LimitsMustFollowMathOperatorError

            # Merge _ after ^ (or vice versa) into a single SUBSUP node.
            if token == commands.SUBSCRIPT and previous.token == commands.SUPERSCRIPT and previous.children is not None:
                children = tuple(_walk(tokens, terminator=terminator, limit=1))
                node = Node(
                    token=commands.SUBSUP,
                    children=(previous.children[0], *children, previous.children[1]),
                    modifier=previous.modifier,
                )
            elif (
                token == commands.SUPERSCRIPT and previous.token == commands.SUBSCRIPT and previous.children is not None
            ):
                children = tuple(_walk(tokens, terminator=terminator, limit=1))
                node = Node(token=commands.SUBSUP, children=(*previous.children, *children), modifier=previous.modifier)
            elif (
                token == commands.SUPERSCRIPT
                and previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and previous.children[1].token == commands.PRIME
            ):
                # x'^2 -> superscript group combining the prime with the exponent.
                children = tuple(_walk(tokens, terminator=terminator, limit=1))

                node = Node(
                    token=commands.SUPERSCRIPT,
                    children=(
                        previous.children[0],
                        Node(token=commands.BRACES, children=(previous.children[1], *children)),
                    ),
                    modifier=previous.modifier,
                )
            else:
                try:
                    children = tuple(_walk(tokens, terminator=terminator, limit=1))
                except NoAvailableTokensError:
                    raise MissingSuperScriptOrSubscriptError
                if previous.token in (commands.OVERBRACE, commands.UNDERBRACE):
                    modifier = previous.token
                node = Node(token=token, children=(previous, *children), modifier=modifier)
        elif token == commands.APOSTROPHE:
            # "'" becomes a PRIME/DPRIME superscript on the previous node.
            try:
                previous = group.pop()
            except IndexError:
                previous = Node(token="")  # left operand can be empty if not present

            if (
                previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token != commands.PRIME
            ):
                raise DoubleSuperscriptsError

            if (
                previous.token == commands.SUPERSCRIPT
                and previous.children is not None
                and len(previous.children) >= 2
                and previous.children[1].token == commands.PRIME
            ):
                node = Node(token=commands.SUPERSCRIPT, children=(previous.children[0], Node(token=commands.DPRIME)))
            elif previous.token == commands.SUBSCRIPT and previous.children is not None:
                node = Node(
                    token=commands.SUBSUP,
                    children=(*previous.children, Node(token=commands.PRIME)),
                    modifier=previous.modifier,
                )
            else:
                node = Node(token=commands.SUPERSCRIPT, children=(previous, Node(token=commands.PRIME)))
        elif token in commands.COMMANDS_WITH_TWO_PARAMETERS:
            attributes = None
            children = tuple(_walk(tokens, terminator=terminator, limit=2))
            if token in (commands.OVERSET, commands.UNDERSET):
                children = children[::-1]  # swap to (base, annotation) order
            node = Node(token=token, children=children, attributes=attributes)
        elif token in commands.COMMANDS_WITH_ONE_PARAMETER or token.startswith(commands.MATH):
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            node = Node(token=token, children=children)
        elif token == commands.NOT:
            # Try to fold \not + \command into a single negated symbol.
            try:
                next_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
                if next_node.token.startswith("\\"):
                    negated_symbol = r"\n" + next_node.token[1:]
                    symbol = convert_symbol(negated_symbol)
                    if symbol:
                        node = Node(token=negated_symbol)
                        group.append(node)
                        continue
                node = Node(token=token)
                group.extend((node, next_node))
                continue
            except NoAvailableTokensError:
                node = Node(token=token)
        elif token in (commands.XLEFTARROW, commands.XRIGHTARROW):
            # Optional [below] argument precedes the mandatory {above} one.
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            if children[0].token == commands.OPENING_BRACKET:
                children = (
                    Node(
                        token=commands.BRACES, children=tuple(_walk(tokens, terminator=commands.CLOSING_BRACKET))[:-1]
                    ),
                    *tuple(_walk(tokens, terminator=terminator, limit=1)),
                )
            node = Node(token=token, children=children)
        elif token in (commands.HSKIP, commands.HSPACE, commands.KERN, commands.MKERN, commands.MSKIP, commands.MSPACE):
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            if children[0].token == commands.BRACES and children[0].children is not None:
                children = children[0].children
            node = Node(token=token, attributes={"width": children[0].token})
        elif token == commands.COLOR:
            # \color applies to everything up to the enclosing terminator.
            attributes = {"mathcolor": next(tokens)}
            children = tuple(_walk(tokens, terminator=terminator))
            sibling = None
            if len(children) and children[-1].token == terminator:
                children, sibling = children[:-1], children[-1]
            group.append(Node(token=token, children=children, attributes=attributes))
            if sibling:
                group.append(sibling)
            break
        elif token == commands.STYLE:
            attributes = {"style": next(tokens)}
            next_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
            node = next_node._replace(attributes=attributes)
        elif token in (
            *commands.BIG.keys(),
            *commands.BIG_OPEN_CLOSE.keys(),
            commands.FBOX,
            commands.HBOX,
            commands.MBOX,
            commands.MIDDLE,
            commands.TEXT,
            commands.TEXTBF,
            commands.TEXTIT,
            commands.TEXTRM,
            commands.TEXTSF,
            commands.TEXTTT,
        ):
            # Commands whose single argument is kept as raw text.
            node = Node(token=token, text=next(tokens))
        elif token == commands.HREF:
            attributes = {"href": next(tokens)}
            children = tuple(_walk(tokens, terminator=terminator, limit=1))
            node = Node(token=token, children=children, attributes=attributes)
        elif token in (
            commands.ABOVE,
            commands.ATOP,
            commands.ABOVEWITHDELIMS,
            commands.ATOPWITHDELIMS,
            commands.BRACE,
            commands.BRACK,
            commands.CHOOSE,
            commands.OVER,
        ):
            # Infix fraction commands: everything before is the numerator,
            # everything after (up to the terminator) the denominator.
            attributes = None
            delimiter = None

            if token == commands.ABOVEWITHDELIMS:
                delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            elif token == commands.ATOPWITHDELIMS:
                attributes = {"linethickness": "0"}
                delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            elif token == commands.BRACE:
                delimiter = "{}"
            elif token == commands.BRACK:
                delimiter = "[]"
            elif token == commands.CHOOSE:
                delimiter = "()"

            if token in (commands.ABOVE, commands.ABOVEWITHDELIMS):
                dimension_node = tuple(_walk(tokens, terminator=terminator, limit=1))[0]
                dimension = _get_dimension(dimension_node)
                attributes = {"linethickness": dimension}
            elif token in (commands.ATOP, commands.BRACE, commands.BRACK, commands.CHOOSE):
                attributes = {"linethickness": "0"}

            denominator = tuple(_walk(tokens, terminator=terminator))

            sibling = None
            if len(denominator) and denominator[-1].token == terminator:
                denominator, sibling = denominator[:-1], denominator[-1]

            if len(denominator) == 0:
                if token in (commands.BRACE, commands.BRACK):
                    denominator = (Node(token=commands.BRACES, children=()),)
                else:
                    raise DenominatorNotFoundError
            if len(group) == 0:
                if token in (commands.BRACE, commands.BRACK):
                    group = [Node(token=commands.BRACES, children=())]
                else:
                    raise NumeratorNotFoundError
            if len(denominator) > 1:
                denominator = (Node(token=commands.BRACES, children=denominator),)

            if len(group) == 1:
                children = (*group, *denominator)
            else:
                children = (Node(token=commands.BRACES, children=tuple(group)), *denominator)
            group = [Node(token=commands.FRAC, children=children, attributes=attributes, delimiter=delimiter)]
            if sibling is not None:
                group.append(sibling)
            break
        elif token == commands.SQRT:
            # Optional [index] turns \sqrt into a ROOT node.
            root_nodes = None
            next_node = tuple(_walk(tokens, limit=1))[0]
            if next_node.token == commands.OPENING_BRACKET:
                root_nodes = tuple(_walk(tokens, terminator=commands.CLOSING_BRACKET))[:-1]
                next_node = tuple(_walk(tokens, limit=1))[0]
                if len(root_nodes) > 1:
                    root_nodes = (Node(token=commands.BRACES, children=root_nodes),)

            if root_nodes:
                node = Node(token=commands.ROOT, children=(next_node, *root_nodes))
            else:
                node = Node(token=token, children=(next_node,))
        elif token == commands.ROOT:
            root_nodes = tuple(_walk(tokens, terminator=r"\of"))[:-1]
            next_node = tuple(_walk(tokens, limit=1))[0]
            if len(root_nodes) > 1:
                root_nodes = (Node(token=commands.BRACES, children=root_nodes),)
            if root_nodes:
                node = Node(token=token, children=(next_node, *root_nodes))
            else:
                node = Node(token=token, children=(next_node, Node(token=commands.BRACES, children=())))
        elif token in commands.MATRICES:
            children = tuple(_walk(tokens, terminator=terminator))
            sibling = None
            if len(children) and children[-1].token == terminator:
                children, sibling = children[:-1], children[-1]
            if len(children) == 1 and children[0].token == commands.BRACES and children[0].children:
                children = children[0].children
            if sibling is not None:
                group.extend([Node(token=token, children=children, alignment=""), sibling])
                break
            else:
                node = Node(token=token, children=children, alignment="")
        elif token == commands.GENFRAC:
            # \genfrac{ldelim}{rdelim}{thickness}{style}{num}{den}
            delimiter = next(tokens).lstrip("\\") + next(tokens).lstrip("\\")
            dimension_node, style_node = tuple(_walk(tokens, terminator=terminator, limit=2))
            dimension = _get_dimension(dimension_node)
            style = _get_style(style_node)
            attributes = {"linethickness": dimension}
            children = tuple(_walk(tokens, terminator=terminator, limit=2))
            group.extend(
                [Node(token=style), Node(token=token, children=children, delimiter=delimiter, attributes=attributes)]
            )
            break
        elif token == commands.SIDESET:
            # \sideset{_a^b}{_c^d}\op: scripts attach around a shared operator;
            # the left side uses a \vphantom copy so heights match.
            left, right, operator = tuple(_walk(tokens, terminator=terminator, limit=3))
            left_token, left_children = _make_subsup(left)
            right_token, right_children = _make_subsup(right)
            attributes = {"movablelimits": "false"}
            node = Node(
                token=token,
                children=(
                    Node(
                        token=left_token,
                        children=(
                            Node(
                                token=commands.VPHANTOM,
                                children=(
                                    Node(token=operator.token, children=operator.children, attributes=attributes),
                                ),
                            ),
                            *left_children,
                        ),
                    ),
                    Node(
                        token=right_token,
                        children=(
                            Node(token=operator.token, children=operator.children, attributes=attributes),
                            *right_children,
                        ),
                    ),
                ),
            )
        elif token == commands.SKEW:
            width_node, child = tuple(_walk(tokens, terminator=terminator, limit=2))
            width = width_node.token
            if width == commands.BRACES:
                if width_node.children is None or len(width_node.children) == 0:
                    raise InvalidWidthError
                width = width_node.children[0].token
            if not width.isdigit():
                raise InvalidWidthError
            # Width argument is in math units (mu = 1/18 em ~ 0.0555 em).
            node = Node(token=token, children=(child,), attributes={"width": f"{0.0555 * int(width):.3f}em"})
        elif token.startswith(commands.BEGIN):
            node = _get_environment_node(token, tokens)
        else:
            node = Node(token=token)

        group.append(node)

        if limit and len(group) >= limit:
            break
    if not has_available_tokens:
        raise NoAvailableTokensError
    return group
379
+
380
+
381
def _make_subsup(node: Node) -> Tuple[str, Tuple[Node, ...]]:
    """Unwrap a braced sub/superscript group for \\sideset.

    Returns the script token (SUBSUP/SUBSCRIPT/SUPERSCRIPT) and its script
    children (base dropped); returns ("", ()) for an empty group.
    """
    # TODO: raise error instead of assertion
    assert node.token == commands.BRACES
    try:
        assert (
            node.children is not None
            and 2 <= len(node.children[0].children) <= 3
            and node.children[0].token
            in (
                commands.SUBSUP,
                commands.SUBSCRIPT,
                commands.SUPERSCRIPT,
            )
        )
        token = node.children[0].token
        # Drop the (empty) base at index 0; keep only the script operands.
        children = node.children[0].children[1:]
        return token, children
    except IndexError:
        # Empty braces: no scripts on this side.
        return "", ()
400
+
401
+
402
def _get_dimension(node: Node) -> str:
    """Return a dimension token, unwrapping a brace group's first child if present."""
    if node.token == commands.BRACES and node.children is not None:
        return node.children[0].token
    return node.token
407
+
408
+
409
def _get_style(node: Node) -> str:
    """Map a \\genfrac style digit (0-3) to the matching style command.

    :raises InvalidStyleForGenfracError: for any other value.
    """
    style = node.token
    if node.token == commands.BRACES and node.children is not None:
        style = node.children[0].token
    styles = {
        "0": commands.DISPLAYSTYLE,
        "1": commands.TEXTSTYLE,
        "2": commands.SCRIPTSTYLE,
        "3": commands.SCRIPTSCRIPTSTYLE,
    }
    if style in styles:
        return styles[style]
    raise InvalidStyleForGenfracError
422
+
423
+
424
def _get_environment_node(token: str, tokens: Iterator[str]) -> Node:
    """Parse a \\begin{env}...\\end{env} block into an environment Node.

    Extracts an optional alignment spec (characters from ``lcr|``) given either
    as ``[...]`` after \\begin or as a leading brace group.
    :raises MissingEndError: if the matching \\end token is never reached.
    """
    # TODO: support non-matrix environments
    start_index = token.index("{") + 1
    environment = token[start_index:-1]
    terminator = rf"{commands.END}{{{environment}}}"
    children = tuple(_walk(tokens, terminator=terminator))
    if len(children) and children[-1].token != terminator:
        raise MissingEndError
    children = children[:-1]  # drop the \end{...} sentinel
    alignment = ""

    if len(children) and children[0].token == commands.OPENING_BRACKET:
        # Alignment given as [lcr|...] immediately after \begin{env}.
        children_iter = iter(children)
        next(children_iter)  # remove BRACKET
        for c in children_iter:
            if c.token == commands.CLOSING_BRACKET:
                break
            elif c.token not in "lcr|":
                raise InvalidAlignmentError
            alignment += c.token
        children = tuple(children_iter)
    elif (
        len(children)
        and children[0].children is not None
        and (
            children[0].token == commands.BRACES
            or (environment.endswith("*") and children[0].token == commands.BRACKETS)
        )
        and all(c.token in "lcr|" for c in children[0].children)
    ):
        # Alignment given as a leading {lcr|...} group (or [..] for starred envs).
        alignment = "".join(c.token for c in children[0].children)
        children = children[1:]

    return Node(token=rf"\{environment}", children=children, alignment=alignment)
videollama2/lib/python3.10/site-packages/shortuuid/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Re-export the public API of shortuuid.main at package level.
from shortuuid.main import decode
from shortuuid.main import encode
from shortuuid.main import get_alphabet
from shortuuid.main import random
from shortuuid.main import set_alphabet
from shortuuid.main import ShortUUID
from shortuuid.main import uuid

__version__ = "1.0.11"
# Explicit public surface for `from shortuuid import *`.
__all__ = [
    "decode",
    "encode",
    "get_alphabet",
    "random",
    "set_alphabet",
    "ShortUUID",
    "uuid",
]
videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (481 Bytes). View file
 
videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/cli.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/django_fields.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
videollama2/lib/python3.10/site-packages/shortuuid/__pycache__/main.cpython-310.pyc ADDED
Binary file (4.94 kB). View file
 
videollama2/lib/python3.10/site-packages/shortuuid/cli.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import sys
3
+ from typing import Any
4
+ from uuid import UUID
5
+
6
+ from .main import decode
7
+ from .main import encode
8
+ from .main import uuid
9
+
10
+
11
def encode_cli(args: argparse.Namespace):
    """Print the short encoding of the UUID passed on the command line."""
    shortened = encode(args.uuid)
    print(shortened)
13
+
14
+
15
def decode_cli(args: argparse.Namespace):
    """Print the full UUID decoded from the short UUID on the command line."""
    decoded = decode(args.shortuuid, legacy=args.legacy)
    print(str(decoded))
17
+
18
+
19
def cli(*args: Any) -> None:
    """Entry point for the shortuuid command-line interface.

    With no sub-command a random short UUID is printed; the ``encode`` and
    ``decode`` sub-commands convert between UUIDs and short UUIDs.
    """
    root_parser = argparse.ArgumentParser(
        description="Generate, encode and decode shortuuids",
        epilog="top-level command generates a random shortuuid",
    )
    sub_commands = root_parser.add_subparsers(help="sub-command help")

    enc = sub_commands.add_parser(
        "encode", help="Encode a UUID into a short UUID", description=encode.__doc__
    )
    enc.add_argument("uuid", type=UUID, help="UUID to be encoded")
    enc.set_defaults(func=encode_cli)

    dec = sub_commands.add_parser(
        "decode", help="Decode a short UUID into a UUID", description=decode.__doc__
    )
    dec.add_argument("shortuuid", type=str, help="Short UUID to be decoded")
    dec.add_argument("--legacy", action="store_true")
    dec.set_defaults(func=decode_cli)

    namespace = root_parser.parse_args(*args)

    # Sub-commands install `func`; its absence means bare `shortuuid`.
    if hasattr(namespace, "func"):
        namespace.func(namespace)
    else:
        # Maintain legacy behaviour
        print(uuid())
47
+
48
+
49
if __name__ == "__main__":
    # Forward real CLI arguments, excluding the program name.
    cli(sys.argv[1:])
videollama2/lib/python3.10/site-packages/shortuuid/django_fields.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from typing import Dict
3
+ from typing import Tuple
4
+
5
+ from django.db import models
6
+ from django.utils.translation import gettext_lazy as _
7
+
8
+ from . import ShortUUID
9
+
10
+
11
class ShortUUIDField(models.CharField):
    """Django CharField whose default value is a freshly generated short UUID,
    optionally prefixed and drawn from a custom alphabet."""

    description = _("A short UUID field.")

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pop the custom options before CharField sees kwargs.
        self.length: int = kwargs.pop("length", 22)  # type: ignore
        self.prefix: str = kwargs.pop("prefix", "")  # type: ignore

        if "max_length" not in kwargs:
            # If `max_length` was not specified, set it here.
            kwargs["max_length"] = self.length + len(self.prefix)  # type: ignore

        # A None alphabet falls through to ShortUUID's default alphabet.
        self.alphabet: str = kwargs.pop("alphabet", None)  # type: ignore
        kwargs["default"] = self._generate_uuid  # type: ignore

        super().__init__(*args, **kwargs)

    def _generate_uuid(self) -> str:
        """Generate a short random string."""
        return self.prefix + ShortUUID(alphabet=self.alphabet).random(
            length=self.length
        )

    def deconstruct(self) -> Tuple[str, str, Tuple, Dict[str, Any]]:
        """Re-inject the custom options so migrations can rebuild this field.

        The callable default is dropped from kwargs because __init__ always
        re-installs `_generate_uuid` as the default.
        """
        name, path, args, kwargs = super().deconstruct()
        kwargs["alphabet"] = self.alphabet
        kwargs["length"] = self.length
        kwargs["prefix"] = self.prefix
        kwargs.pop("default", None)
        return name, path, args, kwargs
videollama2/lib/python3.10/site-packages/shortuuid/main.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Concise UUID generation."""
2
+
3
+ import math
4
+ import secrets
5
+ import uuid as _uu
6
+ from typing import List
7
+ from typing import Optional
8
+
9
+
10
def int_to_string(
    number: int, alphabet: List[str], padding: Optional[int] = None
) -> str:
    """
    Convert a non-negative integer to a string over the given alphabet.

    Digits are produced least-significant first and reversed at the end, so
    the returned string has the most significant digit first.  When `padding`
    is given, the result is left-padded with the alphabet's first symbol up
    to that length.
    """
    base = len(alphabet)
    lsb_first = ""
    remaining = number
    while remaining:
        remaining, digit = divmod(remaining, base)
        lsb_first += alphabet[digit]
    if padding:
        shortfall = max(padding - len(lsb_first), 0)
        lsb_first += alphabet[0] * shortfall
    return lsb_first[::-1]
27
+
28
+
29
def string_to_int(string: str, alphabet: List[str]) -> int:
    """
    Convert a string over the given alphabet back into an integer.

    The input is interpreted with the most significant digit first; an empty
    string yields 0.  Raises ValueError (via list.index) for characters not
    present in the alphabet.
    """
    base = len(alphabet)
    digit_of = alphabet.index  # hoist the method lookup out of the loop
    value = 0
    for symbol in string:
        value = value * base + digit_of(symbol)
    return value
40
+
41
+
42
class ShortUUID(object):
    """Translate UUIDs to and from shorter strings over a configurable alphabet."""

    def __init__(self, alphabet: Optional[str] = None) -> None:
        # The default alphabet omits visually ambiguous characters (0/O, 1/I/l).
        if alphabet is None:
            alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
        self.set_alphabet(alphabet)

    @property
    def _length(self) -> int:
        """Return the necessary length to fit the entire UUID given the current alphabet."""
        uuid_space = 2**128
        return int(math.ceil(math.log(uuid_space, self._alpha_len)))

    def encode(self, uuid: _uu.UUID, pad_length: Optional[int] = None) -> str:
        """
        Encode a UUID into a string (LSB first) according to the alphabet.

        If leftmost (MSB) bits are 0, the string might be shorter.
        """
        if not isinstance(uuid, _uu.UUID):
            raise ValueError("Input `uuid` must be a UUID object.")
        effective_pad = self._length if pad_length is None else pad_length
        return int_to_string(uuid.int, self._alphabet, padding=effective_pad)

    def decode(self, string: str, legacy: bool = False) -> _uu.UUID:
        """
        Decode a string according to the current alphabet into a UUID.

        Raises ValueError when encountering illegal characters or a too-long string.

        If string too short, fills leftmost (MSB) bits with 0.

        Pass `legacy=True` if your UUID was encoded with a ShortUUID version prior to
        1.0.0.
        """
        if not isinstance(string, str):
            raise ValueError("Input `string` must be a str.")
        # Pre-1.0 releases encoded LSB-first, so legacy input is reversed.
        text = string[::-1] if legacy else string
        return _uu.UUID(int=string_to_int(text, self._alphabet))

    def uuid(self, name: Optional[str] = None, pad_length: Optional[int] = None) -> str:
        """
        Generate and return a UUID.

        If the name parameter is provided, set the namespace to the provided
        name and generate a UUID.
        """
        effective_pad = self._length if pad_length is None else pad_length

        if name is None:
            # No name: random UUID4.
            generated = _uu.uuid4()
        elif name.lower().startswith(("http://", "https://")):
            generated = _uu.uuid5(_uu.NAMESPACE_URL, name)
        else:
            generated = _uu.uuid5(_uu.NAMESPACE_DNS, name)
        return self.encode(generated, effective_pad)

    def random(self, length: Optional[int] = None) -> str:
        """Generate and return a cryptographically secure short random string of `length`."""
        count = self._length if length is None else length
        pick = secrets.choice
        return "".join(pick(self._alphabet) for _ in range(count))

    def get_alphabet(self) -> str:
        """Return the current alphabet used for new UUIDs."""
        return "".join(self._alphabet)

    def set_alphabet(self, alphabet: str) -> None:
        """Set the alphabet to be used for new UUIDs."""
        # Deduplicate and sort so the same input characters always produce
        # the same effective alphabet.
        unique_symbols = sorted(set(alphabet))
        if len(unique_symbols) <= 1:
            raise ValueError("Alphabet with more than one unique symbols required.")
        self._alphabet = unique_symbols
        self._alpha_len = len(self._alphabet)

    def encoded_length(self, num_bytes: int = 16) -> int:
        """Return the string length of the shortened UUID."""
        symbols_per_byte = math.log(256) / math.log(self._alpha_len)
        return int(math.ceil(symbols_per_byte * num_bytes))
128
+
129
+
130
# For backwards compatibility
# Module-level convenience functions, all backed by a single shared
# default-alphabet instance (set_alphabet mutates that shared state).
_global_instance = ShortUUID()
encode = _global_instance.encode
decode = _global_instance.decode
uuid = _global_instance.uuid
random = _global_instance.random
get_alphabet = _global_instance.get_alphabet
set_alphabet = _global_instance.set_alphabet
videollama2/lib/python3.10/site-packages/shortuuid/py.typed ADDED
File without changes
videollama2/lib/python3.10/site-packages/shortuuid/test_shortuuid.py ADDED
@@ -0,0 +1,224 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import string
3
+ import sys
4
+ import unittest
5
+ from collections import defaultdict
6
+ from unittest.mock import patch
7
+ from uuid import UUID
8
+ from uuid import uuid4
9
+
10
+ from shortuuid.cli import cli
11
+ from shortuuid.main import decode
12
+ from shortuuid.main import encode
13
+ from shortuuid.main import get_alphabet
14
+ from shortuuid.main import random
15
+ from shortuuid.main import set_alphabet
16
+ from shortuuid.main import ShortUUID
17
+ from shortuuid.main import uuid
18
+
19
+ sys.path.insert(0, os.path.abspath(__file__ + "/../.."))
20
+
21
+
22
class LegacyShortUUIDTest(unittest.TestCase):
    """Exercise the module-level (pre-1.0 style) shortuuid API."""

    def test_generation(self):
        # Default 57-symbol alphabet encodes 128 bits in 22 chars; bounds are loose.
        self.assertTrue(20 < len(uuid()) < 24)
        self.assertTrue(20 < len(uuid("http://www.example.com/")) < 24)
        self.assertTrue(20 < len(uuid("HTTP://www.example.com/")) < 24)
        self.assertTrue(20 < len(uuid("example.com/")) < 24)

    def test_encoding(self):
        # Known-answer test against a fixed UUID.
        u = UUID("{3b1f8b40-222c-4a6e-b77e-779d5a94e21c}")
        self.assertEqual(encode(u), "CXc85b4rqinB7s5J52TRYb")

    def test_decoding(self):
        u = UUID("{3b1f8b40-222c-4a6e-b77e-779d5a94e21c}")
        self.assertEqual(decode("CXc85b4rqinB7s5J52TRYb"), u)

    def test_alphabet(self):
        # Save the shared alphabet so we can restore it at the end.
        backup_alphabet = get_alphabet()

        alphabet = "01"
        set_alphabet(alphabet)
        self.assertEqual(alphabet, get_alphabet())

        # Duplicates collapse to the same effective alphabet.
        set_alphabet("01010101010101")
        self.assertEqual(alphabet, get_alphabet())

        self.assertEqual(set(uuid()), set("01"))
        self.assertTrue(116 < len(uuid()) < 140)

        # encode/decode must round-trip in both directions.
        u = uuid4()
        self.assertEqual(u, decode(encode(u)))

        u = uuid()
        self.assertEqual(u, encode(decode(u)))

        # Single-symbol alphabets are rejected.
        self.assertRaises(ValueError, set_alphabet, "1")
        self.assertRaises(ValueError, set_alphabet, "1111111")

        set_alphabet(backup_alphabet)

        self.assertRaises(ValueError, lambda x: ShortUUID(x), "0")

    def test_random(self):
        self.assertEqual(len(random()), 22)
        for i in range(1, 100):
            self.assertEqual(len(random(i)), i)
67
+
68
+
69
class ClassShortUUIDTest(unittest.TestCase):
    """Exercise the instance-based ShortUUID API."""

    def test_generation(self):
        su = ShortUUID()
        self.assertTrue(20 < len(su.uuid()) < 24)
        self.assertTrue(20 < len(su.uuid("http://www.example.com/")) < 24)
        self.assertTrue(20 < len(su.uuid("HTTP://www.example.com/")) < 24)
        self.assertTrue(20 < len(su.uuid("example.com/")) < 24)

    def test_encoding(self):
        su = ShortUUID()
        u = UUID("{3b1f8b40-222c-4a6e-b77e-779d5a94e21c}")
        self.assertEqual(su.encode(u), "CXc85b4rqinB7s5J52TRYb")

    def test_decoding(self):
        su = ShortUUID()
        u = UUID("{3b1f8b40-222c-4a6e-b77e-779d5a94e21c}")
        self.assertEqual(su.decode("CXc85b4rqinB7s5J52TRYb"), u)

    def test_random(self):
        su = ShortUUID()
        # Default length is 22 for the default alphabet.
        for i in range(1000):
            self.assertEqual(len(su.random()), 22)

        for i in range(1, 100):
            self.assertEqual(len(su.random(i)), i)

    def test_alphabet(self):
        # Instances have independent alphabets; su2 keeps the default.
        alphabet = "01"
        su1 = ShortUUID(alphabet)
        su2 = ShortUUID()

        self.assertEqual(alphabet, su1.get_alphabet())

        # Duplicate symbols collapse to the same effective alphabet.
        su1.set_alphabet("01010101010101")
        self.assertEqual(alphabet, su1.get_alphabet())

        self.assertEqual(set(su1.uuid()), set("01"))
        self.assertTrue(116 < len(su1.uuid()) < 140)
        self.assertTrue(20 < len(su2.uuid()) < 24)

        # Round-trips in both directions.
        u = uuid4()
        self.assertEqual(u, su1.decode(su1.encode(u)))

        u = su1.uuid()
        self.assertEqual(u, su1.encode(su1.decode(u)))

        self.assertRaises(ValueError, su1.set_alphabet, "1")
        self.assertRaises(ValueError, su1.set_alphabet, "1111111")

    def test_encoded_length(self):
        su1 = ShortUUID()
        self.assertEqual(su1.encoded_length(), 22)

        # 64-symbol alphabet also needs 22 chars for 128 bits.
        base64_alphabet = (
            string.ascii_uppercase + string.ascii_lowercase + string.digits + "+/"
        )

        su2 = ShortUUID(base64_alphabet)
        self.assertEqual(su2.encoded_length(), 22)

        # Binary alphabet needs one symbol per bit.
        binary_alphabet = "01"
        su3 = ShortUUID(binary_alphabet)
        self.assertEqual(su3.encoded_length(), 128)

        su4 = ShortUUID()
        self.assertEqual(su4.encoded_length(num_bytes=8), 11)
135
+
136
+
137
class ShortUUIDPaddingTest(unittest.TestCase):
    """Verify that encoding pads to a constant length regardless of UUID value."""

    def test_padding(self):
        su = ShortUUID()
        random_uid = uuid4()
        smallest_uid = UUID(int=0)

        # The all-zero UUID must encode to the same length as a random one.
        encoded_random = su.encode(random_uid)
        encoded_small = su.encode(smallest_uid)

        self.assertEqual(len(encoded_random), len(encoded_small))

    def test_decoding(self):
        su = ShortUUID()
        random_uid = uuid4()
        smallest_uid = UUID(int=0)

        encoded_random = su.encode(random_uid)
        encoded_small = su.encode(smallest_uid)

        # Padded strings must still decode back to the original UUIDs.
        self.assertEqual(su.decode(encoded_small), smallest_uid)
        self.assertEqual(su.decode(encoded_random), random_uid)

    def test_consistency(self):
        su = ShortUUID()
        num_iterations = 1000
        uid_lengths = defaultdict(int)

        for count in range(num_iterations):
            random_uid = uuid4()
            encoded_random = su.encode(random_uid)
            uid_lengths[len(encoded_random)] += 1
            decoded_random = su.decode(encoded_random)

            self.assertEqual(random_uid, decoded_random)

        # Every encoding observed must share a single, common length.
        self.assertEqual(len(uid_lengths), 1)
        uid_length = next(iter(uid_lengths.keys()))  # Get the 1 value

        self.assertEqual(uid_lengths[uid_length], num_iterations)
176
+
177
+
178
class EncodingEdgeCasesTest(unittest.TestCase):
    """encode() must reject anything that is not a UUID instance."""

    # NOTE(review): the method name says "decode" but it mostly exercises
    # encode(); kept as-is so test discovery is unchanged.
    def test_decode_dict(self):
        su = ShortUUID()
        for non_uuid in ([], {}, 42, 42.0):
            self.assertRaises(ValueError, su.encode, non_uuid)
        self.assertRaises(ValueError, su.decode, (2,))
186
+
187
+
188
class DecodingEdgeCasesTest(unittest.TestCase):
    """decode() must reject anything that is not a str."""

    def test_decode_dict(self):
        su = ShortUUID()
        for non_string in ([], {}, (2,), 42, 42.0):
            self.assertRaises(ValueError, su.decode, non_string)
196
+
197
+
198
class CliTest(unittest.TestCase):
    """Drive the command-line interface and capture its printed output by
    patching the print builtin inside shortuuid.cli."""

    @patch("shortuuid.cli.print")
    def test_shortuuid_command_produces_uuid(self, mock_print):
        # When we call the main cli function
        cli([])
        # Then a shortuuid is printed out
        mock_print.assert_called()
        terminal_output = mock_print.call_args[0][0]
        # Default alphabet encodes any UUID in exactly 22 characters.
        self.assertEqual(len(terminal_output), 22)

    @patch("shortuuid.cli.print")
    def test_encode_command(self, mock_print):
        cli(["encode", "3b1f8b40-222c-4a6e-b77e-779d5a94e21c"])

        terminal_output = mock_print.call_args[0][0]
        self.assertEqual(terminal_output, "CXc85b4rqinB7s5J52TRYb")

    @patch("shortuuid.cli.print")
    def test_decode_command(self, mock_print):
        cli(["decode", "CXc85b4rqinB7s5J52TRYb"])

        terminal_output = mock_print.call_args[0][0]
        self.assertEqual(terminal_output, "3b1f8b40-222c-4a6e-b77e-779d5a94e21c")
221
+
222
+
223
if __name__ == "__main__":
    # Allow running this test module directly without a test runner.
    unittest.main()