diff --git a/.gitattributes b/.gitattributes
index 04b1ea087ed52d34cbf97716d76a47bb9d9848c2..f2393ace7c10193d730e9e98485264cacbfa51eb 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -541,3 +541,247 @@ lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so filter=lf
 lib/python3.10/site-packages/pykdtree/kdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/pykdtree.libs/libgomp-870cb1d0.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/matplotlib/_image.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/matplotlib/_path.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/matplotlib/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/matplotlib/_tri.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/matplotlib/_c_internal_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/matplotlib/ft2font.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/mkl/_py_mkl_service.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/mkl_fft/_pydfti.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/mkl_random/mklrand.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/contourpy/_contourpy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/kiwisolver/_cext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/lib/liblexers.a filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/lib/libsys.a filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/lib/libembree4.a filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/bitstream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/buffer.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/bytesource.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/descriptor.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/dictionary.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/error.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/format.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/logging.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/opaque.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/option.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/packet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libmp3lame-68ba0ecb.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libmvec-2-8eb5c230.28.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libnettle-a4970681.so.8.10 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libopencore-amrnb-e5261d48.so.0.0.3 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libopenh264-7bd47c3a.so.2.6.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libopus-a676965d.so.0.10.1 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libspeex-dd5a2d1c.so.1.5.2 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libsrt-ccd6ae88.so.1.5.4 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libssl-60250281.so.1.1.1k filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libSvtAv1Enc-6870dfd5.so.3.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/cv2/cv2.abi3.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c22de.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy.libs/libscipy_openblas-68440149.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/functorch/_C.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/gmpy2/gmpy2.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/orb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/graph/heap.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/graph/_mcp.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/graph/_ncut_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/graph/_spath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/measure/_ccomp.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/measure/_find_contours_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/measure/_marching_cubes_lewiner_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/measure/_moments_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/measure/_pnpoly.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_convex_hull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_extrema_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_flood_fill_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_grayreconstruct.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_max_tree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_misc_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_skeletonize_lee_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/morphology/_skeletonize_various_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/resampler.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/format.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/audio/frame.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/codec/hwaccel.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/codec/context.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/container/core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/container/output.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/container/pyio.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/context.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/filter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/link.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libswresample-f1bdf0d4.so.5.3.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libswscale-5efb2ca5.so.8.3.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libtwolame-dfe0c2c6.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libunistring-7eaffe9f.so.5.2.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libvorbis-7463f6bd.so.0.4.9 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libvorbisenc-131c2ed7.so.2.0.12 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libvpx-09740bc5.so.11.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libwebp-bc89f640.so.7.1.10 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libx264-b1bb65f5.so.165 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libx265-169666e3.so.215 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libxcb-5ddf6756.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libasound-d5229d1a.so.2.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libaom-170d518b.so.3.11.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libavdevice-0a717e7d.so.61.3.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libavfilter-7ceaa51a.so.10.4.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libavformat-f6caa08d.so.61.7.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libavutil-a63ffd27.so.59.39.100 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libcrypto-bdaed0ea.so.1.1.1k filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libavcodec-7ee0753d.so.61.19.101 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libgmp-29b2ba5e.so.10.5.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libdav1d-f1894f21.so.7.0.0 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libhogweed-033e28eb.so.6.10 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av.libs/libgnutls-cd598300.so.30.40.3 filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/brotlicffi/_brotlicffi.abi3.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/subtitles/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/graph.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/av/filter/pad.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/motorcycle_left.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/motorcycle_right.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/retina.jpg filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/rocket.jpg filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/astronaut.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/brick.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/camera.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/chelsea.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/coffee.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/grass.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/gravel.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/hubble_deep_field.jpg filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/ihc.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/data/logo.png filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/draw/_draw.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_canny_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_cascade.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_haar.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_hoghistogram.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_sift.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/_texture.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/brief_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/censure_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/feature/corner_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_denoise_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_inpaint.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_nl_means_denoising.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_rolling_ball_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_unwrap_1d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_unwrap_2d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/restoration/_unwrap_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/transform/_hough_transform.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/transform/_radon_transform.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/transform/_warps_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/util/_remap.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/_shared/geometry.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/_shared/transform.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/rank/core_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/rank/bilateral_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/rank/percentile_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/share/doc/libigl/readme.pdf filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_interpnd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_linalg_pythran.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_decomp_interpolative.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_special_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/__pycache__/_distribution_infrastructure.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/lib/python3.10/site-packages/av.libs/libSvtAv1Enc-6870dfd5.so.3.0.0 b/lib/python3.10/site-packages/av.libs/libSvtAv1Enc-6870dfd5.so.3.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..16d00772a3d1438f0b1a4b51480c40952cde13ce
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libSvtAv1Enc-6870dfd5.so.3.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0955622e9cd38dfc78d6977a53a942cdf54b8b1eb38e5e589f68a3bf75a0931c
+size 7946585
diff --git a/lib/python3.10/site-packages/av.libs/libaom-170d518b.so.3.11.0 b/lib/python3.10/site-packages/av.libs/libaom-170d518b.so.3.11.0
new file mode 100644
index 0000000000000000000000000000000000000000..785c67d02337c643e4fe38ed6a7ad74f0fee3f92
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libaom-170d518b.so.3.11.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e819d28743a34e87d16e8f4fa8c45386229f0722bc1ee76378758085f328bd9c
+size 7731785
diff --git a/lib/python3.10/site-packages/av.libs/libasound-d5229d1a.so.2.0.0 b/lib/python3.10/site-packages/av.libs/libasound-d5229d1a.so.2.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..375ceb8358a8cb2a7e68a3deb39a3706cede4f1d
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libasound-d5229d1a.so.2.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3c958c58a0eb012a48fb9ab88fcbe164027a8abd48647876b4cfb327141800f
+size 1162329
diff --git a/lib/python3.10/site-packages/av.libs/libavcodec-7ee0753d.so.61.19.101 b/lib/python3.10/site-packages/av.libs/libavcodec-7ee0753d.so.61.19.101
new file mode 100644
index 0000000000000000000000000000000000000000..03fe8058fa696c59b248c62e771c02ac8f0d937f
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libavcodec-7ee0753d.so.61.19.101
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea371be1b2711cf7f1b38a6f97beab079cbf932b691f0a2d480fcf73d53b842a
+size 15457321
diff --git a/lib/python3.10/site-packages/av.libs/libavdevice-0a717e7d.so.61.3.100 b/lib/python3.10/site-packages/av.libs/libavdevice-0a717e7d.so.61.3.100
new file mode 100644
index 0000000000000000000000000000000000000000..8fa47bb856698de300b80d7cf248a0e1efd41eb3
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libavdevice-0a717e7d.so.61.3.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:431b66d130a55e3c9ed6ad19ddc7b3d57f705173b5562a5f10ea38a8e41deffb
+size 111513
diff --git a/lib/python3.10/site-packages/av.libs/libavfilter-7ceaa51a.so.10.4.100 b/lib/python3.10/site-packages/av.libs/libavfilter-7ceaa51a.so.10.4.100
new file mode 100644
index 0000000000000000000000000000000000000000..8f64af7d5aa79e5584d754c827c943896bba8105
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libavfilter-7ceaa51a.so.10.4.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bbf737a05eb9ecccc080861b6204d4b115569b2d37000495311d1e213b212f0
+size 4509361
diff --git a/lib/python3.10/site-packages/av.libs/libavformat-f6caa08d.so.61.7.100 b/lib/python3.10/site-packages/av.libs/libavformat-f6caa08d.so.61.7.100
new file mode 100644
index 0000000000000000000000000000000000000000..604a6a30b075ec6e0c3d37242ed6ad5f6379e987
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libavformat-f6caa08d.so.61.7.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff4c0c69c5a2ecfb60cee596dc79a5501edb6dc3056ec40e7662944eaf9c4382
+size 2777777
diff --git a/lib/python3.10/site-packages/av.libs/libavutil-a63ffd27.so.59.39.100 b/lib/python3.10/site-packages/av.libs/libavutil-a63ffd27.so.59.39.100
new file mode 100644
index 0000000000000000000000000000000000000000..5e2c8f7f42178d2102d5956380165b67e110aaef
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libavutil-a63ffd27.so.59.39.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58199bb31a37f51b4fc50ba28275a3a3bf2eefe11d515d6a903ff939682b304d
+size 1053481
diff --git a/lib/python3.10/site-packages/av.libs/libcrypto-bdaed0ea.so.1.1.1k b/lib/python3.10/site-packages/av.libs/libcrypto-bdaed0ea.so.1.1.1k
new file mode 100644
index 0000000000000000000000000000000000000000..d4bb826c577ed50444c749869b8f7bde8bd34a54
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libcrypto-bdaed0ea.so.1.1.1k
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5e4d4d446f022a7312ca1304020b15430cb367314caf6e0251ccf6f7ec8deea
+size 3215921
diff --git a/lib/python3.10/site-packages/av.libs/libdav1d-f1894f21.so.7.0.0 b/lib/python3.10/site-packages/av.libs/libdav1d-f1894f21.so.7.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..cc30c4edbba88fa6f4a513fcfa6b76b3cd7a62e1
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libdav1d-f1894f21.so.7.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd6a9fb57526764732f58b28b82a9e172b2e6673871a0008a0e8c524d282f51a
+size 2024769
diff --git a/lib/python3.10/site-packages/av.libs/libgmp-29b2ba5e.so.10.5.0 b/lib/python3.10/site-packages/av.libs/libgmp-29b2ba5e.so.10.5.0
new file mode 100644
index 0000000000000000000000000000000000000000..7e1f9de8e518ddc806b72995d7f8e53ab1d768cd
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libgmp-29b2ba5e.so.10.5.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5257c84cfbae5924c7099046ba924eb0ee15d53d4e9a020d00e4e34169d5a3aa
+size 515785
diff --git a/lib/python3.10/site-packages/av.libs/libgnutls-cd598300.so.30.40.3 b/lib/python3.10/site-packages/av.libs/libgnutls-cd598300.so.30.40.3
new file mode 100644
index 0000000000000000000000000000000000000000..794a9ad0ea6cc85ec0a474c34f6d072e94ecf816
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libgnutls-cd598300.so.30.40.3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31e889e0ba9ead833b4bc4c0f0b66e172e2ecc479f2fad2240779fade582e810
+size 2301097
diff --git a/lib/python3.10/site-packages/av.libs/libhogweed-033e28eb.so.6.10 b/lib/python3.10/site-packages/av.libs/libhogweed-033e28eb.so.6.10
new file mode 100644
index 0000000000000000000000000000000000000000..cdad37938a6c8c25b66c51433f710a280d6a254b
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libhogweed-033e28eb.so.6.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:495345f7b87e995c37b8a997b874bd0e5e1975fff4b3986c52e1a92f1e580c8a
+size 320913
diff --git a/lib/python3.10/site-packages/av.libs/libmp3lame-68ba0ecb.so.0.0.0 b/lib/python3.10/site-packages/av.libs/libmp3lame-68ba0ecb.so.0.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..290305845a571d247da292d7f626f816eb776a10
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libmp3lame-68ba0ecb.so.0.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a141bfe6d6c865e87075a4746aa805b3b7b8804b21b205962fc38982cb9a391
+size 421105
diff --git a/lib/python3.10/site-packages/av.libs/libmvec-2-8eb5c230.28.so b/lib/python3.10/site-packages/av.libs/libmvec-2-8eb5c230.28.so
new file mode 100644
index 0000000000000000000000000000000000000000..c871c6347bf3b15e476731c50ed9d8f8c215c32d
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libmvec-2-8eb5c230.28.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb9917089855b967f26d04edadb66fbfea2683ec78ad49733e490f85b6716bba
+size 181969
diff --git a/lib/python3.10/site-packages/av.libs/libnettle-a4970681.so.8.10 b/lib/python3.10/site-packages/av.libs/libnettle-a4970681.so.8.10
new file mode 100644
index 0000000000000000000000000000000000000000..f7dc3e2710d44d401a5973d61e21d7bbfc9c39b6
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libnettle-a4970681.so.8.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3621fd4cad89bf8e622f51dccbe3a3b9b62e2b0e19c307e04470109965ffd5a
+size 359689
diff --git a/lib/python3.10/site-packages/av.libs/libopencore-amrnb-e5261d48.so.0.0.3 b/lib/python3.10/site-packages/av.libs/libopencore-amrnb-e5261d48.so.0.0.3
new file mode 100644
index 0000000000000000000000000000000000000000..2d4b0d80ed97b47730e073974b6eb8c87e9089fe
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libopencore-amrnb-e5261d48.so.0.0.3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0b492770d19a03b2d566a8bd25d45e53288a7ba4c95a74f4b46c24f3cdb862d
+size 172889
diff --git a/lib/python3.10/site-packages/av.libs/libopenh264-7bd47c3a.so.2.6.0 b/lib/python3.10/site-packages/av.libs/libopenh264-7bd47c3a.so.2.6.0
new file mode 100644
index 0000000000000000000000000000000000000000..4bc6456a6ee93705591f43871bd211a1f9b1f722
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libopenh264-7bd47c3a.so.2.6.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55fa2b8a2239db0363acffdcb98a0176b09f06b110cb6eb4104404aea6ebef60
+size 1211297
diff --git a/lib/python3.10/site-packages/av.libs/libopus-a676965d.so.0.10.1 b/lib/python3.10/site-packages/av.libs/libopus-a676965d.so.0.10.1
new file mode 100644
index 0000000000000000000000000000000000000000..28e294ace25441dabcdaf663045c38cb66e92d74
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libopus-a676965d.so.0.10.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c4a66fd4d256037a68c01a8e2c7aa3b8b6fc2229dea6d54395b570b4cf0cbfc
+size 408617
diff --git a/lib/python3.10/site-packages/av.libs/libspeex-dd5a2d1c.so.1.5.2 b/lib/python3.10/site-packages/av.libs/libspeex-dd5a2d1c.so.1.5.2
new file mode 100644
index 0000000000000000000000000000000000000000..6f6a71ba36f7d92f8029c81e5237195255d3b82e
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libspeex-dd5a2d1c.so.1.5.2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17c3ab3a7c976213a963eda8cae42456c3c6096cba90e150a363044fd0183246
+size 116697
diff --git a/lib/python3.10/site-packages/av.libs/libsrt-ccd6ae88.so.1.5.4 b/lib/python3.10/site-packages/av.libs/libsrt-ccd6ae88.so.1.5.4
new file mode 100644
index 0000000000000000000000000000000000000000..4ae2760db551315307f687426a7d3ac68d79cde6
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libsrt-ccd6ae88.so.1.5.4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98a4195195ea3102e5f518e45b85fd5576dafc225cff80f471ddc769588dc5e2
+size 1076345
diff --git a/lib/python3.10/site-packages/av.libs/libssl-60250281.so.1.1.1k b/lib/python3.10/site-packages/av.libs/libssl-60250281.so.1.1.1k
new file mode 100644
index 0000000000000000000000000000000000000000..fc6495c95092f6d92d0ff7d69a6782d303abac7e
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libssl-60250281.so.1.1.1k
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30236e4c8dcaf2f73e97f06f3799c496a3e9b36c56dc239ed84667bb1b5f42b1
+size 666865
diff --git a/lib/python3.10/site-packages/av.libs/libswresample-f1bdf0d4.so.5.3.100 b/lib/python3.10/site-packages/av.libs/libswresample-f1bdf0d4.so.5.3.100
new file mode 100644
index 0000000000000000000000000000000000000000..90f5107c203d1cd882f255206890700d2a49ed97
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libswresample-f1bdf0d4.so.5.3.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ed02c6961b52aab847fe87ea0879c866fb5f0977d1ac27b485c43972971470
+size 132361
diff --git a/lib/python3.10/site-packages/av.libs/libswscale-5efb2ca5.so.8.3.100 b/lib/python3.10/site-packages/av.libs/libswscale-5efb2ca5.so.8.3.100
new file mode 100644
index 0000000000000000000000000000000000000000..4a6850327910a18ae5d8a5e71aa12a1a51c5b6db
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libswscale-5efb2ca5.so.8.3.100
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bce7ad4ae62bd14fedf353b5616dc4004c2fe4668d092a32f72414cd91bda79d
+size 644497
diff --git a/lib/python3.10/site-packages/av.libs/libtwolame-dfe0c2c6.so.0.0.0 b/lib/python3.10/site-packages/av.libs/libtwolame-dfe0c2c6.so.0.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..9e7f5fca2ceb9c4633808688ae96092397afd59c
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libtwolame-dfe0c2c6.so.0.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0761cd56b53955470846effd1556db5fcbc3b22a75d4f6fda9ee58f4ae546b84
+size 143489
diff --git a/lib/python3.10/site-packages/av.libs/libunistring-7eaffe9f.so.5.2.0 b/lib/python3.10/site-packages/av.libs/libunistring-7eaffe9f.so.5.2.0
new file mode 100644
index 0000000000000000000000000000000000000000..18a9b9e3693809d622ac62494d7cd86c31b3213f
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libunistring-7eaffe9f.so.5.2.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b714a28c5d18922134f26cb0beba1d3ed01a9950c622aa6805bf8585d4e560d
+size 2028817
diff --git a/lib/python3.10/site-packages/av.libs/libvorbis-7463f6bd.so.0.4.9 b/lib/python3.10/site-packages/av.libs/libvorbis-7463f6bd.so.0.4.9
new file mode 100644
index 0000000000000000000000000000000000000000..2bb0472c647c6daa6aa99b780e9daabeb1100099
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libvorbis-7463f6bd.so.0.4.9
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d7eed32f033449a9b9191c54e2a7d78713077fe7820ba9ad4a6b22568472ae2
+size 260897
diff --git a/lib/python3.10/site-packages/av.libs/libvorbisenc-131c2ed7.so.2.0.12 b/lib/python3.10/site-packages/av.libs/libvorbisenc-131c2ed7.so.2.0.12
new file mode 100644
index 0000000000000000000000000000000000000000..d38c4fc81f07e574ab2a0ccd6d448ea5ee28c998
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libvorbisenc-131c2ed7.so.2.0.12
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f6ebdcac9eb8f5ef6ecf2563b5c9eef2ca694166d9eea42a5fd384ca1e95027
+size 709041
diff --git a/lib/python3.10/site-packages/av.libs/libvpx-09740bc5.so.11.0.0 b/lib/python3.10/site-packages/av.libs/libvpx-09740bc5.so.11.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..80ca97535d00259a0aee38655140128543fdb3d2
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libvpx-09740bc5.so.11.0.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18bb3062cb6544204185720dcd234319f836389fe9f01c9ce5e288d38cf0f6dc
+size 2344985
diff --git a/lib/python3.10/site-packages/av.libs/libwebp-bc89f640.so.7.1.10 b/lib/python3.10/site-packages/av.libs/libwebp-bc89f640.so.7.1.10
new file mode 100644
index 0000000000000000000000000000000000000000..1d6e507beec43dc15bbed4536f909ffee3851bd4
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libwebp-bc89f640.so.7.1.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89dafd3e0e23265489e8de3d47c2f4fe4013de498fbed652e365652d02fdb9b9
+size 735321
diff --git a/lib/python3.10/site-packages/av.libs/libx264-b1bb65f5.so.165 b/lib/python3.10/site-packages/av.libs/libx264-b1bb65f5.so.165
new file mode 100644
index 0000000000000000000000000000000000000000..79a351280064fdedecd0af8ec43d91022ddd016c
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libx264-b1bb65f5.so.165
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad37c702ddcb95fc43e86a74a2574cb669b85771b88774efaad9ca26ee77689f
+size 2288385
diff --git a/lib/python3.10/site-packages/av.libs/libx265-169666e3.so.215 b/lib/python3.10/site-packages/av.libs/libx265-169666e3.so.215
new file mode 100644
index 0000000000000000000000000000000000000000..f5fe5157145c4d399ee29a4910a8963a8da56403
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libx265-169666e3.so.215
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9449b1434b67994d17a77cbbe59a13d81cf8f1b0b4ca02533559c876ff4ee42
+size 20636217
diff --git a/lib/python3.10/site-packages/av.libs/libxcb-5ddf6756.so.1.1.0 b/lib/python3.10/site-packages/av.libs/libxcb-5ddf6756.so.1.1.0
new file mode 100644
index 0000000000000000000000000000000000000000..4b31a4195b8b3c9c982f5503bea674e622d5c661
--- /dev/null
+++ b/lib/python3.10/site-packages/av.libs/libxcb-5ddf6756.so.1.1.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f34b406b3bcf9ef40df06984600c124733144ce4b7bdaa15622e58f9ef27dd8
+size 280065
diff --git a/lib/python3.10/site-packages/av/_core.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/_core.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..04affe4b560b41d346e21053ed15ff95cbb5fedc
--- /dev/null
+++ b/lib/python3.10/site-packages/av/_core.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fd63cf172feb75a43ade4bae99b88a4afdeb22b95d6f220b8b943fdecdf6753
+size 175249
diff --git a/lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3aee00a37922cf90693f47893e03ef111d41e301
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ef250d0163fd9066d3349910f2329cd5272add080add39051f357bb49dc44a8
+size 355185
diff --git a/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..472b632984eba794d07d09ee75ffb73821b8ff74
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d2cf5eb7020963ff4080cef4fc196920c7ec9204a4c78c3249f1f3197c1e545
+size 601385
diff --git a/lib/python3.10/site-packages/av/audio/format.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/format.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2a8dea43b2c0b88aec1a5f25ae176ebb811669d4
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/format.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d97225990edbb529025419bc054cf9685796fe5295d0ae6f0801b9edb405850
+size 298081
diff --git a/lib/python3.10/site-packages/av/audio/frame.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/frame.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..98d1ba18d51104c9a2f36c71f455a22fe6f460fd
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/frame.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1a70d2bfaaabcaf4daa5e4d63f2654133fc5c4283a65513e3828a220794f6b3
+size 818897
diff --git a/lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bb80203294d8e359dfefd1efe27c509939dceb54
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3da82d79ecff04962874050344118a5560f95cb2a4e0bdce793ad04201e0f2a3
+size 446161
diff --git a/lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1f37666f6c0ff7bc27a468366da976d2b01060f4
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd416342083ecd3095d820b19952134f74bd2199bf185bea090dd93da5ac4216
+size 281129
diff --git a/lib/python3.10/site-packages/av/audio/resampler.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/resampler.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..839616b05d410a28f9952e08033d5dd251eb55d9
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/resampler.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3beb14828de0e70125a7e26ed9021f38903fbf1d0d120afee71ed387086da00f
+size 630025
diff --git a/lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..dbbaacc8ecda3e067101e0539e4f05778cae99c7
--- /dev/null
+++ b/lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8839f9a5bfe0f1303db3d0ced47e1ace7c07bafbf804f41660cab30f026b2fb
+size 375753
diff --git a/lib/python3.10/site-packages/av/bitstream.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/bitstream.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7e8e1e10934740f49027701b71d1a85c28056433
--- /dev/null
+++ b/lib/python3.10/site-packages/av/bitstream.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b1d0002bc358a869e1e885a50eddc4c337f9cae9773c43fcc030ef74db53096
+size 351313
diff --git a/lib/python3.10/site-packages/av/buffer.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/buffer.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e3561fb519f9073ddd47a70ff52c6452f0230580
--- /dev/null
+++ b/lib/python3.10/site-packages/av/buffer.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1da93ec4aba8d06c09deba6bc666e678fedd505b95055c7a8dd0a24f6c13e929
+size 363609
diff --git a/lib/python3.10/site-packages/av/bytesource.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/bytesource.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1a0e6166fd2aff2d6c67a3cc9ab40cf2122cf644
--- /dev/null
+++ b/lib/python3.10/site-packages/av/bytesource.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10ba69f16165af9e73996bdf2829a0b8eaebc05103c8a4fd366591fa93574676
+size 223961
diff --git a/lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..6b23dd4677b776f17ec84e5c7fd6ef5096811d75
--- /dev/null
+++ b/lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc81fb70e91cc88ce694218d1e4bce03a98b642be856a331120b894ae9af6a95
+size 831553
diff --git a/lib/python3.10/site-packages/av/codec/context.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/codec/context.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..21a1bc58c232df68bff11e6b7aa9e4bb1730787d
--- /dev/null
+++ b/lib/python3.10/site-packages/av/codec/context.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43f6aa5a9fd36bc0a6ac3a1ef05dc76cfabc03e7291d03203011b9f66c2f43d7
+size 1266081
diff --git a/lib/python3.10/site-packages/av/codec/hwaccel.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/codec/hwaccel.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..5da773817fde7bdb87e7525d890e320c3a42cd54
--- /dev/null
+++ b/lib/python3.10/site-packages/av/codec/hwaccel.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d16ff94222af9377423cd06377763da956026e5c431174213632753ed4d6d720
+size 720545
diff --git a/lib/python3.10/site-packages/av/container/core.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/container/core.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..dba744e78a47d8da41d9e86797f00bc439e88bd0
--- /dev/null
+++ b/lib/python3.10/site-packages/av/container/core.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1bf8fb4bbc40915271daa84fc247fb63b0d652ef27a7991fc810dfb452ab445
+size 1134817
diff --git a/lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1168a3905b6ad0ba6b4b3d425910f0d430e17434
--- /dev/null
+++ b/lib/python3.10/site-packages/av/container/input.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5eb45735da5a85a23dff19649c187a75914c23e1836b9e01b39530591a8f2bc
+size 831425
diff --git a/lib/python3.10/site-packages/av/container/output.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/container/output.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..6ee787ad7e3a202ee3d4b75a5da044977c3fe751
--- /dev/null
+++ b/lib/python3.10/site-packages/av/container/output.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:487940f81b8252be4fc070d052be865606c9489dafbfcdab2696c912d473e261
+size 925713
diff --git a/lib/python3.10/site-packages/av/container/pyio.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/container/pyio.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..326ca0e2f8147e3b3b006a5684cee5702e710aa4
--- /dev/null
+++ b/lib/python3.10/site-packages/av/container/pyio.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca6c6c7bc3ca93545634c66943abed95925fcc8c0db63bc2dbcd57710ef3d84
+size 560241
diff --git a/lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..cb6781934d5aed8b06b583461ebcf68ecd2cfdd9
--- /dev/null
+++ b/lib/python3.10/site-packages/av/container/streams.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16870b9fa8fe0888656e5436be13fac0f9104cd720f8a45dcf1034a535ab3ff0
+size 798329
diff --git a/lib/python3.10/site-packages/av/descriptor.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/descriptor.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..42d2d001126da3113c853e3fc98b3585e1149bdc
--- /dev/null
+++ b/lib/python3.10/site-packages/av/descriptor.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfaebfc3da35d0cc16c227c7544fa090c69121eaeedeb8494ac385dc2f9453c9
+size 256977
diff --git a/lib/python3.10/site-packages/av/dictionary.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/dictionary.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..288b78c0e161f7bb199dea1b7cbd44e5aa2e7f0c
--- /dev/null
+++ b/lib/python3.10/site-packages/av/dictionary.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86b0d621c713312fb9f397db4eced1c8b25cdab16e93ad43dd930067da482325
+size 528153
diff --git a/lib/python3.10/site-packages/av/error.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/error.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ceea58a1ec66d1e91bb98a2c55c8a215b8d0ac46
--- /dev/null
+++ b/lib/python3.10/site-packages/av/error.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa1cfa96757bd2de4a1914cc7680f382b550a8cc82ac386c5a41db2130f8173f
+size 1454073
diff --git a/lib/python3.10/site-packages/av/filter/context.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/context.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0458e58d315b65d4b585be852821eff9796bf9a4
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/context.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e447fe0b52094938c496f0fa2c284ee8728e45f31312f955be3345f9ea22f447
+size 638609
diff --git a/lib/python3.10/site-packages/av/filter/filter.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/filter.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..799ae2cfd29bddf001e2976b84fdc4ff118a245d
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/filter.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01d6dc702860fb62737ff459acea1132e9355d84b83208a30b5f12b295d64dd6
+size 322625
diff --git a/lib/python3.10/site-packages/av/filter/graph.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/graph.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..5cb73c2534ce8f9d6845a1c814f35f7848a12b7d
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/graph.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0d598f94b5b7f7169ec67a47d100f2d8035d65c8b4feb282f8f9f28810841b2
+size 990881
diff --git a/lib/python3.10/site-packages/av/filter/link.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/link.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2f8cb47f79cf953e26992b5c2e95c486e9fa9630
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/link.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f59b49ca8f72c005ae352a0f26c90c9ef137b59e7a89e76c07c4c7624d2dbc0c
+size 314145
diff --git a/lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..68079af1c2eb7372525b1ae2035055e29314924f
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67e50ec9e5bf411fd4f0d8e4b09866f3cc6557a50adb62f274eedbaaf2e5130a
+size 327169
diff --git a/lib/python3.10/site-packages/av/filter/pad.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/filter/pad.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fcf1febeb1be55710ee76bb91e19f6ef4ff6faa8
--- /dev/null
+++ b/lib/python3.10/site-packages/av/filter/pad.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a966f3a3f62f799043621a8caa88d7ba5c9a611b398a3108510fcdb68c3a952
+size 363585
diff --git a/lib/python3.10/site-packages/av/format.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/format.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..4c792b1f9e3fb02927ca243be89704a2e905e3bf
--- /dev/null
+++ b/lib/python3.10/site-packages/av/format.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ae7653d50e2e4e693a6d3f455325be74084bb4f21c5812b9574efa797afb0e9
+size 396737
diff --git a/lib/python3.10/site-packages/av/frame.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/frame.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b28a3ec0bc82c02244f4d074160aae50f199a391
--- /dev/null
+++ b/lib/python3.10/site-packages/av/frame.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4da643922dfb7e9d3c3d0bdd88c3ce4cb3904f06a735e63dbe0e36955c01d282
+size 396681
diff --git a/lib/python3.10/site-packages/av/logging.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/logging.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ce82c7034488619cbb12ca81df03e8ddc77b34eb
--- /dev/null
+++ b/lib/python3.10/site-packages/av/logging.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95014ceca9c872959ceee79baeede910194eb8d161b8b67bf74aaa9df68bd978
+size 925377
diff --git a/lib/python3.10/site-packages/av/opaque.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/opaque.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fbacc56de93002b82514023b8a38f8bf0539ca77
--- /dev/null
+++ b/lib/python3.10/site-packages/av/opaque.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b92e900f7e6415b67bac68effc6324ca2f425000aa0754044b32e180afb8c517
+size 232265
diff --git a/lib/python3.10/site-packages/av/option.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/option.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..001d661d119c7fd9ce47168cd623b41394ae9aed
--- /dev/null
+++ b/lib/python3.10/site-packages/av/option.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:724c91c9d798c22f24d89599580958b8a78788625308fdced65f590133b0b4a0
+size 503017
diff --git a/lib/python3.10/site-packages/av/packet.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/packet.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ad6758fd56eecbbddc1bbd4faf717a5ae746cefa
--- /dev/null
+++ b/lib/python3.10/site-packages/av/packet.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8a7dfd416730f4b6ed0420c1b9806841ccdb9916c9d21b7126806605b556f80
+size 466217
diff --git a/lib/python3.10/site-packages/av/plane.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/plane.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fff6161fee0fa3ac3b7961a31cb55ee6d4f6fbc5
--- /dev/null
+++ b/lib/python3.10/site-packages/av/plane.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11a49af3f344d620e8665686f526abe45d11614da3e4560dd827459f3e5acd0f
+size 314305
diff --git a/lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..206f847408a26d3f045785464acced37b79a8201
--- /dev/null
+++ b/lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b8a2c565b618e6041bc8e696139f2e7bee6d2d3a197efe9aecca979be176412
+size 552281
diff --git a/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..721d81c66bc998919f814e0cc643a0d4eb8ef1c8
--- /dev/null
+++ b/lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19dcd4553ac203a29d8543143080bebf99249c7e4405997a346392ba90c4e276
+size 658953
diff --git a/lib/python3.10/site-packages/av/stream.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/stream.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ca563cc229228b940751ce9df6ad81913e11958a
--- /dev/null
+++ b/lib/python3.10/site-packages/av/stream.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77e6980153b6921c6c97731d89bcf1f423b6954315ad855fdf5def2d695abc62
+size 617937
diff --git a/lib/python3.10/site-packages/av/subtitles/codeccontext.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/subtitles/codeccontext.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ae66a2f2d94d25697c0c123bd2354c9ce144f107
--- /dev/null
+++ b/lib/python3.10/site-packages/av/subtitles/codeccontext.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f4c77660262043aa52ffb27bdd34ae2714439cb6c0accb2f6b12123b680b21a
+size 314001
diff --git a/lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..baecba2882018ba6a63759dc277841227c475782
--- /dev/null
+++ b/lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a55d341494c5caac565820088a4085941548f16f47370d76b65d8a9c98425cd
+size 289409
diff --git a/lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index
0000000000000000000000000000000000000000..3f91386ff8f75b3d821736d9ef455191d8446f7f --- /dev/null +++ b/lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31cc791ff6df414240a6365faef34660e43e09aaa07446819e4ce4ccb1d137f7 +size 937865 diff --git a/lib/python3.10/site-packages/av/utils.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e00df3dbfd37c57c75e70803f6df4bd4f5f1761d --- /dev/null +++ b/lib/python3.10/site-packages/av/utils.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7726b970c286e1bb575f009c7577e5796b5f90bff4e99bd56c9a80a45e6def03 +size 252737 diff --git a/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..350c0ef7a010401a39d99d8c60c3c171a02eec23 --- /dev/null +++ b/lib/python3.10/site-packages/av/video/codeccontext.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1a15bfabf9d4b09e9236b89fdf65b6260e06eafabb9228ad09aea5cf8399322 +size 666897 diff --git a/lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d171bfdf4f5048b3bef4df9c989717e27b03aec3 --- /dev/null +++ b/lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04383e15c97801c2d59dd16572a2415139d052eceb64e4ee8589cd94d3b33917 +size 777945 diff --git a/lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..54e7bd03fb21bbf9bac7460b54c75481bf3db9a7 --- /dev/null +++ b/lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41bb4c00bee69f77fa9442788d76196bad94de7e06a02cd879b74195f75c5beb +size 3371721 diff --git a/lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..828104345605bc910e122324d0a2dedf292b8749 --- /dev/null +++ b/lib/python3.10/site-packages/av/video/plane.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94314bd1e2acb2ee007319ac50ad2f1ef4d7366a524fc06917b24cdcded6992f +size 367513 diff --git a/lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7d97c396dda4df43a8b0ca097704048fa3ce8aac --- /dev/null +++ b/lib/python3.10/site-packages/av/video/reformatter.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eff7ae4bdd445411c77e1cfd2028c5b7a851c6c02e93f5e9de0c9127a1e6c173 +size 601545 diff --git 
a/lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4947a6848dce7686808245c0050b7e8eeef0f449 --- /dev/null +++ b/lib/python3.10/site-packages/av/video/stream.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf2ace0d3f2b0952a52e3dd48e3773267251a4740f75cd77b0cdce330d4521d9 +size 461937 diff --git a/lib/python3.10/site-packages/brotlicffi/_brotlicffi.abi3.so b/lib/python3.10/site-packages/brotlicffi/_brotlicffi.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..3d0b62df7cf717648e03fdba8c63d78a53a21e06 --- /dev/null +++ b/lib/python3.10/site-packages/brotlicffi/_brotlicffi.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2adefd7477008ded9c4460401570e5ca9fe9794f567d5becbcbc83fe771df555 +size 795224 diff --git a/lib/python3.10/site-packages/clip/bpe_simple_vocab_16e6.txt.gz b/lib/python3.10/site-packages/clip/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/lib/python3.10/site-packages/clip/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/lib/python3.10/site-packages/contourpy/_contourpy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/contourpy/_contourpy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0dfff539343c939cef072609c86e78d0f5dcf3b5 --- /dev/null +++ b/lib/python3.10/site-packages/contourpy/_contourpy.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f22b7f307c1f94161cda346e6b9295057635e2064eeccb87e8602e4159bc57e1 +size 854312 diff --git a/lib/python3.10/site-packages/cv2/cv2.abi3.so b/lib/python3.10/site-packages/cv2/cv2.abi3.so new file mode 100644 index 0000000000000000000000000000000000000000..0543e6062a28d11c712341fe8d41f1b0fdf10ebd --- /dev/null +++ b/lib/python3.10/site-packages/cv2/cv2.abi3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:188d38ace4716a74534ddf4dd4f7d77a06641bd0908bf12eb218a3c2fef35f38 +size 66322321 diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/lazyTools.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/lazyTools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..203251708bac2ea81fc5052d14982eae79f4b52a Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/lazyTools.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4499dfafae3958e5560c097a924b440d3966393f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..af437a0b2c166c233da91b27b2be37f7cff90d24 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/macRes.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/macRes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ed7cae8d90d8986ec6277118ab195c89d6746b4 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/macRes.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74d5800c7cbba02f0a6bc9f4c6e3b2f2c44a2d4f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/psLib.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psLib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7022b97ad3c0a3b8f2794a375a63b8a947b257 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psLib.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/__pycache__/psOperators.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psOperators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2146b2225fdaa73b418f82713ba44e4b490c73d0 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/misc/__pycache__/psOperators.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/misc/plistlib/__init__.py b/lib/python3.10/site-packages/fontTools/misc/plistlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..066eef38fc720265366afee9a8cd415fc560459e --- /dev/null +++ b/lib/python3.10/site-packages/fontTools/misc/plistlib/__init__.py @@ -0,0 +1,681 @@ +import collections.abc +import re +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + MutableMapping, + Optional, + Sequence, + Type, + Union, + IO, +) +import warnings +from io import BytesIO +from datetime import datetime +from base64 import b64encode, b64decode +from numbers import Integral +from types import SimpleNamespace +from functools import singledispatch + +from fontTools.misc import etree + +from fontTools.misc.textTools import tostr + + +# By default, we +# - deserialize <data> elements as bytes and +# - serialize bytes as <data> elements. +# Before, on Python 2, we +# - deserialized <data> elements as plistlib.Data objects, in order to +# distinguish them from the built-in str type (which is bytes on python2) +# - serialized bytes as <string> elements (they must have only contained +# ASCII characters in this case) +# You can pass use_builtin_types=[True|False] to the load/dump etc. functions +# to enforce a specific treatment. +# NOTE that unicode type always maps to <string> element, and plistlib.Data +# always maps to <data> element, regardless of use_builtin_types.
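A minimal round-trip sketch of the use_builtin_types switch described in the comment above (an illustration, not part of the vendored file; it assumes this fontTools copy is importable, and the "payload" key and byte values are made up):

    from fontTools.misc import plistlib

    # bytes serialize to a <data> element under the default use_builtin_types=True
    blob = plistlib.dumps({"payload": b"\x00\x01"})
    # ...and <data> deserializes back to the builtin bytes type
    assert plistlib.loads(blob)["payload"] == b"\x00\x01"

    # the deprecated legacy path wraps binary data in a Data instance instead
    legacy = plistlib.loads(blob, use_builtin_types=False)  # emits DeprecationWarning
    assert legacy["payload"].data == b"\x00\x01"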
+USE_BUILTIN_TYPES = True + +XML_DECLARATION = b"""<?xml version='1.0' encoding='UTF-8'?>""" + +PLIST_DOCTYPE = ( + b'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"' + b' "http://www.apple.com/DTDs/PropertyList-1.0.dtd">' +) + + +# Date should conform to a subset of ISO 8601: +# YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z' +_date_parser = re.compile( + r"(?P<year>\d\d\d\d)" + r"(?:-(?P<month>\d\d)" + r"(?:-(?P<day>\d\d)" + r"(?:T(?P<hour>\d\d)" + r"(?::(?P<minute>\d\d)" + r"(?::(?P<second>\d\d))" + r"?)?)?)?)?Z", + re.ASCII, +) + + +def _date_from_string(s: str) -> datetime: + order = ("year", "month", "day", "hour", "minute", "second") + m = _date_parser.match(s) + if m is None: + raise ValueError(f"Expected ISO 8601 date string, but got {s!r}.") + gd = m.groupdict() + lst = [] + for key in order: + val = gd[key] + if val is None: + break + lst.append(int(val)) + # NOTE: mypy doesn't know that lst is 6 elements long. + return datetime(*lst) # type:ignore + + +def _date_to_string(d: datetime) -> str: + return "%04d-%02d-%02dT%02d:%02d:%02dZ" % ( + d.year, + d.month, + d.day, + d.hour, + d.minute, + d.second, + ) + + +class Data: + """Represents binary data when ``use_builtin_types=False``. + + This class wraps binary data loaded from a plist file when the + ``use_builtin_types`` argument to the loading function (:py:func:`fromtree`, + :py:func:`load`, :py:func:`loads`) is false. + + The actual binary data is retrieved using the ``data`` attribute. + """ + + def __init__(self, data: bytes) -> None: + if not isinstance(data, bytes): + raise TypeError("Expected bytes, found %s" % type(data).__name__) + self.data = data + + @classmethod + def fromBase64(cls, data: Union[bytes, str]) -> "Data": + return cls(b64decode(data)) + + def asBase64(self, maxlinelength: int = 76, indent_level: int = 1) -> bytes: + return _encode_base64( + self.data, maxlinelength=maxlinelength, indent_level=indent_level + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.data == other.data + elif isinstance(other, bytes): + return self.data == other + else: + return NotImplemented + + def __repr__(self) -> str: + return "%s(%s)" % (self.__class__.__name__, repr(self.data)) + + +def _encode_base64( + data: bytes, maxlinelength: Optional[int] = 76, indent_level: int = 1 +) -> bytes: + data = b64encode(data) + if data and maxlinelength: + # split into multiple lines right-justified to 'maxlinelength' chars + indent = b"\n" + b" " * indent_level + max_length = max(16, maxlinelength - len(indent)) + chunks = [] + for i in range(0, len(data), max_length): + chunks.append(indent) + chunks.append(data[i : i + max_length]) + chunks.append(indent) + data = b"".join(chunks) + return data + + +# Mypy does not support recursive type aliases as of 0.782, Pylance does. +# https://github.com/python/mypy/issues/731 +# https://devblogs.microsoft.com/python/pylance-introduces-five-new-features-that-enable-type-magic-for-python-developers/#1-support-for-recursive-type-aliases +PlistEncodable = Union[ + bool, + bytes, + Data, + datetime, + float, + Integral, + Mapping[str, Any], + Sequence[Any], + str, +] + + +class PlistTarget: + """Event handler using the ElementTree Target API that can be + passed to an XMLParser to produce property list objects from XML. + It is based on the CPython plistlib module's _PlistParser class, + but does not use the expat parser. + + >>> from fontTools.misc import etree + >>> parser = etree.XMLParser(target=PlistTarget()) + >>> result = etree.XML( + ... "<dict>" + ... " <key>something</key>" + ... " <string>blah</string>" + ... "</dict>", + ...
parser=parser) + >>> result == {"something": "blah"} + True + + Links: + https://github.com/python/cpython/blob/main/Lib/plistlib.py + http://lxml.de/parsing.html#the-target-parser-interface + """ + + def __init__( + self, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, + ) -> None: + self.stack: List[PlistEncodable] = [] + self.current_key: Optional[str] = None + self.root: Optional[PlistEncodable] = None + if use_builtin_types is None: + self._use_builtin_types = USE_BUILTIN_TYPES + else: + if use_builtin_types is False: + warnings.warn( + "Setting use_builtin_types to False is deprecated and will be " + "removed soon.", + DeprecationWarning, + ) + self._use_builtin_types = use_builtin_types + self._dict_type = dict_type + + def start(self, tag: str, attrib: Mapping[str, str]) -> None: + self._data: List[str] = [] + handler = _TARGET_START_HANDLERS.get(tag) + if handler is not None: + handler(self) + + def end(self, tag: str) -> None: + handler = _TARGET_END_HANDLERS.get(tag) + if handler is not None: + handler(self) + + def data(self, data: str) -> None: + self._data.append(data) + + def close(self) -> PlistEncodable: + if self.root is None: + raise ValueError("No root set.") + return self.root + + # helpers + + def add_object(self, value: PlistEncodable) -> None: + if self.current_key is not None: + stack_top = self.stack[-1] + if not isinstance(stack_top, collections.abc.MutableMapping): + raise ValueError("unexpected element: %r" % stack_top) + stack_top[self.current_key] = value + self.current_key = None + elif not self.stack: + # this is the root object + self.root = value + else: + stack_top = self.stack[-1] + if not isinstance(stack_top, list): + raise ValueError("unexpected element: %r" % stack_top) + stack_top.append(value) + + def get_data(self) -> str: + data = "".join(self._data) + self._data = [] + return data + + +# event handlers + + +def start_dict(self: PlistTarget) -> None: + d = self._dict_type() + self.add_object(d) + self.stack.append(d) + + +def end_dict(self: PlistTarget) -> None: + if self.current_key: + raise ValueError("missing value for key '%s'" % self.current_key) + self.stack.pop() + + +def end_key(self: PlistTarget) -> None: + if self.current_key or not isinstance(self.stack[-1], collections.abc.Mapping): + raise ValueError("unexpected key") + self.current_key = self.get_data() + + +def start_array(self: PlistTarget) -> None: + a: List[PlistEncodable] = [] + self.add_object(a) + self.stack.append(a) + + +def end_array(self: PlistTarget) -> None: + self.stack.pop() + + +def end_true(self: PlistTarget) -> None: + self.add_object(True) + + +def end_false(self: PlistTarget) -> None: + self.add_object(False) + + +def end_integer(self: PlistTarget) -> None: + self.add_object(int(self.get_data())) + + +def end_real(self: PlistTarget) -> None: + self.add_object(float(self.get_data())) + + +def end_string(self: PlistTarget) -> None: + self.add_object(self.get_data()) + + +def end_data(self: PlistTarget) -> None: + if self._use_builtin_types: + self.add_object(b64decode(self.get_data())) + else: + self.add_object(Data.fromBase64(self.get_data())) + + +def end_date(self: PlistTarget) -> None: + self.add_object(_date_from_string(self.get_data())) + + +_TARGET_START_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { + "dict": start_dict, + "array": start_array, +} + +_TARGET_END_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { + "dict": end_dict, + "array": end_array, + "key": end_key, + "true": end_true, + 
"false": end_false, + "integer": end_integer, + "real": end_real, + "string": end_string, + "data": end_data, + "date": end_date, +} + + +# functions to build element tree from plist data + + +def _string_element(value: str, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("string") + el.text = value + return el + + +def _bool_element(value: bool, ctx: SimpleNamespace) -> etree.Element: + if value: + return etree.Element("true") + return etree.Element("false") + + +def _integer_element(value: int, ctx: SimpleNamespace) -> etree.Element: + if -1 << 63 <= value < 1 << 64: + el = etree.Element("integer") + el.text = "%d" % value + return el + raise OverflowError(value) + + +def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("real") + el.text = repr(value) + return el + + +def _dict_element( + d: Mapping[str, PlistEncodable], ctx: SimpleNamespace +) -> etree.Element: + el = etree.Element("dict") + items = d.items() + if ctx.sort_keys: + items = sorted(items) # type: ignore + ctx.indent_level += 1 + for key, value in items: + if not isinstance(key, str): + if ctx.skipkeys: + continue + raise TypeError("keys must be strings") + k = etree.SubElement(el, "key") + k.text = tostr(key, "utf-8") + el.append(_make_element(value, ctx)) + ctx.indent_level -= 1 + return el + + +def _array_element( + array: Sequence[PlistEncodable], ctx: SimpleNamespace +) -> etree.Element: + el = etree.Element("array") + if len(array) == 0: + return el + ctx.indent_level += 1 + for value in array: + el.append(_make_element(value, ctx)) + ctx.indent_level -= 1 + return el + + +def _date_element(date: datetime, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("date") + el.text = _date_to_string(date) + return el + + +def _data_element(data: bytes, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("data") + # NOTE: mypy is confused about whether el.text should be str or bytes. + el.text = _encode_base64( # type: ignore + data, + maxlinelength=(76 if ctx.pretty_print else None), + indent_level=ctx.indent_level, + ) + return el + + +def _string_or_data_element(raw_bytes: bytes, ctx: SimpleNamespace) -> etree.Element: + if ctx.use_builtin_types: + return _data_element(raw_bytes, ctx) + else: + try: + string = raw_bytes.decode(encoding="ascii", errors="strict") + except UnicodeDecodeError: + raise ValueError( + "invalid non-ASCII bytes; use unicode string instead: %r" % raw_bytes + ) + return _string_element(string, ctx) + + +# The following is probably not entirely correct. The signature should take `Any` +# and return `NoReturn`. At the time of this writing, neither mypy nor Pyright +# can deal with singledispatch properly and will apply the signature of the base +# function to all others. Being slightly dishonest makes it type-check and return +# usable typing information for the optimistic case. 
+@singledispatch +def _make_element(value: PlistEncodable, ctx: SimpleNamespace) -> etree.Element: + raise TypeError("unsupported type: %s" % type(value)) + + +_make_element.register(str)(_string_element) +_make_element.register(bool)(_bool_element) +_make_element.register(Integral)(_integer_element) +_make_element.register(float)(_real_element) +_make_element.register(collections.abc.Mapping)(_dict_element) +_make_element.register(list)(_array_element) +_make_element.register(tuple)(_array_element) +_make_element.register(datetime)(_date_element) +_make_element.register(bytes)(_string_or_data_element) +_make_element.register(bytearray)(_data_element) +_make_element.register(Data)(lambda v, ctx: _data_element(v.data, ctx)) + + +# Public functions to create element tree from plist-compatible python +# data structures and vice versa, for use when (de)serializing GLIF XML. + + +def totree( + value: PlistEncodable, + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, + indent_level: int = 1, +) -> etree.Element: + """Convert a value derived from a plist into an XML tree. + + Args: + value: Any kind of value to be serialized to XML. + sort_keys: Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as ASCII strings or an + exception raised if they cannot be decoded as such. Defaults + to ``True`` if not present. Deprecated. + pretty_print (bool): Whether to indent the output. + indent_level (int): Level of indentation when serializing. + + Returns: an ``etree`` ``Element`` object. + + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. + ``ValueError`` + if non-ASCII binary data is present + and `use_builtin_types` is false. + """ + if use_builtin_types is None: + use_builtin_types = USE_BUILTIN_TYPES + context = SimpleNamespace( + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + indent_level=indent_level, + ) + return _make_element(value, context) + + +def fromtree( + tree: etree.Element, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Convert an XML tree to a plist structure. + + Args: + tree: An ``etree`` ``Element``. + use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: An object (usually a dictionary). + """ + target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) + for action, element in etree.iterwalk(tree, events=("start", "end")): + if action == "start": + target.start(element.tag, element.attrib) + elif action == "end": + # if there are no children, parse the leaf's data + if not len(element): + # always pass str, not None + target.data(element.text or "") + target.end(element.tag) + return target.close() + + +# python3 plistlib API + + +def load( + fp: IO[bytes], + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Load a plist file into an object. + + Args: + fp: An opened file.
+ use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: + An object (usually a dictionary) representing the top level of + the plist file. + """ + + if not hasattr(fp, "read"): + raise AttributeError("'%s' object has no attribute 'read'" % type(fp).__name__) + target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) + parser = etree.XMLParser(target=target) + result = etree.parse(fp, parser=parser) + # lxml returns the target object directly, while ElementTree wraps + # it as the root of an ElementTree object + try: + return result.getroot() + except AttributeError: + return result + + +def loads( + value: bytes, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Load a plist file from a string into an object. + + Args: + value: A bytes string containing a plist. + use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: + An object (usually a dictionary) representing the top level of + the plist file. + """ + + fp = BytesIO(value) + return load(fp, use_builtin_types=use_builtin_types, dict_type=dict_type) + + +def dump( + value: PlistEncodable, + fp: IO[bytes], + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, +) -> None: + """Write a Python object to a plist file. + + Args: + value: An object to write. + fp: A file opened for writing. + sort_keys (bool): Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as ASCII strings or an + exception raised if they cannot be represented. Defaults + to ``True`` if not present. Deprecated. + pretty_print (bool): Whether to indent the output. + + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. + ``ValueError`` + if non-representable binary data is present + and `use_builtin_types` is false. + """ + + if not hasattr(fp, "write"): + raise AttributeError("'%s' object has no attribute 'write'" % type(fp).__name__) + root = etree.Element("plist", version="1.0") + el = totree( + value, + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + ) + root.append(el) + tree = etree.ElementTree(root) + # we write the doctype ourselves instead of using the 'doctype' argument + # of 'write' method, because lxml will force adding a '\n' even when + # pretty_print is False. + if pretty_print: + header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b"")) + else: + header = XML_DECLARATION + PLIST_DOCTYPE + fp.write(header) + tree.write( # type: ignore + fp, + encoding="utf-8", + pretty_print=pretty_print, + xml_declaration=False, + ) + + +def dumps( + value: PlistEncodable, + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, +) -> bytes: + """Write a Python object to a string in plist format. + + Args: + value: An object to write.
+ sort_keys (bool): Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as ASCII strings or an + exception raised if they cannot be represented. Defaults + to ``True`` if not present. Deprecated. + pretty_print (bool): Whether to indent the output. + + Returns: + bytes: A plist representation of the Python object. + + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. + ``ValueError`` + if non-representable binary data is present + and `use_builtin_types` is false. + """ + fp = BytesIO() + dump( + value, + fp, + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + ) + return fp.getvalue() diff --git a/lib/python3.10/site-packages/fontTools/misc/plistlib/py.typed b/lib/python3.10/site-packages/fontTools/misc/plistlib/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0abe475364151e2afd42767c59f48931648ead46 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4471d048cce01dbf46e7e3d82c0e4bf4a5489328 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02ef311aed7d41a617cd11231eeeab5a424bd710 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/error.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..888410093402fb03f0c6fcae06c9300bed16433f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/error.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71ad55e9e356dd3ef5af1c6fce41fa6232dd1fc8 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..25bce9cd2cdaa51338c83b7ecb9059b592b5574f --- /dev/null +++
b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__init__.py @@ -0,0 +1,53 @@ +from argparse import RawTextHelpFormatter +from fontTools.otlLib.optimize.gpos import COMPRESSION_LEVEL, compact +from fontTools.ttLib import TTFont + + +def main(args=None): + """Optimize the layout tables of an existing font""" + from argparse import ArgumentParser + + from fontTools import configLogger + + parser = ArgumentParser( + prog="otlLib.optimize", + description=main.__doc__, + formatter_class=RawTextHelpFormatter, + ) + parser.add_argument("font") + parser.add_argument( + "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file" + ) + parser.add_argument( + "--gpos-compression-level", + help=COMPRESSION_LEVEL.help, + default=COMPRESSION_LEVEL.default, + choices=list(range(10)), + type=int, + ) + logging_group = parser.add_mutually_exclusive_group(required=False) + logging_group.add_argument( + "-v", "--verbose", action="store_true", help="Run more verbosely." + ) + logging_group.add_argument( + "-q", "--quiet", action="store_true", help="Turn verbosity off." + ) + options = parser.parse_args(args) + + configLogger( + level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") + ) + + font = TTFont(options.font) + compact(font, options.gpos_compression_level) + font.save(options.outfile or options.font) + + +if __name__ == "__main__": + import sys + + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + + sys.exit(doctest.testmod().failed) diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/__main__.py b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ae9081ca8dac338bcf085c71adad87805e3bad --- /dev/null +++ b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__main__.py @@ -0,0 +1,6 @@ +import sys +from fontTools.otlLib.optimize import main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..942d6a68b6b9abae21c04282a6b0f563b86b320a Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c7a4766cf4cada4a474bd8e5b382d292aa85926 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bdc5ddc55b2101a730f6defd4fc0ee18c2f8ab2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py b/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py new file mode 100644 index 0000000000000000000000000000000000000000..3edbfeb306ca38adb839e997cc3b6f1ede914572 --- /dev/null +++ 
b/lib/python3.10/site-packages/fontTools/otlLib/optimize/gpos.py @@ -0,0 +1,439 @@ +import logging +import os +from collections import defaultdict, namedtuple +from dataclasses import dataclass +from functools import cached_property, reduce +from itertools import chain +from math import log2 +from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple + +from fontTools.config import OPTIONS +from fontTools.misc.intTools import bit_count, bit_indices +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables import otBase, otTables + +log = logging.getLogger(__name__) + +COMPRESSION_LEVEL = OPTIONS[f"{__name__}:COMPRESSION_LEVEL"] + +# Kept because ufo2ft depends on it, to be removed once ufo2ft uses the config instead +# https://github.com/fonttools/fonttools/issues/2592 +GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE" +GPOS_COMPACT_MODE_DEFAULT = str(COMPRESSION_LEVEL.default) + + +def _compression_level_from_env() -> int: + env_level = GPOS_COMPACT_MODE_DEFAULT + if GPOS_COMPACT_MODE_ENV_KEY in os.environ: + import warnings + + warnings.warn( + f"'{GPOS_COMPACT_MODE_ENV_KEY}' environment variable is deprecated. " + "Please set the 'fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL' option " + "in TTFont.cfg.", + DeprecationWarning, + ) + + env_level = os.environ[GPOS_COMPACT_MODE_ENV_KEY] + if len(env_level) == 1 and env_level in "0123456789": + return int(env_level) + raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={env_level}") + + +def compact(font: TTFont, level: int) -> TTFont: + # Ideal plan: + # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable + # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable + # 2. Extract glyph-glyph kerning and class-kerning from all present subtables + # 3. Regroup into different subtable arrangements + # 4. Put back into the lookup + # + # Actual implementation: + # 2. Only class kerning is optimized currently + # 3. If the input kerning is already in several subtables, the subtables + # are not grouped together first; instead each subtable is treated + # independently, so currently this step is: + # Split existing subtables into more smaller subtables + gpos = font.get("GPOS") + + # If the font does not contain a GPOS table, there is nothing to do. 
+ if gpos is None: + return font + + for lookup in gpos.table.LookupList.Lookup: + if lookup.LookupType == 2: + compact_lookup(font, level, lookup) + elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2: + compact_ext_lookup(font, level, lookup) + + return font + + +def compact_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: + new_subtables = compact_pair_pos(font, level, lookup.SubTable) + lookup.SubTable = new_subtables + lookup.SubTableCount = len(new_subtables) + + +def compact_ext_lookup(font: TTFont, level: int, lookup: otTables.Lookup) -> None: + new_subtables = compact_pair_pos( + font, level, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable] + ) + new_ext_subtables = [] + for subtable in new_subtables: + ext_subtable = otTables.ExtensionPos() + ext_subtable.Format = 1 + ext_subtable.ExtSubTable = subtable + new_ext_subtables.append(ext_subtable) + lookup.SubTable = new_ext_subtables + lookup.SubTableCount = len(new_ext_subtables) + + +def compact_pair_pos( + font: TTFont, level: int, subtables: Sequence[otTables.PairPos] +) -> Sequence[otTables.PairPos]: + new_subtables = [] + for subtable in subtables: + if subtable.Format == 1: + # Not doing anything to Format 1 (yet?) + new_subtables.append(subtable) + elif subtable.Format == 2: + new_subtables.extend(compact_class_pairs(font, level, subtable)) + return new_subtables + + +def compact_class_pairs( + font: TTFont, level: int, subtable: otTables.PairPos +) -> List[otTables.PairPos]: + from fontTools.otlLib.builder import buildPairPosClassesSubtable + + subtables = [] + classes1: DefaultDict[int, List[str]] = defaultdict(list) + for g in subtable.Coverage.glyphs: + classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g) + classes2: DefaultDict[int, List[str]] = defaultdict(list) + for g, i in subtable.ClassDef2.classDefs.items(): + classes2[i].append(g) + all_pairs = {} + for i, class1 in enumerate(subtable.Class1Record): + for j, class2 in enumerate(class1.Class2Record): + if is_really_zero(class2): + continue + all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = ( + getattr(class2, "Value1", None), + getattr(class2, "Value2", None), + ) + grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(font, all_pairs, level) + for pairs in grouped_pairs: + subtables.append(buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())) + return subtables + + +def is_really_zero(class2: otTables.Class2Record) -> bool: + v1 = getattr(class2, "Value1", None) + v2 = getattr(class2, "Value2", None) + return (v1 is None or v1.getEffectiveFormat() == 0) and ( + v2 is None or v2.getEffectiveFormat() == 0 + ) + + +Pairs = Dict[ + Tuple[Tuple[str, ...], Tuple[str, ...]], + Tuple[otBase.ValueRecord, otBase.ValueRecord], +] + + +# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958 +def _getClassRanges(glyphIDs: Iterable[int]): + glyphIDs = sorted(glyphIDs) + last = glyphIDs[0] + ranges = [[last]] + for glyphID in glyphIDs[1:]: + if glyphID != last + 1: + ranges[-1].append(last) + ranges.append([glyphID]) + last = glyphID + ranges[-1].append(last) + return ranges, glyphIDs[0], glyphIDs[-1] + + +# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989 +def _classDef_bytes( + class_data: List[Tuple[List[Tuple[int, int]], int, int]], + class_ids: List[int], + coverage=False, +): + if not 
class_ids: + return 0 + first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]] + range_count = len(first_ranges) + for i in class_ids[1:]: + data = class_data[i] + range_count += len(data[0]) + min_glyph_id = min(min_glyph_id, data[1]) + max_glyph_id = max(max_glyph_id, data[2]) + glyphCount = max_glyph_id - min_glyph_id + 1 + # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1 + format1_bytes = 6 + glyphCount * 2 + # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2 + format2_bytes = 4 + range_count * 6 + return min(format1_bytes, format2_bytes) + + +ClusteringContext = namedtuple( + "ClusteringContext", + [ + "lines", + "all_class1", + "all_class1_data", + "all_class2_data", + "valueFormat1_bytes", + "valueFormat2_bytes", + ], +) + + +@dataclass +class Cluster: + ctx: ClusteringContext + indices_bitmask: int + + @cached_property + def indices(self): + return bit_indices(self.indices_bitmask) + + @cached_property + def column_indices(self): + # Indices of columns that have a 1 in at least 1 line + # => binary OR all the lines + bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices)) + return bit_indices(bitmask) + + @property + def width(self): + # Add 1 because Class2=0 cannot be used but needs to be encoded. + return len(self.column_indices) + 1 + + @cached_property + def cost(self): + return ( + # 2 bytes to store the offset to this subtable in the Lookup table above + 2 + # Contents of the subtable + # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment + # uint16 posFormat Format identifier: format = 2 + + 2 + # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable. + + 2 + + self.coverage_bytes + # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero). + + 2 + # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero). + + 2 + # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair. + + 2 + + self.classDef1_bytes + # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair. + + 2 + + self.classDef2_bytes + # uint16 class1Count Number of classes in classDef1 table — includes Class 0. + + 2 + # uint16 class2Count Number of classes in classDef2 table — includes Class 0. + + 2 + # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1. 
+ + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes) + * len(self.indices) + * self.width + ) + + @property + def coverage_bytes(self): + format1_bytes = ( + # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1 + # uint16 coverageFormat Format identifier — format = 1 + # uint16 glyphCount Number of glyphs in the glyph array + 4 + # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order + + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2 + ) + ranges = sorted( + chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices) + ) + merged_range_count = 0 + last = None + for start, end in ranges: + if last is not None and start != last + 1: + merged_range_count += 1 + last = end + format2_bytes = ( + # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2 + # uint16 coverageFormat Format identifier — format = 2 + # uint16 rangeCount Number of RangeRecords + 4 + # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID. + # uint16 startGlyphID First glyph ID in the range + # uint16 endGlyphID Last glyph ID in the range + # uint16 startCoverageIndex Coverage Index of first glyph ID in range + + merged_range_count * 6 + ) + return min(format1_bytes, format2_bytes) + + @property + def classDef1_bytes(self): + # We can skip encoding one of the Class1 definitions, and use + # Class1=0 to represent it instead, because Class1 is gated by the + # Coverage definition. Use Class1=0 for the highest byte savings. + # Going through all options takes too long, pick the biggest class + # = what happens in otlLib.builder.ClassDefBuilder.classes() + biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i])) + return _classDef_bytes( + self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index] + ) + + @property + def classDef2_bytes(self): + # All Class2 need to be encoded because we can't use Class2=0 + return _classDef_bytes(self.ctx.all_class2_data, self.column_indices) + + +def cluster_pairs_by_class2_coverage_custom_cost( + font: TTFont, + pairs: Pairs, + compression: int = 5, +) -> List[Pairs]: + if not pairs: + # The subtable was actually empty? 
+ return [pairs] + + # Sorted for reproducibility/determinism + all_class1 = sorted(set(pair[0] for pair in pairs)) + all_class2 = sorted(set(pair[1] for pair in pairs)) + + # Use Python's big ints for binary vectors representing each line + lines = [ + sum( + 1 << i if (class1, class2) in pairs else 0 + for i, class2 in enumerate(all_class2) + ) + for class1 in all_class1 + ] + + # Map glyph names to ids and work with ints throughout for ClassDef formats + name_to_id = font.getReverseGlyphMap() + # Each entry in the arrays below is (ranges, min_glyph_id, max_glyph_id) + all_class1_data = [ + _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1 + ] + all_class2_data = [ + _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2 + ] + + format1 = 0 + format2 = 0 + for pair, value in pairs.items(): + format1 |= value[0].getEffectiveFormat() if value[0] else 0 + format2 |= value[1].getEffectiveFormat() if value[1] else 0 + valueFormat1_bytes = bit_count(format1) * 2 + valueFormat2_bytes = bit_count(format2) * 2 + + ctx = ClusteringContext( + lines, + all_class1, + all_class1_data, + all_class2_data, + valueFormat1_bytes, + valueFormat2_bytes, + ) + + cluster_cache: Dict[int, Cluster] = {} + + def make_cluster(indices: int) -> Cluster: + cluster = cluster_cache.get(indices, None) + if cluster is not None: + return cluster + cluster = Cluster(ctx, indices) + cluster_cache[indices] = cluster + return cluster + + def merge(cluster: Cluster, other: Cluster) -> Cluster: + return make_cluster(cluster.indices_bitmask | other.indices_bitmask) + + # Agglomerative clustering by hand, checking the cost gain of the new + # cluster against the previously separate clusters + # Start with 1 cluster per line + # cluster = set of lines = new subtable + clusters = [make_cluster(1 << i) for i in range(len(lines))] + + # Cost of 1 cluster with everything + # `(1 << len) - 1` gives a bitmask full of 1's of length `len` + cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost + log.debug(f" len(clusters) = {len(clusters)}") + + while len(clusters) > 1: + lowest_cost_change = None + best_cluster_index = None + best_other_index = None + best_merged = None + for i, cluster in enumerate(clusters): + for j, other in enumerate(clusters[i + 1 :]): + merged = merge(cluster, other) + cost_change = merged.cost - cluster.cost - other.cost + if lowest_cost_change is None or cost_change < lowest_cost_change: + lowest_cost_change = cost_change + best_cluster_index = i + best_other_index = i + 1 + j + best_merged = merged + assert lowest_cost_change is not None + assert best_cluster_index is not None + assert best_other_index is not None + assert best_merged is not None + + # If the best merge we found is still taking down the file size, then + # there's no question: we must do it, because it's beneficial in both + # ways (lower file size and lower number of subtables). However, if the + # best merge we found is not reducing file size anymore, then we need to + # look at the other stop criteria = the compression factor. + if lowest_cost_change > 0: + # Stop criteria: check whether we should keep merging. + # Compute size reduction brought by splitting + cost_after_splitting = sum(c.cost for c in clusters) + # size_reduction so that after = before * (1 - size_reduction) + # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2 + size_reduction = 1 - cost_after_splitting / cost_before_splitting + + # Force more merging by taking into account the compression number.
+ # Target behaviour: compression number = 1 to 9, default 5 like gzip + # - 1 = accept to add 1 subtable to reduce size by 50% + # - 5 = accept to add 5 subtables to reduce size by 50% + # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691 + # Given the size reduction we have achieved so far, compute how many + # new subtables are acceptable. + max_new_subtables = -log2(1 - size_reduction) * compression + log.debug( + f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}", + ) + if compression == 9: + # Override level 9 to mean: create any number of subtables + max_new_subtables = len(clusters) + + # If we have managed to take the number of new subtables below the + # threshold, then we can stop. + if len(clusters) <= max_new_subtables + 1: + break + + # No reason to stop yet, do the merge and move on to the next. + del clusters[best_other_index] + clusters[best_cluster_index] = best_merged + + # All clusters are final; turn bitmasks back into the "Pairs" format + pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict) + for pair, values in pairs.items(): + pairs_by_class1[pair[0]][pair] = values + pairs_groups: List[Pairs] = [] + for cluster in clusters: + pairs_group: Pairs = dict() + for i in cluster.indices: + class1 = all_class1[i] + pairs_group.update(pairs_by_class1[class1]) + pairs_groups.append(pairs_group) + return pairs_groups diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7cb2bc7665f300236c25b70333da74a258ba372 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/areaPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/areaPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd1bf76952884cae6adeefb1171e877cd9471d4c Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/areaPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/basePen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/basePen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f58c02ecc6d6c894ff24aa8f9b2edd0e9c0857ea Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/basePen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f43150cd4c8ea372f431f380ebfda5a7d7c6082d Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/cairoPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cairoPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e8b9995a746f09292647c5807c2eebc05c497ee Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cairoPen.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb90ccc380959896cfa622139b027274eed4e1bf Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f93651c26ab4c5288163a6ecb94b8677afa9061 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/explicitClosingLinePen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/explicitClosingLinePen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31fcf4a5d8faccb441d567823c81395dd4e72c39 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/explicitClosingLinePen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/filterPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/filterPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab701452341ba67d3f8c9d65acaa6e2d800727e Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/filterPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/freetypePen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/freetypePen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3b3918ee821f39afa5dc0faf9106d58df6586d2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/freetypePen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb1da9ee381581dfcae2593c0a9825312cbac6c1 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7202086b591f8825bcbbe7bc0fe8cabea8f1fb10 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c8ed63ddd90cd3c2d1c34ab2400fa3ab818ec02 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d4189ea0c34b68bc2faf74ab9753a48fc1b82794 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d755eb0bd6d6909043df249fc20c87526fab1588 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/pointPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/qtPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/qtPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f59d8106724feaaab0e4d4f7619ffb8c6e334ec2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/qtPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/qu2cuPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/qu2cuPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8de6254f234c6b8e79de4544fbd00e6b0b78663 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/qu2cuPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15f886c6fd69abf56c445f42f767ada74534b0c9 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cd979336adc5257651fa62e8b604446f887d6cc Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6fe283012ce42ca6a45f60a68d4e442e7f6f5f1 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bafb4b45eafbe99eaded6aa3a78d3b186f43fc8 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..decc244d879cf79c97bc00e00e488b12c049beed Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbcdf8c8713f45ddd4e4c5130cc5a6943efb8389 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37ad216b74fc09985e9eb4b47393f9578e45d180 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36663605280e23592ff85c9e5e1c5fe1672c010c Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/teePen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/teePen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c653a5d3c610258a49148a1bf915cac357a8cb86 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/teePen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/transformPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/transformPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0072d62229082cb042d3690c2f0235ddc88f2b6f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/transformPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cf53083b58ce1212095af0932ef2d28f799fe97 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/pens/__pycache__/wxPen.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/pens/__pycache__/wxPen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d1e56a8d90bf2af8e644ed96b8edb5af9a0c309 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/pens/__pycache__/wxPen.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78ffaf2ce9ce5a9015740313c60405b324c5d690 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..39cdc997a94ca1872153c2e8ed1eb0dd7a899516 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/benchmark.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/benchmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fc90a94a79885a3b575efba7a3bace776193f44 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/benchmark.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/cli.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f6a5bbefab3212c388d991a423da720a7e9e82f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/cli.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/qu2cu.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/qu2cu.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34d5abc410cc2c49f50c5f0026eff5c56310d8d9 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/qu2cu/__pycache__/qu2cu.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/subset/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/subset/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ba6c9fbd7dfae05d8f9b7d4262187560eae3b7d Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/subset/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/subset/__pycache__/cff.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/subset/__pycache__/cff.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bfea308bb3f5d0fdaba8c03e27452e143419de2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/subset/__pycache__/cff.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/subset/__pycache__/svg.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/subset/__pycache__/svg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dce4f03631d26b513d014a34ba847f7721dcdcd5 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/subset/__pycache__/svg.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/subset/__pycache__/util.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/subset/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8239fc778d0b9d4b380658434c3d4f3c14e5e1 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/subset/__pycache__/util.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/svgLib/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/svgLib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fee19296f97f802a1640e4b70659aac68464f3c Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/svgLib/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/svgLib/path/__init__.py b/lib/python3.10/site-packages/fontTools/svgLib/path/__init__.py new 
file mode 100644 index 0000000000000000000000000000000000000000..043b4dbe1deb2d3a56e06fa43ed30694e7149544 --- /dev/null +++ b/lib/python3.10/site-packages/fontTools/svgLib/path/__init__.py @@ -0,0 +1,65 @@ +from fontTools.pens.transformPen import TransformPen +from fontTools.misc import etree +from fontTools.misc.textTools import tostr +from .parser import parse_path +from .shapes import PathBuilder + + +__all__ = [tostr(s) for s in ("SVGPath", "parse_path")] + + +class SVGPath(object): + """Parse SVG ``path`` elements from a file or string, and draw them + onto a glyph object that supports the FontTools Pen protocol. + + For example, reading from an SVG file and drawing to a Defcon Glyph: + + .. code-block:: + + import defcon + glyph = defcon.Glyph() + pen = glyph.getPen() + svg = SVGPath("path/to/a.svg") + svg.draw(pen) + + Or reading from a string containing SVG data, using the alternative + 'fromstring' (a class method): + + .. code-block:: + + data = ' 1: + rx *= sqrt(radii_scale) + ry *= sqrt(radii_scale) + self.rx, self.ry = rx, ry + + point_transform = Scale(1 / rx, 1 / ry).rotate(-self.angle) + + point1 = _map_point(point_transform, self.current_point) + point2 = _map_point(point_transform, self.target_point) + delta = point2 - point1 + + d = delta.real * delta.real + delta.imag * delta.imag + scale_factor_squared = max(1 / d - 0.25, 0.0) + + scale_factor = sqrt(scale_factor_squared) + if self.sweep == self.large: + scale_factor = -scale_factor + + delta *= scale_factor + center_point = (point1 + point2) * 0.5 + center_point += complex(-delta.imag, delta.real) + point1 -= center_point + point2 -= center_point + + theta1 = atan2(point1.imag, point1.real) + theta2 = atan2(point2.imag, point2.real) + + theta_arc = theta2 - theta1 + if theta_arc < 0 and self.sweep: + theta_arc += TWO_PI + elif theta_arc > 0 and not self.sweep: + theta_arc -= TWO_PI + + self.theta1 = theta1 + self.theta2 = theta1 + theta_arc + self.theta_arc = theta_arc + self.center_point = center_point + + return True + + def _decompose_to_cubic_curves(self): + if self.center_point is None and not self._parametrize(): + return + + point_transform = Identity.rotate(self.angle).scale(self.rx, self.ry) + + # Some results of atan2 on some platform implementations are not exact + # enough, so we can end up with more cubic curves than expected here. + # Adding 0.001 reduces the segment count to the correct number.
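+ # (Editorial note: the decomposition below emits one cubic per slice of at + # most ~90 degrees; e.g. a half-circle sweep theta_arc = pi gives + # ceil(pi / (pi/2 + 0.001)) = 2 segments, and each slice uses the standard + # control-point factor t = (4/3) * tan((end_theta - start_theta) / 4).)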
+ num_segments = int(ceil(fabs(self.theta_arc / (PI_OVER_TWO + 0.001)))) + for i in range(num_segments): + start_theta = self.theta1 + i * self.theta_arc / num_segments + end_theta = self.theta1 + (i + 1) * self.theta_arc / num_segments + + t = (4 / 3) * tan(0.25 * (end_theta - start_theta)) + if not isfinite(t): + return + + sin_start_theta = sin(start_theta) + cos_start_theta = cos(start_theta) + sin_end_theta = sin(end_theta) + cos_end_theta = cos(end_theta) + + point1 = complex( + cos_start_theta - t * sin_start_theta, + sin_start_theta + t * cos_start_theta, + ) + point1 += self.center_point + target_point = complex(cos_end_theta, sin_end_theta) + target_point += self.center_point + point2 = target_point + point2 += complex(t * sin_end_theta, -t * cos_end_theta) + + point1 = _map_point(point_transform, point1) + point2 = _map_point(point_transform, point2) + target_point = _map_point(point_transform, target_point) + + yield point1, point2, target_point + + def draw(self, pen): + for point1, point2, target_point in self._decompose_to_cubic_curves(): + pen.curveTo( + (point1.real, point1.imag), + (point2.real, point2.imag), + (target_point.real, target_point.imag), + ) diff --git a/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py b/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..18c8e77f7f7e5636530fa4424083f97d4208d810 --- /dev/null +++ b/lib/python3.10/site-packages/fontTools/svgLib/path/parser.py @@ -0,0 +1,322 @@ +# SVG Path specification parser. +# This is an adaptation from 'svg.path' by Lennart Regebro (@regebro), +# modified so that the parser takes a FontTools Pen object instead of +# returning a list of svg.path Path objects. +# The original code can be found at: +# https://github.com/regebro/svg.path/blob/4f9b6e3/src/svg/path/parser.py +# Copyright (c) 2013-2014 Lennart Regebro +# License: MIT + +from .arc import EllipticalArc +import re + + +COMMANDS = set("MmZzLlHhVvCcSsQqTtAa") +ARC_COMMANDS = set("Aa") +UPPERCASE = set("MZLHVCSQTA") + +COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])") + +# https://www.w3.org/TR/css-syntax-3/#number-token-diagram +# but -6.e-5 will be tokenized as "-6" then "-5" and confuse parsing +FLOAT_RE = re.compile( + r"[-+]?" # optional sign + r"(?:" + r"(?:0|[1-9][0-9]*)(?:\.[0-9]+)?(?:[eE][-+]?[0-9]+)?" # int/float + r"|" + r"(?:\.[0-9]+(?:[eE][-+]?[0-9]+)?)" # float with leading dot (e.g. 
'.42') + r")" +) +BOOL_RE = re.compile("^[01]") +SEPARATOR_RE = re.compile("[, \t]") + + +def _tokenize_path(pathdef): + arc_cmd = None + for x in COMMAND_RE.split(pathdef): + if x in COMMANDS: + arc_cmd = x if x in ARC_COMMANDS else None + yield x + continue + + if arc_cmd: + try: + yield from _tokenize_arc_arguments(x) + except ValueError as e: + raise ValueError(f"Invalid arc command: '{arc_cmd}{x}'") from e + else: + for token in FLOAT_RE.findall(x): + yield token + + +ARC_ARGUMENT_TYPES = ( + ("rx", FLOAT_RE), + ("ry", FLOAT_RE), + ("x-axis-rotation", FLOAT_RE), + ("large-arc-flag", BOOL_RE), + ("sweep-flag", BOOL_RE), + ("x", FLOAT_RE), + ("y", FLOAT_RE), +) + + +def _tokenize_arc_arguments(arcdef): + raw_args = [s for s in SEPARATOR_RE.split(arcdef) if s] + if not raw_args: + raise ValueError(f"Not enough arguments: '{arcdef}'") + raw_args.reverse() + + i = 0 + while raw_args: + arg = raw_args.pop() + + name, pattern = ARC_ARGUMENT_TYPES[i] + match = pattern.search(arg) + if not match: + raise ValueError(f"Invalid argument for '{name}' parameter: {arg!r}") + + j, k = match.span() + yield arg[j:k] + arg = arg[k:] + + if arg: + raw_args.append(arg) + + # wrap around every 7 consecutive arguments + if i == 6: + i = 0 + else: + i += 1 + + if i != 0: + raise ValueError(f"Not enough arguments: '{arcdef}'") + + +def parse_path(pathdef, pen, current_pos=(0, 0), arc_class=EllipticalArc): + """Parse SVG path definition (i.e. "d" attribute of <path> elements) + and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath + methods. + + If 'current_pos' (2-float tuple) is provided, the initial moveTo will + be relative to that instead of being absolute. + + If the pen has an "arcTo" method, it is called with the original values + of the elliptical arc curve commands: + + .. code-block:: + + pen.arcTo(rx, ry, rotation, arc_large, arc_sweep, (x, y)) + + Otherwise, the arcs are approximated by a series of cubic Bezier segments + ("curveTo"), one every 90 degrees. + """ + # In the SVG specs, initial movetos are absolute, even if + # specified as 'm'. This is the default behavior here as well. + # But if you pass in a current_pos variable, the initial moveto + # will be relative to that current_pos. This is useful. + current_pos = complex(*current_pos) + + elements = list(_tokenize_path(pathdef)) + # Reverse for easy use of .pop() + elements.reverse() + + start_pos = None + command = None + last_control = None + + have_arcTo = hasattr(pen, "arcTo") + + while elements: + if elements[-1] in COMMANDS: + # New command. + last_command = command # Used by S and T + command = elements.pop() + absolute = command in UPPERCASE + command = command.upper() + else: + # If this element starts with numbers, it is an implicit command + # and we don't change the command. Check that it's allowed: + if command is None: + raise ValueError( + "Unallowed implicit command in %s, position %s" + % (pathdef, len(pathdef.split()) - len(elements)) + ) + last_command = command # Used by S and T + + if command == "M": + # Moveto command.
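+ # (Editorial example: in the path 'M 10 10 20 20' the trailing pair has + # no command letter, so it re-enters this loop as an implicit command; + # because a moveto sets command = 'L' below, '20 20' becomes a lineTo, + # as the SVG spec requires.)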
+ x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if absolute: + current_pos = pos + else: + current_pos += pos + + # M is not preceded by Z; it's an open subpath + if start_pos is not None: + pen.endPath() + + pen.moveTo((current_pos.real, current_pos.imag)) + + # when M is called, reset start_pos + # This behavior of Z is defined in svg spec: + # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand + start_pos = current_pos + + # Implicit moveto commands are treated as lineto commands. + # So we set command to lineto here, in case there are + # further implicit commands after this moveto. + command = "L" + + elif command == "Z": + # Close path + if current_pos != start_pos: + pen.lineTo((start_pos.real, start_pos.imag)) + pen.closePath() + current_pos = start_pos + start_pos = None + command = None # You can't have implicit commands after closing. + + elif command == "L": + x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if not absolute: + pos += current_pos + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == "H": + x = elements.pop() + pos = float(x) + current_pos.imag * 1j + if not absolute: + pos += current_pos.real + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == "V": + y = elements.pop() + pos = current_pos.real + float(y) * 1j + if not absolute: + pos += current_pos.imag * 1j + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == "C": + control1 = float(elements.pop()) + float(elements.pop()) * 1j + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control1 += current_pos + control2 += current_pos + end += current_pos + + pen.curveTo( + (control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag), + ) + current_pos = end + last_control = control2 + + elif command == "S": + # Smooth curve. First control point is the "reflection" of + # the second control point in the previous path. + + if last_command not in "CS": + # If there is no previous command or if the previous command + # was not a C, c, S or s, assume the first control point is + # coincident with the current point. + control1 = current_pos + else: + # The first control point is assumed to be the reflection of + # the second control point on the previous command relative + # to the current point. + control1 = current_pos + current_pos - last_control + + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control2 += current_pos + end += current_pos + + pen.curveTo( + (control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag), + ) + current_pos = end + last_control = control2 + + elif command == "Q": + control = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control += current_pos + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == "T": + # Smooth curve. The control point is the "reflection" of + # the control point of the previous path. + + if last_command not in "QT": + # If there is no previous command or if the previous command + # was not a Q, q, T or t, assume the control point is + # coincident with the current point.
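+ # (Editorial worked example: after 'Q 20 0 40 0' we have + # current_pos = 40+0j and last_control = 20+0j, so a following 'T 80 0' + # takes the else branch and reflects to control = 40 + 40 - 20 = 60+0j.)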
+ control = current_pos + else: + # The control point is assumed to be the reflection of + # the control point on the previous command relative + # to the current point. + control = current_pos + current_pos - last_control + + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == "A": + rx = abs(float(elements.pop())) + ry = abs(float(elements.pop())) + rotation = float(elements.pop()) + arc_large = bool(int(elements.pop())) + arc_sweep = bool(int(elements.pop())) + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + end += current_pos + + # if the pen supports arcs, pass the values unchanged, otherwise + # approximate the arc with a series of cubic bezier curves + if have_arcTo: + pen.arcTo( + rx, + ry, + rotation, + arc_large, + arc_sweep, + (end.real, end.imag), + ) + else: + arc = arc_class( + current_pos, rx, ry, rotation, arc_large, arc_sweep, end + ) + arc.draw(pen) + + current_pos = end + + # no final Z command; it's an open path + if start_pos is not None: + pen.endPath() diff --git a/lib/python3.10/site-packages/fontTools/svgLib/path/shapes.py b/lib/python3.10/site-packages/fontTools/svgLib/path/shapes.py new file mode 100644 index 0000000000000000000000000000000000000000..3f22e6c6a3e4d24636e710f1920ebf04a822b159 --- /dev/null +++ b/lib/python3.10/site-packages/fontTools/svgLib/path/shapes.py @@ -0,0 +1,183 @@ +import re + + +def _prefer_non_zero(*args): + for arg in args: + if arg != 0: + return arg + return 0.0 + + +def _ntos(n): + # %f likes to add unnecessary 0's, %g isn't consistent about the number + # of decimals + return ("%.3f" % n).rstrip("0").rstrip(".") + + +def _strip_xml_ns(tag): + # ElementTree API doesn't provide a way to ignore XML namespaces in tags, + # so we strip them here ourselves: cf. https://bugs.python.org/issue18304 + return tag.split("}", 1)[1] if "}" in tag else tag + + +def _transform(raw_value): + # TODO assumes a 'matrix' transform. + # No other transform functions are supported at the moment. + # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform + # start simple: if you aren't exactly matrix(...)
then no love + match = re.match(r"matrix\((.*)\)", raw_value) + if not match: + raise NotImplementedError + matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1))) + if len(matrix) != 6: + raise ValueError("wrong # of terms in %s" % raw_value) + return matrix + + +class PathBuilder(object): + def __init__(self): + self.paths = [] + self.transforms = [] + + def _start_path(self, initial_path=""): + self.paths.append(initial_path) + self.transforms.append(None) + + def _end_path(self): + self._add("z") + + def _add(self, path_snippet): + path = self.paths[-1] + if path: + path += " " + path_snippet + else: + path = path_snippet + self.paths[-1] = path + + def _move(self, c, x, y): + self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) + + def M(self, x, y): + self._move("M", x, y) + + def m(self, x, y): + self._move("m", x, y) + + def _arc(self, c, rx, ry, x, y, large_arc): + self._add( + "%s%s,%s 0 %d 1 %s,%s" + % (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y)) + ) + + def A(self, rx, ry, x, y, large_arc=0): + self._arc("A", rx, ry, x, y, large_arc) + + def a(self, rx, ry, x, y, large_arc=0): + self._arc("a", rx, ry, x, y, large_arc) + + def _vhline(self, c, x): + self._add("%s%s" % (c, _ntos(x))) + + def H(self, x): + self._vhline("H", x) + + def h(self, x): + self._vhline("h", x) + + def V(self, y): + self._vhline("V", y) + + def v(self, y): + self._vhline("v", y) + + def _line(self, c, x, y): + self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) + + def L(self, x, y): + self._line("L", x, y) + + def l(self, x, y): + self._line("l", x, y) + + def _parse_line(self, line): + x1 = float(line.attrib.get("x1", 0)) + y1 = float(line.attrib.get("y1", 0)) + x2 = float(line.attrib.get("x2", 0)) + y2 = float(line.attrib.get("y2", 0)) + + self._start_path() + self.M(x1, y1) + self.L(x2, y2) + + def _parse_rect(self, rect): + x = float(rect.attrib.get("x", 0)) + y = float(rect.attrib.get("y", 0)) + w = float(rect.attrib.get("width")) + h = float(rect.attrib.get("height")) + rx = float(rect.attrib.get("rx", 0)) + ry = float(rect.attrib.get("ry", 0)) + + rx = _prefer_non_zero(rx, ry) + ry = _prefer_non_zero(ry, rx) + # TODO there are more rules for adjusting rx, ry + + self._start_path() + self.M(x + rx, y) + self.H(x + w - rx) + if rx > 0: + self.A(rx, ry, x + w, y + ry) + self.V(y + h - ry) + if rx > 0: + self.A(rx, ry, x + w - rx, y + h) + self.H(x + rx) + if rx > 0: + self.A(rx, ry, x, y + h - ry) + self.V(y + ry) + if rx > 0: + self.A(rx, ry, x + rx, y) + self._end_path() + + def _parse_path(self, path): + if "d" in path.attrib: + self._start_path(initial_path=path.attrib["d"]) + + def _parse_polygon(self, poly): + if "points" in poly.attrib: + self._start_path("M" + poly.attrib["points"]) + self._end_path() + + def _parse_polyline(self, poly): + if "points" in poly.attrib: + self._start_path("M" + poly.attrib["points"]) + + def _parse_circle(self, circle): + cx = float(circle.attrib.get("cx", 0)) + cy = float(circle.attrib.get("cy", 0)) + r = float(circle.attrib.get("r")) + + # arc doesn't seem to like being a complete shape, draw two halves + self._start_path() + self.M(cx - r, cy) + self.A(r, r, cx + r, cy, large_arc=1) + self.A(r, r, cx - r, cy, large_arc=1) + + def _parse_ellipse(self, ellipse): + cx = float(ellipse.attrib.get("cx", 0)) + cy = float(ellipse.attrib.get("cy", 0)) + rx = float(ellipse.attrib.get("rx")) + ry = float(ellipse.attrib.get("ry")) + + # arc doesn't seem to like being a complete shape, draw two halves + self._start_path() + self.M(cx - rx, cy) + self.A(rx, 
ry, cx + rx, cy, large_arc=1) + self.A(rx, ry, cx - rx, cy, large_arc=1) + + def add_path_from_element(self, el): + tag = _strip_xml_ns(el.tag) + parse_fn = getattr(self, "_parse_%s" % tag.lower(), None) + if not callable(parse_fn): + return False + parse_fn(el) + if "transform" in el.attrib: + self.transforms[-1] = _transform(el.attrib["transform"]) + return True diff --git a/lib/python3.10/site-packages/fontTools/t1Lib/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/t1Lib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3935601f1c5d8f8c4d00c827d6387c5561d567e6 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/t1Lib/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c155a1d70101ac5f6fac7307ff2ec9049c4b7439 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/O_S_2f_2.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/O_S_2f_2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e732f5bd0edb9b0f3b27282eb8b39a03d18f26b Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/O_S_2f_2.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_f.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_f.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd51624834701d307f50c3e55a645f5e7a9141f0 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_f.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_l.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_l.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b375add96561acb50a981f50813b84b84bf4e7f6 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/S__i_l_l.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_B_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_B_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e819d5f90e73a09f894725e89e43991e530c54b Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_B_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_C_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_C_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9658b6a6f09d953668ffea786698757e76370276 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_C_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_D_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_D_.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..c55098f19a8cba28f04af330665f6eab6501256e Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_D_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a71bf48dd257e4781f232aeee39f1c62c397a69 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_P_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_P_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a6c11ce23e6de76751a14f8a3c8dfeef3a1788f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_P_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e86bbb7cac74d96483866e861294ab4e3f7c1db Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_V_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_V_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8439f21d8d169267d144d0509005ce9db095fec2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_V_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07a0f80e78c77b2a616b783c4d8626c2d12f9432 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__1.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__1.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e42c53bf8d0ba1f41a4049d601f9cdbe643897e Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__1.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__2.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c018221921f0a37b16fbcbbd424443cdec0e11b Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__2.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__3.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccf8aa0bc93185410729ca689e4837d15931243d Binary files /dev/null and 
b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__3.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f1d86aa5322d6a39e6acee9c30c50fff92812b0 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f64e72a82d9b7da0c377d46a0815feec3d6c7a5 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7eb82c6eb10e7e29d4c278e6af5c9d2dff5f1d1 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_A_R_C_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_A_R_C_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1557d102e60a05c2bc11cffb33e336176702b5d Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_A_R_C_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_D_M_X_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_D_M_X_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e07807a32e5d6614d14ceb8d6a7091421b4c1b6d Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_D_M_X_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e781bfdb7cbaa9d68441cd7a75a7e1ffba4e9ca9 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f21c8620e187b2156118ef5ecb1a825ff2c7590 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_n_k_r.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_n_k_r.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c54b20ec9e99c4fe0e9db625ea080ff41216bd Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_n_k_r.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a26eb287a5795f90c503eb1e43132d73db60ece2 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_b_s_l_n.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_b_s_l_n.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a215b7c9942089680fb6de4a19a5666f0b993f Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_b_s_l_n.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_i_d_g.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_i_d_g.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c97e320adef084c3d6a9d605c72d31b4424fdc7 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_i_d_g.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a305e1afa848889a393e00ab7c07b79eec4fd284 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_a_r.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_a_r.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ade2425f64706194b2fe6c77ee8a24bf211a47c6 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_a_r.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63ce41233565c92b80bf2dc4843dea1443d10716 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/ttProgram.cpython-310.pyc b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/ttProgram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5774a6a9923d53d06e7751648c168085b100889 Binary files /dev/null and b/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/ttProgram.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/functorch/_C.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/functorch/_C.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..58a9e65236f0b1cdf90cc5ca26cfc1d2b3378113 --- /dev/null +++ b/lib/python3.10/site-packages/functorch/_C.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f6b86d81c23c19a26d0f698c9346cced6c712e56e9e1681e31fb5e670f38282 +size 320240 diff --git 
a/lib/python3.10/site-packages/gmpy2/gmpy2.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/gmpy2/gmpy2.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a93203951b495d2624ca04e1c8c1dc3c8dad0da4 --- /dev/null +++ b/lib/python3.10/site-packages/gmpy2/gmpy2.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f78293ef6bdaffad1d33750d695f846eccb4c810405bac7d0fba8ff6ddf6e3cd +size 671224 diff --git a/lib/python3.10/site-packages/jedi/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12798d4bd21d0970fa1aec3aa30f2987d3f1d095 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23c3fc385d72687db46059406fed2f9496b048d1 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/_compatibility.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/_compatibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..502151c26efa5b4ab42b12d0e7fa83dab9267d87 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/_compatibility.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/cache.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0365d6de5253b8dce6da3556f4b23b8073549152 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/cache.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/common.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac2bb7aa1b613c2a7e2b31da3f81f6ed5deca2e9 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/common.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/debug.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2e0b05cb52b6a8b0202410230e5ea0324157b35 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/debug.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/file_io.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/file_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdc7c63db074093c9ba0b772dfef7591ec53764a Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/file_io.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/parser_utils.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/parser_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa20a598eb1645b0b89e7081eeac88e47a0e675b Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/parser_utils.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jedi/__pycache__/settings.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c62ab76a801cfda8d26fafd049a452448d635870 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/settings.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/__pycache__/utils.cpython-310.pyc b/lib/python3.10/site-packages/jedi/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e12fcca613f8834b2485475d7d898dd3249ea6b8 Binary files /dev/null and b/lib/python3.10/site-packages/jedi/__pycache__/utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jedi/api/__init__.py b/lib/python3.10/site-packages/jedi/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aa4ffa8aff15d8aadf46a3668dd769356e0bb300 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/__init__.py @@ -0,0 +1,798 @@ +""" +The API basically only provides one class. You can create a :class:`Script` and +use its methods. + +Additionally you can add a debug function with :func:`set_debug_function`. +Alternatively, if you don't need a custom function and are happy with printing +debug messages to stdout, simply call :func:`set_debug_function` without +arguments. +""" +import sys +from pathlib import Path + +import parso +from parso.python import tree + +from jedi.parser_utils import get_executable_nodes +from jedi import debug +from jedi import settings +from jedi import cache +from jedi.file_io import KnownContentFileIO +from jedi.api import classes +from jedi.api import interpreter +from jedi.api import helpers +from jedi.api.helpers import validate_line_column +from jedi.api.completion import Completion, search_in_module +from jedi.api.keywords import KeywordName +from jedi.api.environment import InterpreterEnvironment +from jedi.api.project import get_default_project, Project +from jedi.api.errors import parso_to_jedi_errors +from jedi.api import refactoring +from jedi.api.refactoring.extract import extract_function, extract_variable +from jedi.inference import InferenceState +from jedi.inference import imports +from jedi.inference.references import find_references +from jedi.inference.arguments import try_iter_content +from jedi.inference.helpers import infer_call_of_leaf +from jedi.inference.sys_path import transform_path_to_dotted +from jedi.inference.syntax_tree import tree_name_to_values +from jedi.inference.value import ModuleValue +from jedi.inference.base_value import ValueSet +from jedi.inference.value.iterable import unpack_tuple_to_dict +from jedi.inference.gradual.conversion import convert_names, convert_values +from jedi.inference.gradual.utils import load_proper_stub_module +from jedi.inference.utils import to_list + +# Jedi uses lots and lots of recursion. By setting this a little bit higher, we +# can remove some "maximum recursion depth" errors. +sys.setrecursionlimit(3000) + + +class Script: + """ + A Script is the base for completions, goto or whatever you want to do with + Jedi. The counterpart of this class is :class:`Interpreter`, which works + with actual dictionaries and can work with a REPL. This class + should be used when a user edits code in an editor. + + You can either use the ``code`` parameter or ``path`` to read a file. + Usually you're going to want to use both of them (in an editor).
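+ + A minimal editorial sketch of editor-style usage (the file name and + source here are made up; ``complete`` is documented below): + + .. code-block:: + + import jedi + source = "import json\njson.lo" + script = jedi.Script(source, path="example.py") + # lines are 1-based, columns are 0-based + for completion in script.complete(2, 7): + print(completion.name) # e.g. 'load', 'loads'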
+ + The Script's ``sys.path`` is very customizable: + + - If `project` is provided with a ``sys_path``, that is going to be used. + - If `environment` is provided, its ``sys.path`` will be used + (see :func:`Environment.get_sys_path `); + - Otherwise ``sys.path`` will match that of the default environment of + Jedi, which typically matches the sys path that was used at the time + when Jedi was imported. + + Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are + always 1-based and columns are always zero-based. To avoid repetition they + are not always documented. You can omit both line and column. Jedi will + then just do whatever action you are calling at the end of the file. If you + provide only the line, Jedi will complete at the end of that line. + + .. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means + that parso reuses modules (i.e. they are not immutable). With this setting + Jedi is **not thread safe** and it is also not safe to use multiple + :class:`.Script` instances and their definitions at the same time. + + If you are a normal plugin developer this should not be an issue. It is + an issue for people that do more complex stuff with Jedi. + + This is purely a performance optimization and works pretty well for all + typical usages; however, consider turning the setting off if it causes + you problems. See also + `this discussion `_. + + :param code: The source code of the current file, separated by newlines. + :type code: str + :param path: The path of the file in the file system, or ``''`` if + it hasn't been saved yet. + :type path: str or pathlib.Path or None + :param Environment environment: Provide a predefined :ref:`Environment ` + to work with a specific Python version or virtualenv. + :param Project project: Provide a :class:`.Project` to make sure finding + references works well, because the right folder is searched. There are + also ways to modify the sys path and other things. + """ + def __init__(self, code=None, *, path=None, environment=None, project=None): + self._orig_path = path + if isinstance(path, str): + path = Path(path) + + self.path = path.absolute() if path else None + + if code is None: + if path is None: + raise ValueError("Must provide at least one of code or path") + + # TODO add a better warning than the traceback! + with open(path, 'rb') as f: + code = f.read() + + if project is None: + # Load the Python grammar of the current interpreter. + project = get_default_project(None if self.path is None else self.path.parent) + + self._inference_state = InferenceState( + project, environment=environment, script_path=self.path + ) + debug.speed('init') + self._module_node, code = self._inference_state.parse_and_get_code( + code=code, + path=self.path, + use_latest_grammar=path and path.suffix == '.pyi', + cache=False, # No disk cache, because the current script often changes. + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, + ) + debug.speed('parsed') + self._code_lines = parso.split_lines(code, keepends=True) + self._code = code + + cache.clear_time_caches() + debug.reset_time() + + # Cache the module; this is mostly useful for testing, since this shouldn't + # be called multiple times.
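+ # (Editorial note: ``cache.memoize_method`` below appears to memoize the + # result per Script instance and arguments, so the module is built only + # once per script.)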
+ @cache.memoize_method + def _get_module(self): + names = None + is_package = False + if self.path is not None: + import_names, is_p = transform_path_to_dotted( + self._inference_state.get_sys_path(add_parent_paths=False), + self.path + ) + if import_names is not None: + names = import_names + is_package = is_p + + if self.path is None: + file_io = None + else: + file_io = KnownContentFileIO(self.path, self._code) + if self.path is not None and self.path.suffix == '.pyi': + # We are in a stub file. Try to load the stub properly. + stub_module = load_proper_stub_module( + self._inference_state, + self._inference_state.latest_grammar, + file_io, + names, + self._module_node + ) + if stub_module is not None: + return stub_module + + if names is None: + names = ('__main__',) + + module = ModuleValue( + self._inference_state, self._module_node, + file_io=file_io, + string_names=names, + code_lines=self._code_lines, + is_package=is_package, + ) + if names[0] not in ('builtins', 'typing'): + # These modules are essential for Jedi, so don't overwrite them. + self._inference_state.module_cache.add(names, ValueSet([module])) + return module + + def _get_module_context(self): + return self._get_module().as_context() + + def __repr__(self): + return '<%s: %s %r>' % ( + self.__class__.__name__, + repr(self._orig_path), + self._inference_state.environment, + ) + + @validate_line_column + def complete(self, line=None, column=None, *, fuzzy=False): + """ + Completes objects under the cursor. + + Those objects contain information about the completions, more than just + names. + + :param fuzzy: Default False. Will return fuzzy completions, which means + that e.g. ``ooa`` will match ``foobar``. + :return: Completion objects, sorted by name. Normal names appear + before "private" names that start with ``_`` and those appear + before magic methods and name mangled names that start with ``__``. + :rtype: list of :class:`.Completion` + """ + self._inference_state.reset_recursion_limitations() + with debug.increase_indent_cm('complete'): + completion = Completion( + self._inference_state, self._get_module_context(), self._code_lines, + (line, column), self.get_signatures, fuzzy=fuzzy, + ) + return completion.complete() + + @validate_line_column + def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False): + """ + Return the definitions of the object under the cursor. It is basically a wrapper + around Jedi's type inference. + + This method follows complicated paths and returns the end, not the + first definition. The big difference between :meth:`goto` and + :meth:`infer` is that :meth:`goto` doesn't + follow imports and statements. Multiple objects may be returned, + because, depending on an option, you can have two different versions of a + function. + + :param only_stubs: Only return stubs for this method. + :param prefer_stubs: Prefer stubs to Python objects for this method.
+        :rtype: list of :class:`.Name`
+        """
+        self._inference_state.reset_recursion_limitations()
+        pos = line, column
+        leaf = self._module_node.get_name_of_position(pos)
+        if leaf is None:
+            leaf = self._module_node.get_leaf_for_position(pos)
+            if leaf is None or leaf.type == 'string':
+                return []
+            if leaf.end_pos == (line, column) and leaf.type == 'operator':
+                next_ = leaf.get_next_leaf()
+                if next_.start_pos == leaf.end_pos \
+                        and next_.type in ('number', 'string', 'keyword'):
+                    leaf = next_
+
+        context = self._get_module_context().create_context(leaf)
+
+        values = helpers.infer(self._inference_state, context, leaf)
+        values = convert_values(
+            values,
+            only_stubs=only_stubs,
+            prefer_stubs=prefer_stubs,
+        )
+
+        defs = [classes.Name(self._inference_state, c.name) for c in values]
+        # The additional set here allows the definitions to become unique in an
+        # API sense. In the internals we want to separate more things than in
+        # the API.
+        return helpers.sorted_definitions(set(defs))
+
+    @validate_line_column
+    def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False,
+             only_stubs=False, prefer_stubs=False):
+        """
+        Goes to the name that defined the object under the cursor. Optionally
+        you can follow imports.
+        Multiple objects may be returned, because a name can, for example,
+        refer to two different versions of a function.
+
+        :param follow_imports: The method will follow imports.
+        :param follow_builtin_imports: If ``follow_imports`` is True will try
+            to look up names in builtins (i.e. compiled or extension modules).
+        :param only_stubs: Only return stubs for this method.
+        :param prefer_stubs: Prefer stubs to Python objects for this method.
+        :rtype: list of :class:`.Name`
+        """
+        self._inference_state.reset_recursion_limitations()
+        tree_name = self._module_node.get_name_of_position((line, column))
+        if tree_name is None:
+            # Without a name we really just want to jump to the result, e.g.
+            # executed by `foo()`, if the cursor is after `)`.
+            return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
+        name = self._get_module_context().create_name(tree_name)
+
+        # Make it possible to goto the super class function/attribute
+        # definitions, when they are overwritten.
+        names = []
+        if name.tree_name.is_definition() and name.parent_context.is_class():
+            class_node = name.parent_context.tree_node
+            class_value = self._get_module_context().create_value(class_node)
+            mro = class_value.py__mro__()
+            next(mro)  # Ignore the first entry, because it's the class itself.
+            for cls in mro:
+                names = cls.goto(tree_name.value)
+                if names:
+                    break
+
+        if not names:
+            names = list(name.goto())
+
+        if follow_imports:
+            names = helpers.filter_follow_imports(names, follow_builtin_imports)
+        names = convert_names(
+            names,
+            only_stubs=only_stubs,
+            prefer_stubs=prefer_stubs,
+        )
+
+        defs = [classes.Name(self._inference_state, d) for d in set(names)]
+        # Avoid duplicates
+        return list(set(helpers.sorted_definitions(defs)))
+
+    def search(self, string, *, all_scopes=False):
+        """
+        Searches a name in the current file. For a description of what the
+        search string should look like, please have a look at
+        :meth:`.Project.search`.
+
+        :param bool all_scopes: Default False; searches not only for
+            definitions at the top level of a module, but also in
+            functions and classes.
+        :yields: :class:`.Name`
+        """
+        return self._search_func(string, all_scopes=all_scopes)
+
+    @to_list
+    def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
+        names = self._names(all_scopes=all_scopes)
+        wanted_type, wanted_names = helpers.split_search_string(string)
+        return search_in_module(
+            self._inference_state,
+            self._get_module_context(),
+            names=names,
+            wanted_type=wanted_type,
+            wanted_names=wanted_names,
+            complete=complete,
+            fuzzy=fuzzy,
+        )
+
+    def complete_search(self, string, **kwargs):
+        """
+        Like :meth:`.Script.search`, but completes that string. If you want to
+        have all possible definitions in a file you can also provide an empty
+        string.
+
+        :param bool all_scopes: Default False; searches not only for
+            definitions at the top level of a module, but also in
+            functions and classes.
+        :param fuzzy: Default False. Will return fuzzy completions, which
+            means that e.g. ``ooa`` will match ``foobar``.
+        :yields: :class:`.Completion`
+        """
+        return self._search_func(string, complete=True, **kwargs)
+
+    @validate_line_column
+    def help(self, line=None, column=None):
+        """
+        Used to display a help window to users. Uses :meth:`.Script.goto` and
+        returns additional definitions for keywords and operators.
+
+        Typically you will want to display :meth:`.BaseName.docstring` to the
+        user for all the returned definitions.
+
+        The additional definitions are ``Name(...).type == 'keyword'``.
+        These definitions do not have a lot of value apart from their docstring
+        attribute, which contains the output of Python's :func:`help` function.
+
+        :rtype: list of :class:`.Name`
+        """
+        self._inference_state.reset_recursion_limitations()
+        definitions = self.goto(line, column, follow_imports=True)
+        if definitions:
+            return definitions
+        leaf = self._module_node.get_leaf_for_position((line, column))
+
+        if leaf is not None and leaf.end_pos == (line, column) and leaf.type == 'newline':
+            next_ = leaf.get_next_leaf()
+            if next_ is not None and next_.start_pos == leaf.end_pos:
+                leaf = next_
+
+        if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
+            def need_pydoc():
+                if leaf.value in ('(', ')', '[', ']'):
+                    if leaf.parent.type == 'trailer':
+                        return False
+                    if leaf.parent.type == 'atom':
+                        return False
+                grammar = self._inference_state.grammar
+                # This parso stuff is not public, but since I control it, this
+                # is fine :-) ~dave
+                reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
+                return leaf.value in reserved
+
+            if need_pydoc():
+                name = KeywordName(self._inference_state, leaf.value)
+                return [classes.Name(self._inference_state, name)]
+        return []
+
+    @validate_line_column
+    def get_references(self, line=None, column=None, **kwargs):
+        """
+        Lists all references of a variable in a project. Since this can be
+        quite hard to do for Jedi, if it is too complicated, Jedi will stop
+        searching.
+
+        :param include_builtins: Default ``True``. If ``False``, checks if a definition
+            is a builtin (e.g. ``sys``) and in that case does not return it.
+        :param scope: Default ``'project'``. If ``'file'``, include references in
+            the current module only.
+        :rtype: list of :class:`.Name`
+        """
+        self._inference_state.reset_recursion_limitations()
+
+        def _references(include_builtins=True, scope='project'):
+            if scope not in ('project', 'file'):
+                raise ValueError('Only the scopes "file" and "project" are allowed')
+            tree_name = self._module_node.get_name_of_position((line, column))
+            if tree_name is None:
+                # Must be syntax
+                return []
+
+            names = find_references(self._get_module_context(), tree_name, scope == 'file')
+
+            definitions = [classes.Name(self._inference_state, n) for n in names]
+            if not include_builtins or scope == 'file':
+                definitions = [d for d in definitions if not d.in_builtin_module()]
+            return helpers.sorted_definitions(definitions)
+        return _references(**kwargs)
+
+    @validate_line_column
+    def get_signatures(self, line=None, column=None):
+        """
+        Return the function object of the call under the cursor.
+
+        E.g. if the cursor is here::
+
+            abs(# <-- cursor is here
+
+        This would return the ``abs`` function. On the other hand::
+
+            abs()# <-- cursor is here
+
+        This would return an empty list.
+
+        :rtype: list of :class:`.Signature`
+        """
+        self._inference_state.reset_recursion_limitations()
+        pos = line, column
+        call_details = helpers.get_signature_details(self._module_node, pos)
+        if call_details is None:
+            return []
+
+        context = self._get_module_context().create_context(call_details.bracket_leaf)
+        definitions = helpers.cache_signatures(
+            self._inference_state,
+            context,
+            call_details.bracket_leaf,
+            self._code_lines,
+            pos
+        )
+        debug.speed('func_call followed')
+
+        # TODO here we use stubs instead of the actual values. We should use
+        # the signatures from stubs, but the actual values, probably?!
+        return [classes.Signature(self._inference_state, signature, call_details)
+                for signature in definitions.get_signatures()]
+
+    @validate_line_column
+    def get_context(self, line=None, column=None):
+        """
+        Returns the scope context under the cursor. This basically means the
+        function, class or module where the cursor is at.
+
+        :rtype: :class:`.Name`
+        """
+        pos = (line, column)
+        leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
+        if leaf.start_pos > pos or leaf.type == 'endmarker':
+            previous_leaf = leaf.get_previous_leaf()
+            if previous_leaf is not None:
+                leaf = previous_leaf
+
+        module_context = self._get_module_context()
+
+        n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
+        if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
+            # This is a bit of a special case. The context of a function/class
+            # name/param/keyword is always its parent context, not the
+            # function itself. Catch all the cases here where we are before the
+            # suite object, but still in the function.
+            context = module_context.create_value(n).as_context()
+        else:
+            context = module_context.create_context(leaf)
+
+        while context.name is None:
+            context = context.parent_context  # comprehensions
+
+        definition = classes.Name(self._inference_state, context.name)
+        while definition.type != 'module':
+            name = definition._name  # TODO private access
+            tree_name = name.tree_name
+            if tree_name is not None:  # Happens with lambdas.
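+                    # Walk outwards until the enclosing definition starts to
+                    # the left of the cursor column; e.g. with the cursor in a
+                    # method body this stops at the method, not at the class.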
+ scope = tree_name.get_definition() + if scope.start_pos[1] < column: + break + definition = definition.parent() + return definition + + def _analysis(self): + self._inference_state.is_analysis = True + self._inference_state.analysis_modules = [self._module_node] + module = self._get_module_context() + try: + for node in get_executable_nodes(self._module_node): + context = module.create_context(node) + if node.type in ('funcdef', 'classdef'): + # Resolve the decorators. + tree_name_to_values(self._inference_state, context, node.children[1]) + elif isinstance(node, tree.Import): + import_names = set(node.get_defined_names()) + if node.is_nested(): + import_names |= set(path[-1] for path in node.get_paths()) + for n in import_names: + imports.infer_import(context, n) + elif node.type == 'expr_stmt': + types = context.infer_node(node) + for testlist in node.children[:-1:2]: + # Iterate tuples. + unpack_tuple_to_dict(context, types, testlist) + else: + if node.type == 'name': + defs = self._inference_state.infer(context, node) + else: + defs = infer_call_of_leaf(context, node) + try_iter_content(defs) + self._inference_state.reset_recursion_limitations() + + ana = [a for a in self._inference_state.analysis if self.path == a.path] + return sorted(set(ana), key=lambda x: x.line) + finally: + self._inference_state.is_analysis = False + + def get_names(self, **kwargs): + """ + Returns names defined in the current file. + + :param all_scopes: If True lists the names of all scopes instead of + only the module namespace. + :param definitions: If True lists the names that have been defined by a + class, function or a statement (``a = b`` returns ``a``). + :param references: If True lists all the names that are not listed by + ``definitions=True``. E.g. ``a = b`` returns ``b``. + :rtype: list of :class:`.Name` + """ + names = self._names(**kwargs) + return [classes.Name(self._inference_state, n) for n in names] + + def get_syntax_errors(self): + """ + Lists all syntax errors in the current file. + + :rtype: list of :class:`.SyntaxError` + """ + return parso_to_jedi_errors(self._inference_state.grammar, self._module_node) + + def _names(self, all_scopes=False, definitions=True, references=False): + self._inference_state.reset_recursion_limitations() + # Set line/column to a random position, because they don't matter. + module_context = self._get_module_context() + defs = [ + module_context.create_name(name) + for name in helpers.get_module_names( + self._module_node, + all_scopes=all_scopes, + definitions=definitions, + references=references, + ) + ] + return sorted(defs, key=lambda x: x.start_pos) + + def rename(self, line=None, column=None, *, new_name): + """ + Renames all references of the variable under the cursor. + + :param new_name: The variable under the cursor will be renamed to this + string. + :raises: :exc:`.RefactoringError` + :rtype: :class:`.Refactoring` + """ + definitions = self.get_references(line, column, include_builtins=False) + return refactoring.rename(self._inference_state, definitions, new_name) + + @validate_line_column + def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None): + """ + Moves an expression to a new statement. + + For example if you have the cursor on ``foo`` and provide a + ``new_name`` called ``bar``:: + + foo = 3.1 + x = int(foo + 1) + + the code above will become:: + + foo = 3.1 + bar = foo + 1 + x = int(bar) + + :param new_name: The expression under the cursor will be renamed to + this string. 
+        :param int until_line: The selection range ends at this line; when
+            omitted, Jedi will be clever and try to define the range itself.
+        :param int until_column: The selection range ends at this column; when
+            omitted, Jedi will be clever and try to define the range itself.
+        :raises: :exc:`.RefactoringError`
+        :rtype: :class:`.Refactoring`
+        """
+        if until_line is None and until_column is None:
+            until_pos = None
+        else:
+            if until_line is None:
+                until_line = line
+            if until_column is None:
+                until_column = len(self._code_lines[until_line - 1])
+            until_pos = until_line, until_column
+        return extract_variable(
+            self._inference_state, self.path, self._module_node,
+            new_name, (line, column), until_pos
+        )
+
+    @validate_line_column
+    def extract_function(self, line, column, *, new_name, until_line=None, until_column=None):
+        """
+        Moves an expression to a new function.
+
+        For example if you have the cursor on ``foo`` and provide a
+        ``new_name`` called ``bar``::
+
+            global_var = 3
+
+            def x():
+                foo = 3.1
+                x = int(foo + 1 + global_var)
+
+        the code above will become::
+
+            global_var = 3
+
+            def bar(foo):
+                return int(foo + 1 + global_var)
+
+            def x():
+                foo = 3.1
+                x = bar(foo)
+
+        :param new_name: The expression under the cursor will be replaced with
+            a function with this name.
+        :param int until_line: The selection range ends at this line; when
+            omitted, Jedi will be clever and try to define the range itself.
+        :param int until_column: The selection range ends at this column; when
+            omitted, Jedi will be clever and try to define the range itself.
+        :raises: :exc:`.RefactoringError`
+        :rtype: :class:`.Refactoring`
+        """
+        if until_line is None and until_column is None:
+            until_pos = None
+        else:
+            if until_line is None:
+                until_line = line
+            if until_column is None:
+                until_column = len(self._code_lines[until_line - 1])
+            until_pos = until_line, until_column
+        return extract_function(
+            self._inference_state, self.path, self._get_module_context(),
+            new_name, (line, column), until_pos
+        )
+
+    def inline(self, line=None, column=None):
+        """
+        Inlines a variable under the cursor. This is basically the opposite of
+        extracting a variable. For example with the cursor on ``bar``::
+
+            foo = 3.1
+            bar = foo + 1
+            x = int(bar)
+
+        the code above will become::
+
+            foo = 3.1
+            x = int(foo + 1)
+
+        :raises: :exc:`.RefactoringError`
+        :rtype: :class:`.Refactoring`
+        """
+        names = [d._name for d in self.get_references(line, column, include_builtins=True)]
+        return refactoring.inline(self._inference_state, names)
+
+
+class Interpreter(Script):
+    """
+    Jedi's API for Python REPLs.
+
+    Implements all of the methods that are present in :class:`.Script` as
+    well.
+
+    In addition to completions that normal REPL completion does like
+    ``str.upper``, Jedi also supports code completion based on static code
+    analysis. For example Jedi will complete ``str().upper``.
+
+    >>> from os.path import join
+    >>> namespace = locals()
+    >>> script = Interpreter('join("").up', [namespace])
+    >>> print(script.complete()[0].name)
+    upper
+
+    All keyword arguments are the same as the arguments for :class:`.Script`.
+
+    :param str code: Code to parse.
+    :type namespaces: typing.List[dict]
+    :param namespaces: A list of namespace dictionaries such as the one
+        returned by :func:`globals` and :func:`locals`.
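+
+    A further hedged sketch (``data`` is an assumed REPL variable, and the
+    printed result is only what one would typically expect)::
+
+        data = {'answer': 42}
+        script = Interpreter('data.ke', [locals()])
+        print([c.name for c in script.complete()])  # e.g. ['keys']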
+ """ + + def __init__(self, code, namespaces, *, project=None, **kwds): + try: + namespaces = [dict(n) for n in namespaces] + except Exception: + raise TypeError("namespaces must be a non-empty list of dicts.") + + environment = kwds.get('environment', None) + if environment is None: + environment = InterpreterEnvironment() + else: + if not isinstance(environment, InterpreterEnvironment): + raise TypeError("The environment needs to be an InterpreterEnvironment subclass.") + + if project is None: + project = Project(Path.cwd()) + + super().__init__(code, environment=environment, project=project, **kwds) + + self.namespaces = namespaces + self._inference_state.allow_unsafe_executions = \ + settings.allow_unsafe_interpreter_executions + # Dynamic params search is important when we work on functions that are + # called by other pieces of code. However for interpreter completions + # this is not important at all, because the current code is always new + # and will never be called by something. + # Also sometimes this logic goes a bit too far like in + # https://github.com/ipython/ipython/issues/13866, where it takes + # seconds to do a simple completion. + self._inference_state.do_dynamic_params_search = False + + @cache.memoize_method + def _get_module_context(self): + if self.path is None: + file_io = None + else: + file_io = KnownContentFileIO(self.path, self._code) + tree_module_value = ModuleValue( + self._inference_state, self._module_node, + file_io=file_io, + string_names=('__main__',), + code_lines=self._code_lines, + ) + return interpreter.MixedModuleContext( + tree_module_value, + self.namespaces, + ) + + +def preload_module(*modules): + """ + Preloading modules tells Jedi to load a module now, instead of lazy parsing + of modules. This can be useful for IDEs, to control which modules to load + on startup. + + :param modules: different module names, list of string. + """ + for m in modules: + s = "import %s as x; x." % m + Script(s).complete(1, len(s)) + + +def set_debug_function(func_cb=debug.print_to_stdout, warnings=True, + notices=True, speed=True): + """ + Define a callback debug function to get all the debug messages. + + If you don't specify any arguments, debug messages will be printed to stdout. + + :param func_cb: The callback function for debug messages. + """ + debug.debug_function = func_cb + debug.enable_warning = warnings + debug.enable_notice = notices + debug.enable_speed = speed diff --git a/lib/python3.10/site-packages/jedi/api/classes.py b/lib/python3.10/site-packages/jedi/api/classes.py new file mode 100644 index 0000000000000000000000000000000000000000..7054788e3fe19f3892513d1430a2fb79808d9eb8 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/classes.py @@ -0,0 +1,895 @@ +""" +There are a couple of classes documented in here: + +- :class:`.BaseName` as an abstact base class for almost everything. +- :class:`.Name` used in a lot of places +- :class:`.Completion` for completions +- :class:`.BaseSignature` as a base class for signatures +- :class:`.Signature` for :meth:`.Script.get_signatures` only +- :class:`.ParamName` used for parameters of signatures +- :class:`.Refactoring` for refactorings +- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only + +These classes are the much biggest part of the API, because they contain +the interesting information about all operations. 
+""" +import re +from pathlib import Path +from typing import Optional + +from parso.tree import search_ancestor + +from jedi import settings +from jedi import debug +from jedi.inference.utils import unite +from jedi.cache import memoize_method +from jedi.inference.compiled.mixed import MixedName +from jedi.inference.names import ImportName, SubModuleName +from jedi.inference.gradual.stub_value import StubModuleValue +from jedi.inference.gradual.conversion import convert_names, convert_values +from jedi.inference.base_value import ValueSet, HasNoContext +from jedi.api.keywords import KeywordName +from jedi.api import completion_cache +from jedi.api.helpers import filter_follow_imports + + +def _sort_names_by_start_pos(names): + return sorted(names, key=lambda s: s.start_pos or (0, 0)) + + +def defined_names(inference_state, value): + """ + List sub-definitions (e.g., methods in class). + + :type scope: Scope + :rtype: list of Name + """ + try: + context = value.as_context() + except HasNoContext: + return [] + filter = next(context.get_filters()) + names = [name for name in filter.values()] + return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)] + + +def _values_to_definitions(values): + return [Name(c.inference_state, c.name) for c in values] + + +class BaseName: + """ + The base class for all definitions, completions and signatures. + """ + _mapping = { + 'posixpath': 'os.path', + 'riscospath': 'os.path', + 'ntpath': 'os.path', + 'os2emxpath': 'os.path', + 'macpath': 'os.path', + 'genericpath': 'os.path', + 'posix': 'os', + '_io': 'io', + '_functools': 'functools', + '_collections': 'collections', + '_socket': 'socket', + '_sqlite3': 'sqlite3', + } + + _tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in { + 'argparse._ActionsContainer': 'argparse.ArgumentParser', + }.items()) + + def __init__(self, inference_state, name): + self._inference_state = inference_state + self._name = name + """ + An instance of :class:`parso.python.tree.Name` subclass. + """ + self.is_keyword = isinstance(self._name, KeywordName) + + @memoize_method + def _get_module_context(self): + # This can take a while to complete, because in the worst case of + # imports (consider `import a` completions), we need to load all + # modules starting with a first. + return self._name.get_root_context() + + @property + def module_path(self) -> Optional[Path]: + """ + Shows the file path of a module. e.g. ``/usr/lib/python3.9/os.py`` + """ + module = self._get_module_context() + if module.is_stub() or not module.is_compiled(): + # Compiled modules should not return a module path even if they + # have one. + path: Optional[Path] = self._get_module_context().py__file__() + return path + + return None + + @property + def name(self): + """ + Name of variable/function/class/module. + + For example, for ``x = None`` it returns ``'x'``. + + :rtype: str or None + """ + return self._name.get_public_name() + + @property + def type(self): + """ + The type of the definition. + + Here is an example of the value of this attribute. Let's consider + the following source. As what is in ``variable`` is unambiguous + to Jedi, :meth:`jedi.Script.infer` should return a list of + definition for ``sys``, ``f``, ``C`` and ``x``. + + >>> from jedi import Script + >>> source = ''' + ... import keyword + ... + ... class C: + ... pass + ... + ... class D: + ... pass + ... + ... x = D() + ... + ... def f(): + ... pass + ... + ... for variable in [keyword, f, C, x]: + ... 
+        ...     variable'''
+
+        >>> script = Script(source)
+        >>> defs = script.infer()
+
+        Before showing what is in ``defs``, let's sort it by :attr:`line`
+        so that it is easy to relate the result to the source code.
+
+        >>> defs = sorted(defs, key=lambda d: d.line)
+        >>> print(defs)  # doctest: +NORMALIZE_WHITESPACE
+        [<Name full_name='keyword', description='module keyword'>,
+         <Name full_name='__main__.C', description='class C'>,
+         <Name full_name='__main__.D', description='instance D'>,
+         <Name full_name='__main__.f', description='def f'>]
+
+        Finally, here is what you can get from :attr:`type`:
+
+        >>> defs = [d.type for d in defs]
+        >>> defs[0]
+        'module'
+        >>> defs[1]
+        'class'
+        >>> defs[2]
+        'instance'
+        >>> defs[3]
+        'function'
+
+        Valid values for type are ``module``, ``class``, ``instance``, ``function``,
+        ``param``, ``path``, ``keyword``, ``property`` and ``statement``.
+
+        """
+        tree_name = self._name.tree_name
+        resolve = False
+        if tree_name is not None:
+            # TODO move this to their respective names.
+            definition = tree_name.get_definition()
+            if definition is not None and definition.type == 'import_from' and \
+                    tree_name.is_definition():
+                resolve = True
+
+        if isinstance(self._name, SubModuleName) or resolve:
+            for value in self._name.infer():
+                return value.api_type
+        return self._name.api_type
+
+    @property
+    def module_name(self):
+        """
+        The module name, a bit similar to what ``__name__`` is in a random
+        Python module.
+
+        >>> from jedi import Script
+        >>> source = 'import json'
+        >>> script = Script(source, path='example.py')
+        >>> d = script.infer()[0]
+        >>> print(d.module_name)  # doctest: +ELLIPSIS
+        json
+        """
+        return self._get_module_context().py__name__()
+
+    def in_builtin_module(self):
+        """
+        Returns True, if this is a builtin module.
+        """
+        value = self._get_module_context().get_value()
+        if isinstance(value, StubModuleValue):
+            return any(v.is_compiled() for v in value.non_stub_value_set)
+        return value.is_compiled()
+
+    @property
+    def line(self):
+        """The line where the definition occurs (starting with 1)."""
+        start_pos = self._name.start_pos
+        if start_pos is None:
+            return None
+        return start_pos[0]
+
+    @property
+    def column(self):
+        """The column where the definition occurs (starting with 0)."""
+        start_pos = self._name.start_pos
+        if start_pos is None:
+            return None
+        return start_pos[1]
+
+    def get_definition_start_position(self):
+        """
+        The (row, column) of the start of the definition range. Rows start with
+        1, columns start with 0.
+
+        :rtype: Optional[Tuple[int, int]]
+        """
+        if self._name.tree_name is None:
+            return None
+        definition = self._name.tree_name.get_definition()
+        if definition is None:
+            return self._name.start_pos
+        return definition.start_pos
+
+    def get_definition_end_position(self):
+        """
+        The (row, column) of the end of the definition range. Rows start with
+        1, columns start with 0.
+
+        :rtype: Optional[Tuple[int, int]]
+        """
+        if self._name.tree_name is None:
+            return None
+        definition = self._name.tree_name.get_definition()
+        if definition is None:
+            return self._name.tree_name.end_pos
+        if self.type in ("function", "class"):
+            last_leaf = definition.get_last_leaf()
+            if last_leaf.type == "newline":
+                return last_leaf.get_previous_leaf().end_pos
+            return last_leaf.end_pos
+        return definition.end_pos
+
+    def docstring(self, raw=False, fast=True):
+        r"""
+        Return a document string for this completion object.
+
+        Example:
+
+        >>> from jedi import Script
+        >>> source = '''\
+        ... def f(a, b=1):
+        ...     "Document for function f."
+        ... '''
+        >>> script = Script(source, path='example.py')
+        >>> doc = script.infer(1, len('def f'))[0].docstring()
+        >>> print(doc)
+        f(a, b=1)
+        <BLANKLINE>
+        Document for function f.
+
+        Notice that useful extra information is added to the actual
+        docstring, e.g. function signatures are prepended to their docstrings.
+        If you need the actual docstring, use ``raw=True`` instead.
+
+        >>> print(script.infer(1, len('def f'))[0].docstring(raw=True))
+        Document for function f.
+
+        :param fast: Don't follow imports that are only one level deep like
+            ``import foo``, but follow ``from foo import bar``. This makes
+            sense for speed reasons. Completing ``import a`` is slow if you use
+            ``foo.docstring(fast=False)`` on every object, because it
+            parses all libraries starting with ``a``.
+        """
+        if isinstance(self._name, ImportName) and fast:
+            return ''
+        doc = self._get_docstring()
+        if raw:
+            return doc
+
+        signature_text = self._get_docstring_signature()
+        if signature_text and doc:
+            return signature_text + '\n\n' + doc
+        else:
+            return signature_text + doc
+
+    def _get_docstring(self):
+        return self._name.py__doc__()
+
+    def _get_docstring_signature(self):
+        return '\n'.join(
+            signature.to_string()
+            for signature in self._get_signatures(for_docstring=True)
+        )
+
+    @property
+    def description(self):
+        """
+        A description of the :class:`.Name` object, which is heavily used
+        in testing, e.g. for ``isinstance`` it returns ``def isinstance``.
+
+        Example:
+
+        >>> from jedi import Script
+        >>> source = '''
+        ... def f():
+        ...     pass
+        ...
+        ... class C:
+        ...     pass
+        ...
+        ... variable = f if random.choice([0,1]) else C'''
+        >>> script = Script(source)  # line is maximum by default
+        >>> defs = script.infer(column=3)
+        >>> defs = sorted(defs, key=lambda d: d.line)
+        >>> print(defs)  # doctest: +NORMALIZE_WHITESPACE
+        [<Name full_name='__main__.f', description='def f'>,
+         <Name full_name='__main__.C', description='class C'>]
+        >>> str(defs[0].description)
+        'def f'
+        >>> str(defs[1].description)
+        'class C'
+
+        """
+        typ = self.type
+        tree_name = self._name.tree_name
+        if typ == 'param':
+            return typ + ' ' + self._name.to_string()
+        if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
+            if typ == 'function':
+                # For the description we want a short and a pythonic way.
+                typ = 'def'
+            return typ + ' ' + self._name.get_public_name()
+
+        definition = tree_name.get_definition(include_setitem=True) or tree_name
+        # Remove the prefix, because that's not what we want for get_code
+        # here.
+        txt = definition.get_code(include_prefix=False)
+        # Delete comments:
+        txt = re.sub(r'#[^\n]+\n', ' ', txt)
+        # Delete multi spaces/newlines
+        txt = re.sub(r'\s+', ' ', txt).strip()
+        return txt
+
+    @property
+    def full_name(self):
+        """
+        Dot-separated path of this object.
+
+        It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
+        It is useful when you want to look up Python manual of the
+        object at hand.
+
+        Example:
+
+        >>> from jedi import Script
+        >>> source = '''
+        ... import os
+        ... os.path.join'''
+        >>> script = Script(source, path='example.py')
+        >>> print(script.infer(3, len('os.path.join'))[0].full_name)
+        os.path.join
+
+        Notice that it returns ``'os.path.join'`` instead of (for example)
+        ``'posixpath.join'``. This is not correct, since the module's name
+        would be ``<module 'posixpath' ...>``. However most users find the
+        former more practical.
+        """
+        if not self._name.is_value_name:
+            return None
+
+        names = self._name.get_qualified_names(include_module_names=True)
+        if names is None:
+            return None
+
+        names = list(names)
+        try:
+            names[0] = self._mapping[names[0]]
+        except KeyError:
+            pass
+
+        return '.'.join(names)
+
+    def is_stub(self):
+        """
+        Returns True if the current name is defined in a stub file.
+ """ + if not self._name.is_value_name: + return False + + return self._name.get_root_context().is_stub() + + def is_side_effect(self): + """ + Checks if a name is defined as ``self.foo = 3``. In case of self, this + function would return False, for foo it would return True. + """ + tree_name = self._name.tree_name + if tree_name is None: + return False + return tree_name.is_definition() and tree_name.parent.type == 'trailer' + + @debug.increase_indent_cm('goto on name') + def goto(self, *, follow_imports=False, follow_builtin_imports=False, + only_stubs=False, prefer_stubs=False): + + """ + Like :meth:`.Script.goto` (also supports the same params), but does it + for the current name. This is typically useful if you are using + something like :meth:`.Script.get_names()`. + + :param follow_imports: The goto call will follow imports. + :param follow_builtin_imports: If follow_imports is True will try to + look up names in builtins (i.e. compiled or extension modules). + :param only_stubs: Only return stubs for this goto call. + :param prefer_stubs: Prefer stubs to Python objects for this goto call. + :rtype: list of :class:`Name` + """ + if not self._name.is_value_name: + return [] + + names = self._name.goto() + if follow_imports: + names = filter_follow_imports(names, follow_builtin_imports) + names = convert_names( + names, + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + return [self if n == self._name else Name(self._inference_state, n) + for n in names] + + @debug.increase_indent_cm('infer on name') + def infer(self, *, only_stubs=False, prefer_stubs=False): + """ + Like :meth:`.Script.infer`, it can be useful to understand which type + the current name has. + + Return the actual definitions. I strongly recommend not using it for + your completions, because it might slow down |jedi|. If you want to + read only a few objects (<=20), it might be useful, especially to get + the original docstrings. The basic problem of this function is that it + follows all results. This means with 1000 completions (e.g. numpy), + it's just very, very slow. + + :param only_stubs: Only return stubs for this goto call. + :param prefer_stubs: Prefer stubs to Python objects for this type + inference call. + :rtype: list of :class:`Name` + """ + assert not (only_stubs and prefer_stubs) + + if not self._name.is_value_name: + return [] + + # First we need to make sure that we have stub names (if possible) that + # we can follow. If we don't do that, we can end up with the inferred + # results of Python objects instead of stubs. + names = convert_names([self._name], prefer_stubs=True) + values = convert_values( + ValueSet.from_sets(n.infer() for n in names), + only_stubs=only_stubs, + prefer_stubs=prefer_stubs, + ) + resulting_names = [c.name for c in values] + return [self if n == self._name else Name(self._inference_state, n) + for n in resulting_names] + + def parent(self): + """ + Returns the parent scope of this identifier. + + :rtype: Name + """ + if not self._name.is_value_name: + return None + + if self.type in ('function', 'class', 'param') and self._name.tree_name is not None: + # Since the parent_context doesn't really match what the user + # thinks of that the parent is here, we do these cases separately. + # The reason for this is the following: + # - class: Nested classes parent_context is always the + # parent_context of the most outer one. + # - function: Functions in classes have the module as + # parent_context. 
+ # - param: The parent_context of a param is not its function but + # e.g. the outer class or module. + cls_or_func_node = self._name.tree_name.get_definition() + parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input') + context = self._get_module_context().create_value(parent).as_context() + else: + context = self._name.parent_context + + if context is None: + return None + while context.name is None: + # Happens for comprehension contexts + context = context.parent_context + + return Name(self._inference_state, context.name) + + def __repr__(self): + return "<%s %sname=%r, description=%r>" % ( + self.__class__.__name__, + 'full_' if self.full_name else '', + self.full_name or self.name, + self.description, + ) + + def get_line_code(self, before=0, after=0): + """ + Returns the line of code where this object was defined. + + :param before: Add n lines before the current line to the output. + :param after: Add n lines after the current line to the output. + + :return str: Returns the line(s) of code or an empty string if it's a + builtin. + """ + if not self._name.is_value_name: + return '' + + lines = self._name.get_root_context().code_lines + if lines is None: + # Probably a builtin module, just ignore in that case. + return '' + + index = self._name.start_pos[0] - 1 + start_index = max(index - before, 0) + return ''.join(lines[start_index:index + after + 1]) + + def _get_signatures(self, for_docstring=False): + if self._name.api_type == 'property': + return [] + if for_docstring and self._name.api_type == 'statement' and not self.is_stub(): + # For docstrings we don't resolve signatures if they are simple + # statements and not stubs. This is a speed optimization. + return [] + + if isinstance(self._name, MixedName): + # While this would eventually happen anyway, it's basically just a + # shortcut to not infer anything tree related, because it's really + # not necessary. + return self._name.infer_compiled_value().get_signatures() + + names = convert_names([self._name], prefer_stubs=True) + return [sig for name in names for sig in name.infer().get_signatures()] + + def get_signatures(self): + """ + Returns all potential signatures for a function or a class. Multiple + signatures are typical if you use Python stubs with ``@overload``. + + :rtype: list of :class:`BaseSignature` + """ + return [ + BaseSignature(self._inference_state, s) + for s in self._get_signatures() + ] + + def execute(self): + """ + Uses type inference to "execute" this identifier and returns the + executed objects. + + :rtype: list of :class:`Name` + """ + return _values_to_definitions(self._name.infer().execute_with_values()) + + def get_type_hint(self): + """ + Returns type hints like ``Iterable[int]`` or ``Union[int, str]``. + + This method might be quite slow, especially for functions. The problem + is finding executions for those functions to return something like + ``Callable[[int, str], str]``. + + :rtype: str + """ + return self._name.infer().get_type_hint() + + +class Completion(BaseName): + """ + ``Completion`` objects are returned from :meth:`.Script.complete`. They + provide additional information about a completion. 
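+
+    A hedged sketch of typical use (source, cursor position and the results
+    shown are illustrative only)::
+
+        import jedi
+        comps = jedi.Script('import json\njson.lo').complete(2, 7)
+        # e.g. comps[0].name == 'load' and comps[0].complete == 'ad'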
+ """ + def __init__(self, inference_state, name, stack, like_name_length, + is_fuzzy, cached_name=None): + super().__init__(inference_state, name) + + self._like_name_length = like_name_length + self._stack = stack + self._is_fuzzy = is_fuzzy + self._cached_name = cached_name + + # Completion objects with the same Completion name (which means + # duplicate items in the completion) + self._same_name_completions = [] + + def _complete(self, like_name): + append = '' + if settings.add_bracket_after_function \ + and self.type == 'function': + append = '(' + + name = self._name.get_public_name() + if like_name: + name = name[self._like_name_length:] + return name + append + + @property + def complete(self): + """ + Only works with non-fuzzy completions. Returns None if fuzzy + completions are used. + + Return the rest of the word, e.g. completing ``isinstance``:: + + isinstan# <-- Cursor is here + + would return the string 'ce'. It also adds additional stuff, depending + on your ``settings.py``. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would give a ``Completion`` which ``complete`` + would be ``am=``. + """ + if self._is_fuzzy: + return None + return self._complete(True) + + @property + def name_with_symbols(self): + """ + Similar to :attr:`.name`, but like :attr:`.name` returns also the + symbols, for example assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(`` would give a ``Completion`` which + ``name_with_symbols`` would be "param=". + + """ + return self._complete(False) + + def docstring(self, raw=False, fast=True): + """ + Documented under :meth:`BaseName.docstring`. + """ + if self._like_name_length >= 3: + # In this case we can just resolve the like name, because we + # wouldn't load like > 100 Python modules anymore. + fast = False + + return super().docstring(raw=raw, fast=fast) + + def _get_docstring(self): + if self._cached_name is not None: + return completion_cache.get_docstring( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + return super()._get_docstring() + + def _get_docstring_signature(self): + if self._cached_name is not None: + return completion_cache.get_docstring_signature( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + return super()._get_docstring_signature() + + def _get_cache(self): + return ( + super().type, + super()._get_docstring_signature(), + super()._get_docstring(), + ) + + @property + def type(self): + """ + Documented under :meth:`BaseName.type`. + """ + # Purely a speed optimization. + if self._cached_name is not None: + return completion_cache.get_type( + self._cached_name, + self._name.get_public_name(), + lambda: self._get_cache() + ) + + return super().type + + def get_completion_prefix_length(self): + """ + Returns the length of the prefix being completed. + For example, completing ``isinstance``:: + + isinstan# <-- Cursor is here + + would return 8, because len('isinstan') == 8. + + Assuming the following function definition:: + + def foo(param=0): + pass + + completing ``foo(par`` would return 3. + """ + return self._like_name_length + + def __repr__(self): + return '<%s: %s>' % (type(self).__name__, self._name.get_public_name()) + + +class Name(BaseName): + """ + *Name* objects are returned from many different APIs including + :meth:`.Script.goto` or :meth:`.Script.infer`. 
+ """ + def __init__(self, inference_state, definition): + super().__init__(inference_state, definition) + + @memoize_method + def defined_names(self): + """ + List sub-definitions (e.g., methods in class). + + :rtype: list of :class:`Name` + """ + defs = self._name.infer() + return sorted( + unite(defined_names(self._inference_state, d) for d in defs), + key=lambda s: s._name.start_pos or (0, 0) + ) + + def is_definition(self): + """ + Returns True, if defined as a name in a statement, function or class. + Returns False, if it's a reference to such a definition. + """ + if self._name.tree_name is None: + return True + else: + return self._name.tree_name.is_definition() + + def __eq__(self, other): + return self._name.start_pos == other._name.start_pos \ + and self.module_path == other.module_path \ + and self.name == other.name \ + and self._inference_state == other._inference_state + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self._name.start_pos, self.module_path, self.name, self._inference_state)) + + +class BaseSignature(Name): + """ + These signatures are returned by :meth:`BaseName.get_signatures` + calls. + """ + def __init__(self, inference_state, signature): + super().__init__(inference_state, signature.name) + self._signature = signature + + @property + def params(self): + """ + Returns definitions for all parameters that a signature defines. + This includes stuff like ``*args`` and ``**kwargs``. + + :rtype: list of :class:`.ParamName` + """ + return [ParamName(self._inference_state, n) + for n in self._signature.get_param_names(resolve_stars=True)] + + def to_string(self): + """ + Returns a text representation of the signature. This could for example + look like ``foo(bar, baz: int, **kwargs)``. + + :rtype: str + """ + return self._signature.to_string() + + +class Signature(BaseSignature): + """ + A full signature object is the return value of + :meth:`.Script.get_signatures`. + """ + def __init__(self, inference_state, signature, call_details): + super().__init__(inference_state, signature) + self._call_details = call_details + self._signature = signature + + @property + def index(self): + """ + Returns the param index of the current cursor position. + Returns None if the index cannot be found in the curent call. + + :rtype: int + """ + return self._call_details.calculate_index( + self._signature.get_param_names(resolve_stars=True) + ) + + @property + def bracket_start(self): + """ + Returns a line/column tuple of the bracket that is responsible for the + last function call. The first line is 1 and the first column 0. + + :rtype: int, int + """ + return self._call_details.bracket_leaf.start_pos + + def __repr__(self): + return '<%s: index=%r %s>' % ( + type(self).__name__, + self.index, + self._signature.to_string(), + ) + + +class ParamName(Name): + def infer_default(self): + """ + Returns default values like the ``1`` of ``def foo(x=1):``. + + :rtype: list of :class:`.Name` + """ + return _values_to_definitions(self._name.infer_default()) + + def infer_annotation(self, **kwargs): + """ + :param execute_annotation: Default True; If False, values are not + executed and classes are returned instead of instances. + :rtype: list of :class:`.Name` + """ + return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs)) + + def to_string(self): + """ + Returns a simple representation of a param, like + ``f: Callable[..., Any]``. 
+ + :rtype: str + """ + return self._name.to_string() + + @property + def kind(self): + """ + Returns an enum instance of :mod:`inspect`'s ``Parameter`` enum. + + :rtype: :py:attr:`inspect.Parameter.kind` + """ + return self._name.get_kind() diff --git a/lib/python3.10/site-packages/jedi/api/completion.py b/lib/python3.10/site-packages/jedi/api/completion.py new file mode 100644 index 0000000000000000000000000000000000000000..3078d83f42686bba3268d40e9c9f89a7d5512a15 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/completion.py @@ -0,0 +1,696 @@ +import re +from textwrap import dedent +from inspect import Parameter + +from parso.python.token import PythonTokenTypes +from parso.python import tree +from parso.tree import search_ancestor, Leaf +from parso import split_lines + +from jedi import debug +from jedi import settings +from jedi.api import classes +from jedi.api import helpers +from jedi.api import keywords +from jedi.api.strings import complete_dict +from jedi.api.file_name import complete_file_name +from jedi.inference import imports +from jedi.inference.base_value import ValueSet +from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names +from jedi.inference.context import get_global_filters +from jedi.inference.value import TreeInstance +from jedi.inference.docstring_utils import DocstringModule +from jedi.inference.names import ParamNameWrapper, SubModuleName +from jedi.inference.gradual.conversion import convert_values, convert_names +from jedi.parser_utils import cut_value_at_position +from jedi.plugins import plugin_manager + + +class ParamNameWithEquals(ParamNameWrapper): + def get_public_name(self): + return self.string_name + '=' + + +def _get_signature_param_names(signatures, positional_count, used_kwargs): + # Add named params + for call_sig in signatures: + for i, p in enumerate(call_sig.params): + kind = p.kind + if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD: + continue + if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \ + and p.name not in used_kwargs: + yield ParamNameWithEquals(p._name) + + +def _must_be_kwarg(signatures, positional_count, used_kwargs): + if used_kwargs: + return True + + must_be_kwarg = True + for signature in signatures: + for i, p in enumerate(signature.params): + kind = p.kind + if kind is Parameter.VAR_POSITIONAL: + # In case there were not already kwargs, the next param can + # always be a normal argument. 
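+                # (Illustrative: with ``def f(a, *args): ...`` a call such as
+                # ``f(1, 2, `` can still take positional arguments, so
+                # completion must not be restricted to keyword names.)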
+ return False + + if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD, + Parameter.POSITIONAL_ONLY): + must_be_kwarg = False + break + if not must_be_kwarg: + break + return must_be_kwarg + + +def filter_names(inference_state, completion_names, stack, like_name, fuzzy, + imported_names, cached_name): + comp_dct = set() + if settings.case_insensitive_completion: + like_name = like_name.lower() + for name in completion_names: + string = name.string_name + if string in imported_names and string != like_name: + continue + if settings.case_insensitive_completion: + string = string.lower() + if helpers.match(string, like_name, fuzzy=fuzzy): + new = classes.Completion( + inference_state, + name, + stack, + len(like_name), + is_fuzzy=fuzzy, + cached_name=cached_name, + ) + k = (new.name, new.complete) # key + if k not in comp_dct: + comp_dct.add(k) + tree_name = name.tree_name + if tree_name is not None: + definition = tree_name.get_definition() + if definition is not None and definition.type == 'del_stmt': + continue + yield new + + +def _remove_duplicates(completions, other_completions): + names = {d.name for d in other_completions} + return [c for c in completions if c.name not in names] + + +def get_user_context(module_context, position): + """ + Returns the scope in which the user resides. This includes flows. + """ + leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True) + return module_context.create_context(leaf) + + +def get_flow_scope_node(module_node, position): + node = module_node.get_leaf_for_position(position, include_prefixes=True) + while not isinstance(node, (tree.Scope, tree.Flow)): + node = node.parent + + return node + + +@plugin_manager.decorate() +def complete_param_names(context, function_name, decorator_nodes): + # Basically there's no way to do param completion. The plugins are + # responsible for this. + return [] + + +class Completion: + def __init__(self, inference_state, module_context, code_lines, position, + signatures_callback, fuzzy=False): + self._inference_state = inference_state + self._module_context = module_context + self._module_node = module_context.tree_node + self._code_lines = code_lines + + # The first step of completions is to get the name + self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position) + # The actual cursor position is not what we need to calculate + # everything. We want the start of the name we're on. 
+ self._original_position = position + self._signatures_callback = signatures_callback + + self._fuzzy = fuzzy + + # Return list of completions in this order: + # - Beginning with what user is typing + # - Public (alphabet) + # - Private ("_xxx") + # - Dunder ("__xxx") + def complete(self): + leaf = self._module_node.get_leaf_for_position( + self._original_position, + include_prefixes=True + ) + string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position) + + prefixed_completions = complete_dict( + self._module_context, + self._code_lines, + start_leaf or leaf, + self._original_position, + None if string is None else quote + string, + fuzzy=self._fuzzy, + ) + + if string is not None and not prefixed_completions: + prefixed_completions = list(complete_file_name( + self._inference_state, self._module_context, start_leaf, quote, string, + self._like_name, self._signatures_callback, + self._code_lines, self._original_position, + self._fuzzy + )) + if string is not None: + if not prefixed_completions and '\n' in string: + # Complete only multi line strings + prefixed_completions = self._complete_in_string(start_leaf, string) + return prefixed_completions + + cached_name, completion_names = self._complete_python(leaf) + + imported_names = [] + if leaf.parent is not None and leaf.parent.type in ['import_as_names', 'dotted_as_names']: + imported_names.extend(extract_imported_names(leaf.parent)) + + completions = list(filter_names(self._inference_state, completion_names, + self.stack, self._like_name, + self._fuzzy, imported_names, cached_name=cached_name)) + + return ( + # Removing duplicates mostly to remove False/True/None duplicates. + _remove_duplicates(prefixed_completions, completions) + + sorted(completions, key=lambda x: (not x.name.startswith(self._like_name), + x.name.startswith('__'), + x.name.startswith('_'), + x.name.lower())) + ) + + def _complete_python(self, leaf): + """ + Analyzes the current context of a completion and decides what to + return. + + Technically this works by generating a parser stack and analysing the + current stack for possible grammar nodes. + + Possible enhancements: + - global/nonlocal search global + - yield from / raise from <- could be only exceptions/generators + - In args: */**: no completion + - In params (also lambda): no completion before = + """ + grammar = self._inference_state.grammar + self.stack = stack = None + self._position = ( + self._original_position[0], + self._original_position[1] - len(self._like_name) + ) + cached_name = None + + try: + self.stack = stack = helpers.get_stack_at_position( + grammar, self._code_lines, leaf, self._position + ) + except helpers.OnErrorLeaf as e: + value = e.error_leaf.value + if value == '.': + # After ErrorLeaf's that are dots, we will not do any + # completions since this probably just confuses the user. + return cached_name, [] + + # If we don't have a value, just use global completion. 
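+            # (Illustrative: a line like ``foo = !`` produces such an error
+            # leaf; every name visible in the module scope is offered then.)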
+ return cached_name, self._complete_global_scope() + + allowed_transitions = \ + list(stack._allowed_transition_names_and_token_types()) + + if 'if' in allowed_transitions: + leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True) + previous_leaf = leaf.get_previous_leaf() + + indent = self._position[1] + if not (leaf.start_pos <= self._position <= leaf.end_pos): + indent = leaf.start_pos[1] + + if previous_leaf is not None: + stmt = previous_leaf + while True: + stmt = search_ancestor( + stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt', + 'error_node', + ) + if stmt is None: + break + + type_ = stmt.type + if type_ == 'error_node': + first = stmt.children[0] + if isinstance(first, Leaf): + type_ = first.value + '_stmt' + # Compare indents + if stmt.start_pos[1] == indent: + if type_ == 'if_stmt': + allowed_transitions += ['elif', 'else'] + elif type_ == 'try_stmt': + allowed_transitions += ['except', 'finally', 'else'] + elif type_ == 'for_stmt': + allowed_transitions.append('else') + + completion_names = [] + + kwargs_only = False + if any(t in allowed_transitions for t in (PythonTokenTypes.NAME, + PythonTokenTypes.INDENT)): + # This means that we actually have to do type inference. + + nonterminals = [stack_node.nonterminal for stack_node in stack] + + nodes = _gather_nodes(stack) + if nodes and nodes[-1] in ('as', 'def', 'class'): + # No completions for ``with x as foo`` and ``import x as foo``. + # Also true for defining names as a class or function. + return cached_name, list(self._complete_inherited(is_function=True)) + elif "import_stmt" in nonterminals: + level, names = parse_dotted_names(nodes, "import_from" in nonterminals) + + only_modules = not ("import_from" in nonterminals and 'import' in nodes) + completion_names += self._get_importer_names( + names, + level, + only_modules=only_modules, + ) + elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.': + dot = self._module_node.get_leaf_for_position(self._position) + if dot.type == "endmarker": + # This is a bit of a weird edge case, maybe we can somehow + # generalize this. + dot = leaf.get_previous_leaf() + cached_name, n = self._complete_trailer(dot.get_previous_leaf()) + completion_names += n + elif self._is_parameter_completion(): + completion_names += self._complete_params(leaf) + else: + # Apparently this looks like it's good enough to filter most cases + # so that signature completions don't randomly appear. + # To understand why this works, three things are important: + # 1. trailer with a `,` in it is either a subscript or an arglist. + # 2. If there's no `,`, it's at the start and only signatures start + # with `(`. Other trailers could start with `.` or `[`. + # 3. Decorators are very primitive and have an optional `(` with + # optional arglist in them. 
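+                # (Illustrative: in ``json.dumps(obj, `` the top nonterminal
+                # is an arglist whose last node is ``,``, so keyword params
+                # such as ``indent=`` get suggested by the block below.)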
+ if nodes[-1] in ['(', ','] \ + and nonterminals[-1] in ('trailer', 'arglist', 'decorator'): + signatures = self._signatures_callback(*self._position) + if signatures: + call_details = signatures[0]._call_details + used_kwargs = list(call_details.iter_used_keyword_arguments()) + positional_count = call_details.count_positional_arguments() + + completion_names += _get_signature_param_names( + signatures, + positional_count, + used_kwargs, + ) + + kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs) + + if not kwargs_only: + completion_names += self._complete_global_scope() + completion_names += self._complete_inherited(is_function=False) + + if not kwargs_only: + current_line = self._code_lines[self._position[0] - 1][:self._position[1]] + completion_names += self._complete_keywords( + allowed_transitions, + only_values=not (not current_line or current_line[-1] in ' \t.;' + and current_line[-3:] != '...') + ) + + return cached_name, completion_names + + def _is_parameter_completion(self): + tos = self.stack[-1] + if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1: + # We are at the position `lambda `, where basically the next node + # is a param. + return True + if tos.nonterminal in 'parameters': + # Basically we are at the position `foo(`, there's nothing there + # yet, so we have no `typedargslist`. + return True + # var args is for lambdas and typed args for normal functions + return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ',' + + def _complete_params(self, leaf): + stack_node = self.stack[-2] + if stack_node.nonterminal == 'parameters': + stack_node = self.stack[-3] + if stack_node.nonterminal == 'funcdef': + context = get_user_context(self._module_context, self._position) + node = search_ancestor(leaf, 'error_node', 'funcdef') + if node is not None: + if node.type == 'error_node': + n = node.children[0] + if n.type == 'decorators': + decorators = n.children + elif n.type == 'decorator': + decorators = [n] + else: + decorators = [] + else: + decorators = node.get_decorators() + function_name = stack_node.nodes[1] + + return complete_param_names(context, function_name.value, decorators) + return [] + + def _complete_keywords(self, allowed_transitions, only_values): + for k in allowed_transitions: + if isinstance(k, str) and k.isalpha(): + if not only_values or k in ('True', 'False', 'None'): + yield keywords.KeywordName(self._inference_state, k) + + def _complete_global_scope(self): + context = get_user_context(self._module_context, self._position) + debug.dbg('global completion scope: %s', context) + flow_scope_node = get_flow_scope_node(self._module_node, self._position) + filters = get_global_filters( + context, + self._position, + flow_scope_node + ) + completion_names = [] + for filter in filters: + completion_names += filter.values() + return completion_names + + def _complete_trailer(self, previous_leaf): + inferred_context = self._module_context.create_context(previous_leaf) + values = infer_call_of_leaf(inferred_context, previous_leaf) + debug.dbg('trailer completion values: %s', values, color='MAGENTA') + + # The cached name simply exists to make speed optimizations for certain + # modules. 
+        cached_name = None
+        if len(values) == 1:
+            v, = values
+            if v.is_module():
+                if len(v.string_names) == 1:
+                    module_name = v.string_names[0]
+                    if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
+                        cached_name = module_name
+
+        return cached_name, self._complete_trailer_for_values(values)
+
+    def _complete_trailer_for_values(self, values):
+        user_context = get_user_context(self._module_context, self._position)
+
+        return complete_trailer(user_context, values)
+
+    def _get_importer_names(self, names, level=0, only_modules=True):
+        names = [n.value for n in names]
+        i = imports.Importer(self._inference_state, names, self._module_context, level)
+        return i.completion_names(self._inference_state, only_modules=only_modules)
+
+    def _complete_inherited(self, is_function=True):
+        """
+        Autocomplete inherited methods when overriding in child class.
+        """
+        leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
+        cls = tree.search_ancestor(leaf, 'classdef')
+        if cls is None:
+            return
+
+        # Complete the methods that are defined in the super classes.
+        class_value = self._module_context.create_value(cls)
+
+        if cls.start_pos[1] >= leaf.start_pos[1]:
+            return
+
+        filters = class_value.get_filters(is_instance=True)
+        # The first dict is the dictionary of the class itself.
+        next(filters)
+        for filter in filters:
+            for name in filter.values():
+                # TODO we should probably check here for properties
+                if (name.api_type == 'function') == is_function:
+                    yield name
+
+    def _complete_in_string(self, start_leaf, string):
+        """
+        To make it possible for people to have completions in doctests or
+        generally in "Python" code in docstrings, we use the following
+        heuristic:
+
+        - Having an indented block of code
+        - Having some doctest code that starts with `>>>`
+        - Having backticks that don't have whitespace inside them
+        """
+
+        def iter_relevant_lines(lines):
+            include_next_line = False
+            for l in code_lines:
+                if include_next_line or l.startswith('>>>') or l.startswith(' '):
+                    yield re.sub(r'^( *>>> ?| +)', '', l)
+                else:
+                    yield None
+
+                include_next_line = bool(re.match(' *>>>', l))
+
+        string = dedent(string)
+        code_lines = split_lines(string, keepends=True)
+        relevant_code_lines = list(iter_relevant_lines(code_lines))
+        if relevant_code_lines[-1] is not None:
+            # Some code lines might be None, therefore get rid of that.
+            relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
+            return self._complete_code_lines(relevant_code_lines)
+        match = re.search(r'`([^`\s]+)', code_lines[-1])
+        if match:
+            return self._complete_code_lines([match.group(1)])
+        return []
+
+    def _complete_code_lines(self, code_lines):
+        module_node = self._inference_state.grammar.parse(''.join(code_lines))
+        module_value = DocstringModule(
+            in_module_context=self._module_context,
+            inference_state=self._inference_state,
+            module_node=module_node,
+            code_lines=code_lines,
+        )
+        return Completion(
+            self._inference_state,
+            module_value.as_context(),
+            code_lines=code_lines,
+            position=module_node.end_pos,
+            signatures_callback=lambda *args, **kwargs: [],
+            fuzzy=self._fuzzy
+        ).complete()
+
+
+def _gather_nodes(stack):
+    nodes = []
+    for stack_node in stack:
+        if stack_node.dfa.from_rule == 'small_stmt':
+            nodes = []
+        else:
+            nodes += stack_node.nodes
+    return nodes
+
+
+_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
+
+
+def _extract_string_while_in_string(leaf, position):
+    def return_part_of_leaf(leaf):
+        kwargs = {}
+        if leaf.line == position[0]:
+            kwargs['endpos'] = position[1] - leaf.column
+        match = _string_start.match(leaf.value, **kwargs)
+        if not match:
+            return None, None, None
+        start = match.group(0)
+        if leaf.line == position[0] and position[1] < leaf.column + match.end():
+            return None, None, None
+        return cut_value_at_position(leaf, position)[match.end():], leaf, start
+
+    if position < leaf.start_pos:
+        return None, None, None
+
+    if leaf.type == 'string':
+        return return_part_of_leaf(leaf)
+
+    leaves = []
+    while leaf is not None:
+        if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
+            if len(leaf.value) > 1:
+                return return_part_of_leaf(leaf)
+            prefix_leaf = None
+            if not leaf.prefix:
+                prefix_leaf = leaf.get_previous_leaf()
+                if prefix_leaf is None or prefix_leaf.type != 'name' \
+                        or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
+                    prefix_leaf = None
+
+            return (
+                ''.join(cut_value_at_position(l, position) for l in leaves),
+                prefix_leaf or leaf,
+                ('' if prefix_leaf is None else prefix_leaf.value)
+                + cut_value_at_position(leaf, position),
+            )
+        if leaf.line != position[0]:
+            # Multi line strings are always simple error leaves and contain the
+            # whole string; single line error leaves are therefore the
+            # important ones here, and since the line is different, it's not
+            # really a single line string anymore.
+            break
+        leaves.insert(0, leaf)
+        leaf = leaf.get_previous_leaf()
+    return None, None, None
+
+
+def complete_trailer(user_context, values):
+    completion_names = []
+    for value in values:
+        for filter in value.get_filters(origin_scope=user_context.tree_node):
+            completion_names += filter.values()
+
+        if not value.is_stub() and isinstance(value, TreeInstance):
+            completion_names += _complete_getattr(user_context, value)
+
+    python_values = convert_values(values)
+    for c in python_values:
+        if c not in values:
+            for filter in c.get_filters(origin_scope=user_context.tree_node):
+                completion_names += filter.values()
+    return completion_names
+
+
+def _complete_getattr(user_context, instance):
+    """
+    A heuristic to make completion for proxy objects work. This is not
+    intended to work in all cases. It works exactly in this case:
+
+        def __getattr__(self, name):
+            ...
+            return getattr(any_object, name)
+
+    It is important that the return contains getattr directly, otherwise it
+    won't work anymore. It's really just a stupid heuristic.
+    It will not work if you write e.g. `return (getattr(o, name))`, because
+    of the additional parentheses. It will also not work if you move the
+    getattr to some other place that is not the return statement itself.
+
+    It is intentional that it doesn't work in all cases. Generally it's
+    really hard to do even this case (as you can see below). Most people
+    will write it like this anyway and the other ones, well they are just
+    out of luck I guess :) ~dave.
+    """
+    names = (instance.get_function_slot_names('__getattr__')
+             or instance.get_function_slot_names('__getattribute__'))
+    functions = ValueSet.from_sets(
+        name.infer()
+        for name in names
+    )
+    for func in functions:
+        tree_node = func.tree_node
+        if tree_node is None or tree_node.type != 'funcdef':
+            continue
+
+        for return_stmt in tree_node.iter_return_stmts():
+            # Basically until the next comment we just try to find out if a
+            # return statement looks exactly like `return getattr(x, name)`.
+            if return_stmt.type != 'return_stmt':
+                continue
+            atom_expr = return_stmt.children[1]
+            if atom_expr.type != 'atom_expr':
+                continue
+            atom = atom_expr.children[0]
+            trailer = atom_expr.children[1]
+            if len(atom_expr.children) != 2 or atom.type != 'name' \
+                    or atom.value != 'getattr':
+                continue
+            arglist = trailer.children[1]
+            if arglist.type != 'arglist' or len(arglist.children) < 3:
+                continue
+            context = func.as_context()
+            object_node = arglist.children[0]
+
+            # Make sure it's a param: foo in __getattr__(self, foo)
+            name_node = arglist.children[2]
+            name_list = context.goto(name_node, name_node.start_pos)
+            if not any(n.api_type == 'param' for n in name_list):
+                continue
+
+            # Now that we know that these are most probably completion
+            # objects, we just infer the object and return them as
+            # completions.
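+            # E.g. for `return getattr(self._wrapped, name)` (an illustrative
+            # proxy pattern), object_node is the `self._wrapped` node, and the
+            # completions of its inferred values are what gets returned.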
+ objects = context.infer_node(object_node) + return complete_trailer(user_context, objects) + return [] + + +def search_in_module(inference_state, module_context, names, wanted_names, + wanted_type, complete=False, fuzzy=False, + ignore_imports=False, convert=False): + for s in wanted_names[:-1]: + new_names = [] + for n in names: + if s == n.string_name: + if n.tree_name is not None and n.api_type in ('module', 'namespace') \ + and ignore_imports: + continue + new_names += complete_trailer( + module_context, + n.infer() + ) + debug.dbg('dot lookup on search %s from %s', new_names, names[:10]) + names = new_names + + last_name = wanted_names[-1].lower() + for n in names: + string = n.string_name.lower() + if complete and helpers.match(string, last_name, fuzzy=fuzzy) \ + or not complete and string == last_name: + if isinstance(n, SubModuleName): + names = [v.name for v in n.infer()] + else: + names = [n] + if convert: + names = convert_names(names) + for n2 in names: + if complete: + def_ = classes.Completion( + inference_state, n2, + stack=None, + like_name_length=len(last_name), + is_fuzzy=fuzzy, + ) + else: + def_ = classes.Name(inference_state, n2) + if not wanted_type or wanted_type == def_.type: + yield def_ + + +def extract_imported_names(node): + imported_names = [] + + if node.type in ['import_as_names', 'dotted_as_names', 'import_as_name']: + for index, child in enumerate(node.children): + if child.type == 'name': + if (index > 0 and node.children[index - 1].type == "keyword" + and node.children[index - 1].value == "as"): + continue + imported_names.append(child.value) + elif child.type == 'import_as_name': + imported_names.extend(extract_imported_names(child)) + + return imported_names diff --git a/lib/python3.10/site-packages/jedi/api/completion_cache.py b/lib/python3.10/site-packages/jedi/api/completion_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..46e9bead210d817399113bed5db11e4e5e69e6cd --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/completion_cache.py @@ -0,0 +1,31 @@ +from typing import Dict, Tuple, Callable + +CacheValues = Tuple[str, str, str] +CacheValuesCallback = Callable[[], CacheValues] + + +_cache: Dict[str, Dict[str, CacheValues]] = {} + + +def save_entry(module_name: str, name: str, cache: CacheValues) -> None: + try: + module_cache = _cache[module_name] + except KeyError: + module_cache = _cache[module_name] = {} + module_cache[name] = cache + + +def _create_get_from_cache(number: int) -> Callable[[str, str, CacheValuesCallback], str]: + def _get_from_cache(module_name: str, name: str, get_cache_values: CacheValuesCallback) -> str: + try: + return _cache[module_name][name][number] + except KeyError: + v = get_cache_values() + save_entry(module_name, name, v) + return v[number] + return _get_from_cache + + +get_type = _create_get_from_cache(0) +get_docstring_signature = _create_get_from_cache(1) +get_docstring = _create_get_from_cache(2) diff --git a/lib/python3.10/site-packages/jedi/api/environment.py b/lib/python3.10/site-packages/jedi/api/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..64b318e1ff7a3302155136afcccb0cca05b78fd1 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/environment.py @@ -0,0 +1,480 @@ +""" +Environments are a way to activate different Python versions or Virtualenvs for +static analysis. The Python binary in that environment is going to be executed. 
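+
+Example (a minimal sketch; the interpreter path is a placeholder)::
+
+    import jedi
+
+    env = jedi.create_environment('/usr/bin/python3', safe=False)
+    script = jedi.Script('import json\njson.', environment=env)
+    completions = script.complete(2, 5)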
+""" +import os +import sys +import hashlib +import filecmp +from collections import namedtuple +from shutil import which +from typing import TYPE_CHECKING + +from jedi.cache import memoize_method, time_cache +from jedi.inference.compiled.subprocess import CompiledSubprocess, \ + InferenceStateSameProcess, InferenceStateSubprocess + +import parso + +if TYPE_CHECKING: + from jedi.inference import InferenceState + + +_VersionInfo = namedtuple('VersionInfo', 'major minor micro') # type: ignore[name-match] + +_SUPPORTED_PYTHONS = ['3.13', '3.12', '3.11', '3.10', '3.9', '3.8', '3.7', '3.6'] +_SAFE_PATHS = ['/usr/bin', '/usr/local/bin'] +_CONDA_VAR = 'CONDA_PREFIX' +_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor) + + +class InvalidPythonEnvironment(Exception): + """ + If you see this exception, the Python executable or Virtualenv you have + been trying to use is probably not a correct Python version. + """ + + +class _BaseEnvironment: + @memoize_method + def get_grammar(self): + version_string = '%s.%s' % (self.version_info.major, self.version_info.minor) + return parso.load_grammar(version=version_string) + + @property + def _sha256(self): + try: + return self._hash + except AttributeError: + self._hash = _calculate_sha256_for_file(self.executable) + return self._hash + + +def _get_info(): + return ( + sys.executable, + sys.prefix, + sys.version_info[:3], + ) + + +class Environment(_BaseEnvironment): + """ + This class is supposed to be created by internal Jedi architecture. You + should not create it directly. Please use create_environment or the other + functions instead. It is then returned by that function. + """ + _subprocess = None + + def __init__(self, executable, env_vars=None): + self._start_executable = executable + self._env_vars = env_vars + # Initialize the environment + self._get_subprocess() + + def _get_subprocess(self): + if self._subprocess is not None and not self._subprocess.is_crashed: + return self._subprocess + + try: + self._subprocess = CompiledSubprocess(self._start_executable, + env_vars=self._env_vars) + info = self._subprocess._send(None, _get_info) + except Exception as exc: + raise InvalidPythonEnvironment( + "Could not get version information for %r: %r" % ( + self._start_executable, + exc)) + + # Since it could change and might not be the same(?) as the one given, + # set it here. + self.executable = info[0] + """ + The Python executable, matches ``sys.executable``. + """ + self.path = info[1] + """ + The path to an environment, matches ``sys.prefix``. + """ + self.version_info = _VersionInfo(*info[2]) + """ + Like :data:`sys.version_info`: a tuple to show the current + Environment's Python version. + """ + return self._subprocess + + def __repr__(self): + version = '.'.join(str(i) for i in self.version_info) + return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path) + + def get_inference_state_subprocess( + self, + inference_state: 'InferenceState', + ) -> InferenceStateSubprocess: + return InferenceStateSubprocess(inference_state, self._get_subprocess()) + + @memoize_method + def get_sys_path(self): + """ + The sys path for this environment. Does not include potential + modifications from e.g. appending to :data:`sys.path`. + + :returns: list of str + """ + # It's pretty much impossible to generate the sys path without actually + # executing Python. The sys path (when starting with -S) itself depends + # on how the Python version was compiled (ENV variables). 
+        # If you omit -S when starting Python (normal case), additionally
+        # site.py gets executed.
+        return self._get_subprocess().get_sys_path()
+
+
+class _SameEnvironmentMixin:
+    def __init__(self):
+        self._start_executable = self.executable = sys.executable
+        self.path = sys.prefix
+        self.version_info = _VersionInfo(*sys.version_info[:3])
+        self._env_vars = None
+
+
+class SameEnvironment(_SameEnvironmentMixin, Environment):
+    pass
+
+
+class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
+    def get_inference_state_subprocess(
+        self,
+        inference_state: 'InferenceState',
+    ) -> InferenceStateSameProcess:
+        return InferenceStateSameProcess(inference_state)
+
+    def get_sys_path(self):
+        return sys.path
+
+
+def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
+    """Get virtualenv environment from VIRTUAL_ENV environment variable.
+
+    It uses `safe=False` with ``create_environment``, because the environment
+    variable is considered to be safe / controlled by the user solely.
+    """
+    var = os.environ.get(env_var)
+    if var:
+        # Under macOS in some cases - notably when using Pipenv - the
+        # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+        # /path/to/env so we need to fully resolve the paths in order to
+        # compare them.
+        if os.path.realpath(var) == os.path.realpath(sys.prefix):
+            return _try_get_same_env()
+
+        try:
+            return create_environment(var, safe=False)
+        except InvalidPythonEnvironment:
+            pass
+
+
+def _calculate_sha256_for_file(path):
+    sha256 = hashlib.sha256()
+    with open(path, 'rb') as f:
+        for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
+            sha256.update(block)
+    return sha256.hexdigest()
+
+
+def get_default_environment():
+    """
+    Tries to return an active Virtualenv or conda environment.
+    If neither the VIRTUAL_ENV variable nor the CONDA_PREFIX variable is set,
+    it will return the latest Python version installed on the system. This
+    makes it possible to use as many new Python features as possible when
+    using autocompletion and other functionality.
+
+    :returns: :class:`.Environment`
+    """
+    virtual_env = _get_virtual_env_from_var()
+    if virtual_env is not None:
+        return virtual_env
+
+    conda_env = _get_virtual_env_from_var(_CONDA_VAR)
+    if conda_env is not None:
+        return conda_env
+
+    return _try_get_same_env()
+
+
+def _try_get_same_env():
+    env = SameEnvironment()
+    if not os.path.basename(env.executable).lower().startswith('python'):
+        # This tries to counter issues with embedding. In some cases (e.g.
+        # VIM's Python on Mac/Windows), sys.executable is /foo/bar/vim. This
+        # happens because on Mac a function called `_NSGetExecutablePath` is
+        # used and on Windows `GetModuleFileNameW`. These are both platform
+        # specific functions. For all other systems sys.executable should be
+        # alright. However here we try to generalize:
+        #
+        # 1. Check if the executable looks like python (heuristic)
+        # 2. In case it's not, try to find the executable
+        # 3. In case we don't find it, use an interpreter environment.
+        #
+        # The last option will always work, but leads to potential crashes of
+        # Jedi - which is ok, because it happens very rarely and even less,
+        # because the code below should work for most cases.
+        if os.name == 'nt':
+            # The first case would be a virtualenv and the second a normal
+            # Python installation.
+            checks = (r'Scripts\python.exe', 'python.exe')
+        else:
+            # For unix it looks like Python is always in a bin folder.
+            checks = (
+                'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
+                'bin/python%s' % (sys.version_info[0]),
+                'bin/python',
+            )
+        for check in checks:
+            guess = os.path.join(sys.exec_prefix, check)
+            if os.path.isfile(guess):
+                # Bingo - We think we have our Python.
+                return Environment(guess)
+        # It looks like there is no reasonable Python to be found.
+        return InterpreterEnvironment()
+    # If no virtualenv is found, use the environment we're already
+    # using.
+    return env
+
+
+def get_cached_default_environment():
+    var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR)
+    environment = _get_cached_default_environment()
+
+    # Under macOS in some cases - notably when using Pipenv - the
+    # sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
+    # /path/to/env so we need to fully resolve the paths in order to
+    # compare them.
+    if var and os.path.realpath(var) != os.path.realpath(environment.path):
+        _get_cached_default_environment.clear_cache()
+        return _get_cached_default_environment()
+    return environment
+
+
+@time_cache(seconds=10 * 60)  # 10 Minutes
+def _get_cached_default_environment():
+    try:
+        return get_default_environment()
+    except InvalidPythonEnvironment:
+        # It's possible that `sys.executable` is wrong. Typically happens
+        # when Jedi is used in an executable that embeds Python. For further
+        # information, have a look at:
+        # https://github.com/davidhalter/jedi/issues/1531
+        return InterpreterEnvironment()
+
+
+def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True):
+    """
+    :param paths: A list of paths in your file system to be scanned for
+        Virtualenvs. It will search in these paths and potentially execute the
+        Python binaries.
+    :param safe: Default True. In case this is False, it will allow this
+        function to execute potential `python` environments. An attacker might
+        be able to drop an executable in a path this function is searching by
+        default. If the executable has not been installed by root, it will not
+        be executed.
+    :param use_environment_vars: Default True. If True, the VIRTUAL_ENV
+        variable will be checked if it contains a valid VirtualEnv.
+        CONDA_PREFIX will be checked to see if it contains a valid conda
+        environment.
+
+    :yields: :class:`.Environment`
+    """
+    if paths is None:
+        paths = []
+
+    _used_paths = set()
+
+    if use_environment_vars:
+        # Using this variable should be safe, because attackers might be
+        # able to drop files (via git) but not environment variables.
+        virtual_env = _get_virtual_env_from_var()
+        if virtual_env is not None:
+            yield virtual_env
+            _used_paths.add(virtual_env.path)
+
+        conda_env = _get_virtual_env_from_var(_CONDA_VAR)
+        if conda_env is not None:
+            yield conda_env
+            _used_paths.add(conda_env.path)
+
+    for directory in paths:
+        if not os.path.isdir(directory):
+            continue
+
+        directory = os.path.abspath(directory)
+        for path in os.listdir(directory):
+            path = os.path.join(directory, path)
+            if path in _used_paths:
+                # A path shouldn't be inferred twice.
+                continue
+            _used_paths.add(path)
+
+            try:
+                executable = _get_executable_path(path, safe=safe)
+                yield Environment(executable)
+            except InvalidPythonEnvironment:
+                pass
+
+
+def find_system_environments(*, env_vars=None):
+    """
+    Ignores virtualenvs and returns the Python versions that were installed on
+    your system. This might return nothing, if you're running Python e.g. from
+    a portable version.
+
+    The environments are sorted from latest to oldest Python version.
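+
+    Example (a minimal sketch)::
+
+        for env in find_system_environments():
+            print(env.executable, env.version_info)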
+
+    :yields: :class:`.Environment`
+    """
+    for version_string in _SUPPORTED_PYTHONS:
+        try:
+            yield get_system_environment(version_string, env_vars=env_vars)
+        except InvalidPythonEnvironment:
+            pass
+
+
+# TODO: this function should probably return a list of environments since
+# multiple Python installations can be found on a system for the same version.
+def get_system_environment(version, *, env_vars=None):
+    """
+    Return the first Python environment found for a string of the form 'X.Y'
+    where X and Y are the major and minor versions of Python.
+
+    :raises: :exc:`.InvalidPythonEnvironment`
+    :returns: :class:`.Environment`
+    """
+    exe = which('python' + version)
+    if exe:
+        if exe == sys.executable:
+            return SameEnvironment()
+        return Environment(exe)
+
+    if os.name == 'nt':
+        for exe in _get_executables_from_windows_registry(version):
+            try:
+                return Environment(exe, env_vars=env_vars)
+            except InvalidPythonEnvironment:
+                pass
+    raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
+
+
+def create_environment(path, *, safe=True, env_vars=None):
+    """
+    Make it possible to manually create an Environment object by specifying a
+    Virtualenv path or an executable path and optional environment variables.
+
+    :raises: :exc:`.InvalidPythonEnvironment`
+    :returns: :class:`.Environment`
+    """
+    if os.path.isfile(path):
+        _assert_safe(path, safe)
+        return Environment(path, env_vars=env_vars)
+    return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars)
+
+
+def _get_executable_path(path, safe=True):
+    """
+    Raises ``InvalidPythonEnvironment`` if the path is not actually a virtual
+    env, i.e. if no Python executable is found inside it.
+    """
+
+    if os.name == 'nt':
+        pythons = [os.path.join(path, 'Scripts', 'python.exe'), os.path.join(path, 'python.exe')]
+    else:
+        pythons = [os.path.join(path, 'bin', 'python')]
+    for python in pythons:
+        if os.path.exists(python):
+            break
+    else:
+        raise InvalidPythonEnvironment("%s seems to be missing." % python)
+
+    _assert_safe(python, safe)
+    return python
+
+
+def _get_executables_from_windows_registry(version):
+    import winreg
+
+    # TODO: support Python Anaconda.
+    sub_keys = [
+        r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
+        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
+        r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
+        r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
+    ]
+    for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
+        for sub_key in sub_keys:
+            sub_key = sub_key.format(version=version)
+            try:
+                with winreg.OpenKey(root_key, sub_key) as key:
+                    prefix = winreg.QueryValueEx(key, '')[0]
+                    exe = os.path.join(prefix, 'python.exe')
+                    if os.path.isfile(exe):
+                        yield exe
+            except WindowsError:
+                pass
+
+
+def _assert_safe(executable_path, safe):
+    if safe and not _is_safe(executable_path):
+        raise InvalidPythonEnvironment(
+            "The python binary is potentially unsafe.")
+
+
+def _is_safe(executable_path):
+    # Resolve sym links. A venv typically is a symlink to a known Python
+    # binary. Only virtualenvs copy symlinks around.
+    real_path = os.path.realpath(executable_path)
+
+    if _is_unix_safe_simple(real_path):
+        return True
+
+    # Just check the list of known Python versions. If it's not in there,
+    # it's likely an attacker or some Python that was not properly
+    # installed in the system.
+    for environment in find_system_environments():
+        if environment.executable == real_path:
+            return True
+
+    # If the versions don't match, just compare the binary files.
+    # If we don't do that, only venvs will be working and not virtualenvs.
+    # venvs are symlinks while virtualenvs are actual copies of the
+    # Python files.
+    # This still means that if the system Python is updated and the
+    # virtualenv's Python is not (which is probably never going to get
+    # upgraded), it will not work with Jedi. IMO that's fine, because
+    # people should just be using venv. ~ dave
+    if environment._sha256 == _calculate_sha256_for_file(real_path):
+        return True
+    return False
+
+
+def _is_unix_safe_simple(real_path):
+    if _is_unix_admin():
+        # In case we are root, just be conservative and
+        # only execute known paths.
+        return any(real_path.startswith(p) for p in _SAFE_PATHS)
+
+    uid = os.stat(real_path).st_uid
+    # The interpreter needs to be owned by root. This means that it wasn't
+    # written by a user and therefore attacking Jedi is not as simple.
+    # The attack could look like the following:
+    # 1. A user clones a repository.
+    # 2. The repository has an innocent looking folder called foobar. jedi
+    #    searches for the folder and executes foobar/bin/python --version if
+    #    there's also a foobar/bin/activate.
+    # 3. The attacker has gained code execution, since he controls
+    #    foobar/bin/python.
+    return uid == 0
+
+
+def _is_unix_admin():
+    try:
+        return os.getuid() == 0
+    except AttributeError:
+        return False  # Windows
diff --git a/lib/python3.10/site-packages/jedi/api/errors.py b/lib/python3.10/site-packages/jedi/api/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..10cb62af1500616e150f5f2e73396241d8f6769f
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/errors.py
@@ -0,0 +1,46 @@
+"""
+This file is about errors in Python files and not about exception handling in
+Jedi.
+"""
+
+
+def parso_to_jedi_errors(grammar, module_node):
+    return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
+
+
+class SyntaxError:
+    """
+    Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
+    """
+    def __init__(self, parso_error):
+        self._parso_error = parso_error
+
+    @property
+    def line(self):
+        """The line where the error starts (starting with 1)."""
+        return self._parso_error.start_pos[0]
+
+    @property
+    def column(self):
+        """The column where the error starts (starting with 0)."""
+        return self._parso_error.start_pos[1]
+
+    @property
+    def until_line(self):
+        """The line where the error ends (starting with 1)."""
+        return self._parso_error.end_pos[0]
+
+    @property
+    def until_column(self):
+        """The column where the error ends (starting with 0)."""
+        return self._parso_error.end_pos[1]
+
+    def get_message(self):
+        return self._parso_error.message
+
+    def __repr__(self):
+        return '<%s from=%s to=%s>' % (
+            self.__class__.__name__,
+            self._parso_error.start_pos,
+            self._parso_error.end_pos,
+        )
diff --git a/lib/python3.10/site-packages/jedi/api/exceptions.py b/lib/python3.10/site-packages/jedi/api/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..9db12e714de1dc1eb5bb6d3cbe631c456ec7a555
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/exceptions.py
@@ -0,0 +1,31 @@
+class _JediError(Exception):
+    pass
+
+
+class InternalError(_JediError):
+    """
+    This error might happen when a subprocess crashes. The reason for this is
+    usually broken C code in third party libraries. This is not a very common
+    thing and it is safe to use Jedi again. However using the same calls might
+    result in the same error again.
+ """ + + +class WrongVersion(_JediError): + """ + This error is reserved for the future, shouldn't really be happening at the + moment. + """ + + +class RefactoringError(_JediError): + """ + Refactorings can fail for various reasons. So if you work with refactorings + like :meth:`.Script.rename`, :meth:`.Script.inline`, + :meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make + sure to catch these. The descriptions in the errors are usually valuable + for end users. + + A typical ``RefactoringError`` would tell the user that inlining is not + possible if no name is under the cursor. + """ diff --git a/lib/python3.10/site-packages/jedi/api/file_name.py b/lib/python3.10/site-packages/jedi/api/file_name.py new file mode 100644 index 0000000000000000000000000000000000000000..277f3220ce53dfa410033d2c5ef40c2f1a888652 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/file_name.py @@ -0,0 +1,155 @@ +import os + +from jedi.api import classes +from jedi.api.strings import StringName, get_quote_ending +from jedi.api.helpers import match +from jedi.inference.helpers import get_str_or_none + + +class PathName(StringName): + api_type = 'path' + + +def complete_file_name(inference_state, module_context, start_leaf, quote, string, + like_name, signatures_callback, code_lines, position, fuzzy): + # First we want to find out what can actually be changed as a name. + like_name_length = len(os.path.basename(string)) + + addition = _get_string_additions(module_context, start_leaf) + if string.startswith('~'): + string = os.path.expanduser(string) + if addition is None: + return + string = addition + string + + # Here we use basename again, because if strings are added like + # `'foo' + 'bar`, it should complete to `foobar/`. + must_start_with = os.path.basename(string) + string = os.path.dirname(string) + + sigs = signatures_callback(*position) + is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs) + if is_in_os_path_join: + to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start) + if to_be_added is None: + is_in_os_path_join = False + else: + string = to_be_added + string + base_path = os.path.join(inference_state.project.path, string) + try: + listed = sorted(os.scandir(base_path), key=lambda e: e.name) + # OSError: [Errno 36] File name too long: '...' 
+ except (FileNotFoundError, OSError): + return + quote_ending = get_quote_ending(quote, code_lines, position) + for entry in listed: + name = entry.name + if match(name, must_start_with, fuzzy=fuzzy): + if is_in_os_path_join or not entry.is_dir(): + name += quote_ending + else: + name += os.path.sep + + yield classes.Completion( + inference_state, + PathName(inference_state, name[len(must_start_with) - like_name_length:]), + stack=None, + like_name_length=like_name_length, + is_fuzzy=fuzzy, + ) + + +def _get_string_additions(module_context, start_leaf): + def iterate_nodes(): + node = addition.parent + was_addition = True + for child_node in reversed(node.children[:node.children.index(addition)]): + if was_addition: + was_addition = False + yield child_node + continue + + if child_node != '+': + break + was_addition = True + + addition = start_leaf.get_previous_leaf() + if addition != '+': + return '' + context = module_context.create_context(start_leaf) + return _add_strings(context, reversed(list(iterate_nodes()))) + + +def _add_strings(context, nodes, add_slash=False): + string = '' + first = True + for child_node in nodes: + values = context.infer_node(child_node) + if len(values) != 1: + return None + c, = values + s = get_str_or_none(c) + if s is None: + return None + if not first and add_slash: + string += os.path.sep + string += s + first = False + return string + + +def _add_os_path_join(module_context, start_leaf, bracket_start): + def check(maybe_bracket, nodes): + if maybe_bracket.start_pos != bracket_start: + return None + + if not nodes: + return '' + context = module_context.create_context(nodes[0]) + return _add_strings(context, nodes, add_slash=True) or '' + + if start_leaf.type == 'error_leaf': + # Unfinished string literal, like `join('` + value_node = start_leaf.parent + index = value_node.children.index(start_leaf) + if index > 0: + error_node = value_node.children[index - 1] + if error_node.type == 'error_node' and len(error_node.children) >= 2: + index = -2 + if error_node.children[-1].type == 'arglist': + arglist_nodes = error_node.children[-1].children + index -= 1 + else: + arglist_nodes = [] + + return check(error_node.children[index + 1], arglist_nodes[::2]) + return None + + # Maybe an arglist or some weird error case. Therefore checked below. 
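+    # E.g. in `os.path.join('a', '` the unfinished string leaf hangs below an
+    # arglist, trailer or error_node; walk up from the leaf until such a
+    # parent is found.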
+    searched_node_child = start_leaf
+    while searched_node_child.parent is not None \
+            and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
+        searched_node_child = searched_node_child.parent
+
+    if searched_node_child.get_first_leaf() is not start_leaf:
+        return None
+    searched_node = searched_node_child.parent
+    if searched_node is None:
+        return None
+
+    index = searched_node.children.index(searched_node_child)
+    arglist_nodes = searched_node.children[:index]
+    if searched_node.type == 'arglist':
+        trailer = searched_node.parent
+        if trailer.type == 'error_node':
+            trailer_index = trailer.children.index(searched_node)
+            assert trailer_index >= 2
+            assert trailer.children[trailer_index - 1] == '('
+            return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
+        elif trailer.type == 'trailer':
+            return check(trailer.children[0], arglist_nodes[::2])
+    elif searched_node.type == 'trailer':
+        return check(searched_node.children[0], [])
+    elif searched_node.type == 'error_node':
+        # Stuff like `join(""`
+        return check(arglist_nodes[-1], [])
diff --git a/lib/python3.10/site-packages/jedi/api/helpers.py b/lib/python3.10/site-packages/jedi/api/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..111f75b44705dfe1f8c9f9ac179874426fe39819
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/helpers.py
@@ -0,0 +1,522 @@
+"""
+Helpers for the API
+"""
+import re
+from collections import namedtuple
+from textwrap import dedent
+from itertools import chain
+from functools import wraps
+from inspect import Parameter
+
+from parso.python.parser import Parser
+from parso.python import tree
+
+from jedi.inference.base_value import NO_VALUES
+from jedi.inference.syntax_tree import infer_atom
+from jedi.inference.helpers import infer_call_of_leaf
+from jedi.inference.compiled import get_string_value_set
+from jedi.cache import signature_time_cache, memoize_method
+from jedi.parser_utils import get_parent_scope
+
+
+CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
+
+
+def _start_match(string, like_name):
+    return string.startswith(like_name)
+
+
+def _fuzzy_match(string, like_name):
+    if len(like_name) <= 1:
+        return like_name in string
+    pos = string.find(like_name[0])
+    if pos >= 0:
+        return _fuzzy_match(string[pos + 1:], like_name[1:])
+    return False
+
+
+def match(string, like_name, fuzzy=False):
+    if fuzzy:
+        return _fuzzy_match(string, like_name)
+    else:
+        return _start_match(string, like_name)
+
+
+def sorted_definitions(defs):
+    # Note: `or ''` below is required because `module_path` could be None.
+    return sorted(defs, key=lambda x: (str(x.module_path or ''),
+                                       x.line or 0,
+                                       x.column or 0,
+                                       x.name))
+
+
+def get_on_completion_name(module_node, lines, position):
+    leaf = module_node.get_leaf_for_position(position)
+    if leaf is None or leaf.type in ('string', 'error_leaf'):
+        # Completions inside strings are a bit special, we need to parse the
+        # string. The same is true for comments and error_leafs.
+        line = lines[position[0] - 1]
+        # The first step of completions is to get the name
+        return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
+    elif leaf.type not in ('name', 'keyword'):
+        return ''
+
+    return leaf.value[:position[1] - leaf.start_pos[1]]
+
+
+def _get_code(code_lines, start_pos, end_pos):
+    # Get relevant lines.
+    lines = code_lines[start_pos[0] - 1:end_pos[0]]
+    # Remove the parts at the end of the line.
+    lines[-1] = lines[-1][:end_pos[1]]
+    # Remove first line indentation.
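+    # (Worked example: start_pos=(2, 4) and end_pos=(3, 7) take lines 2-3,
+    # cut line 3 after column 7 and drop the first 4 columns of line 2.)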
+    lines[0] = lines[0][start_pos[1]:]
+    return ''.join(lines)
+
+
+class OnErrorLeaf(Exception):
+    @property
+    def error_leaf(self):
+        return self.args[0]
+
+
+def _get_code_for_stack(code_lines, leaf, position):
+    # It might happen that we're on whitespace or on a comment. This means
+    # that we would not get the right leaf.
+    if leaf.start_pos >= position:
+        # If we're not on a comment simply get the previous leaf and proceed.
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return ''  # At the beginning of the file.
+
+    is_after_newline = leaf.type == 'newline'
+    while leaf.type == 'newline':
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return ''
+
+    if leaf.type == 'error_leaf' or leaf.type == 'string':
+        if leaf.start_pos[0] < position[0]:
+            # On a different line, we just begin anew.
+            return ''
+
+        # Error leafs cannot be parsed, completion in strings is also
+        # impossible.
+        raise OnErrorLeaf(leaf)
+    else:
+        user_stmt = leaf
+        while True:
+            if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
+                break
+            user_stmt = user_stmt.parent
+
+        if is_after_newline:
+            if user_stmt.start_pos[1] > position[1]:
+                # This means that it's actually a dedent and that means that we
+                # start without value (part of a suite).
+                return ''
+
+        # This is basically getting the relevant lines.
+        return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
+
+
+def get_stack_at_position(grammar, code_lines, leaf, pos):
+    """
+    Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
+    """
+    class EndMarkerReached(Exception):
+        pass
+
+    def tokenize_without_endmarker(code):
+        # TODO This is for now not an official parso API that exists purely
+        # for Jedi.
+        tokens = grammar._tokenize(code)
+        for token in tokens:
+            if token.string == safeword:
+                raise EndMarkerReached()
+            elif token.prefix.endswith(safeword):
+                # This happens with comments.
+                raise EndMarkerReached()
+            elif token.string.endswith(safeword):
+                yield token  # Probably an f-string literal that was not finished.
+                raise EndMarkerReached()
+            else:
+                yield token
+
+    # The code might be indented; just remove the indentation.
+    code = dedent(_get_code_for_stack(code_lines, leaf, pos))
+    # We use a word to tell Jedi when we have reached the start of the
+    # completion.
+    # Use Z as a prefix because it's not part of a number suffix.
+    safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
+    code = code + ' ' + safeword
+
+    p = Parser(grammar._pgen_grammar, error_recovery=True)
+    try:
+        p.parse(tokens=tokenize_without_endmarker(code))
+    except EndMarkerReached:
+        return p.stack
+    raise SystemError(
+        "This really shouldn't happen. There's a bug in Jedi:\n%s"
+        % list(tokenize_without_endmarker(code))
+    )
+
+
+def infer(inference_state, context, leaf):
+    if leaf.type == 'name':
+        return inference_state.infer(context, leaf)
+
+    parent = leaf.parent
+    definitions = NO_VALUES
+    if parent.type == 'atom':
+        # e.g. `(a + b)`
+        definitions = context.infer_node(leaf.parent)
+    elif parent.type == 'trailer':
+        # e.g. `a()`
+        definitions = infer_call_of_leaf(context, leaf)
+    elif isinstance(leaf, tree.Literal):
+        # e.g.
`"foo"` or `1.0` + return infer_atom(context, leaf) + elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'): + return get_string_value_set(inference_state) + return definitions + + +def filter_follow_imports(names, follow_builtin_imports=False): + for name in names: + if name.is_import(): + new_names = list(filter_follow_imports( + name.goto(), + follow_builtin_imports=follow_builtin_imports, + )) + found_builtin = False + if follow_builtin_imports: + for new_name in new_names: + if new_name.start_pos is None: + found_builtin = True + + if found_builtin: + yield name + else: + yield from new_names + else: + yield name + + +class CallDetails: + def __init__(self, bracket_leaf, children, position): + self.bracket_leaf = bracket_leaf + self._children = children + self._position = position + + @property + def index(self): + return _get_index_and_key(self._children, self._position)[0] + + @property + def keyword_name_str(self): + return _get_index_and_key(self._children, self._position)[1] + + @memoize_method + def _list_arguments(self): + return list(_iter_arguments(self._children, self._position)) + + def calculate_index(self, param_names): + positional_count = 0 + used_names = set() + star_count = -1 + args = self._list_arguments() + if not args: + if param_names: + return 0 + else: + return None + + is_kwarg = False + for i, (star_count, key_start, had_equal) in enumerate(args): + is_kwarg |= had_equal | (star_count == 2) + if star_count: + pass # For now do nothing, we don't know what's in there here. + else: + if i + 1 != len(args): # Not last + if had_equal: + used_names.add(key_start) + else: + positional_count += 1 + + for i, param_name in enumerate(param_names): + kind = param_name.get_kind() + + if not is_kwarg: + if kind == Parameter.VAR_POSITIONAL: + return i + if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY): + if i == positional_count: + return i + + if key_start is not None and not star_count == 1 or star_count == 2: + if param_name.string_name not in used_names \ + and (kind == Parameter.KEYWORD_ONLY + or kind == Parameter.POSITIONAL_OR_KEYWORD + and positional_count <= i): + if star_count: + return i + if had_equal: + if param_name.string_name == key_start: + return i + else: + if param_name.string_name.startswith(key_start): + return i + + if kind == Parameter.VAR_KEYWORD: + return i + return None + + def iter_used_keyword_arguments(self): + for star_count, key_start, had_equal in list(self._list_arguments()): + if had_equal and key_start: + yield key_start + + def count_positional_arguments(self): + count = 0 + for star_count, key_start, had_equal in self._list_arguments()[:-1]: + if star_count or key_start: + break + count += 1 + return count + + +def _iter_arguments(nodes, position): + def remove_after_pos(name): + if name.type != 'name': + return None + return name.value[:position[1] - name.start_pos[1]] + + # Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]] + nodes_before = [c for c in nodes if c.start_pos < position] + if nodes_before[-1].type == 'arglist': + yield from _iter_arguments(nodes_before[-1].children, position) + return + + previous_node_yielded = False + stars_seen = 0 + for i, node in enumerate(nodes_before): + if node.type == 'argument': + previous_node_yielded = True + first = node.children[0] + second = node.children[1] + if second == '=': + if second.start_pos < position and first.type == 'name': + yield 0, first.value, True + else: + yield 0, remove_after_pos(first), False + elif first in 
('*', '**'):
+                yield len(first.value), remove_after_pos(second), False
+            else:
+                # Must be a Comprehension
+                first_leaf = node.get_first_leaf()
+                if first_leaf.type == 'name' and first_leaf.start_pos >= position:
+                    yield 0, remove_after_pos(first_leaf), False
+                else:
+                    yield 0, None, False
+            stars_seen = 0
+        elif node.type == 'testlist_star_expr':
+            for n in node.children[::2]:
+                if n.type == 'star_expr':
+                    stars_seen = 1
+                    n = n.children[1]
+                yield stars_seen, remove_after_pos(n), False
+                stars_seen = 0
+            # The count of children is even if there's a comma at the end.
+            previous_node_yielded = bool(len(node.children) % 2)
+        elif isinstance(node, tree.PythonLeaf) and node.value == ',':
+            if not previous_node_yielded:
+                yield stars_seen, '', False
+                stars_seen = 0
+            previous_node_yielded = False
+        elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
+            stars_seen = len(node.value)
+        elif node == '=' and nodes_before[-1]:
+            previous_node_yielded = True
+            before = nodes_before[i - 1]
+            if before.type == 'name':
+                yield 0, before.value, True
+            else:
+                yield 0, None, False
+            # Just ignore the star that is probably a syntax error.
+            stars_seen = 0
+
+    if not previous_node_yielded:
+        if nodes_before[-1].type == 'name':
+            yield stars_seen, remove_after_pos(nodes_before[-1]), False
+        else:
+            yield stars_seen, '', False
+
+
+def _get_index_and_key(nodes, position):
+    """
+    Returns the number of commas and the keyword argument string.
+    """
+    nodes_before = [c for c in nodes if c.start_pos < position]
+    if nodes_before[-1].type == 'arglist':
+        return _get_index_and_key(nodes_before[-1].children, position)
+
+    key_str = None
+
+    last = nodes_before[-1]
+    if last.type == 'argument' and last.children[1] == '=' \
+            and last.children[1].end_pos <= position:
+        # The cursor is already behind the `=` of a keyword argument.
+        key_str = last.children[0].value
+    elif last == '=':
+        key_str = nodes_before[-2].value
+
+    return nodes_before.count(','), key_str
+
+
+def _get_signature_details_from_error_node(node, additional_children, position):
+    for index, element in reversed(list(enumerate(node.children))):
+        # `index > 0` means that it's a trailer and not an atom.
+        if element == '(' and element.end_pos <= position and index > 0:
+            # It's an error node, we don't want to match too much, just
+            # until the parentheses is enough.
+            children = node.children[index:]
+            name = element.get_previous_leaf()
+            if name is None:
+                continue
+            if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
+                return CallDetails(element, children + additional_children, position)
+
+
+def get_signature_details(module, position):
+    leaf = module.get_leaf_for_position(position, include_prefixes=True)
+    # It's easier to deal with the previous token than the next one in this
+    # case.
+    if leaf.start_pos >= position:
+        # Whitespace / comments after the leaf count towards the previous leaf.
+        leaf = leaf.get_previous_leaf()
+        if leaf is None:
+            return None
+
+    # Now that we know where we are in the syntax tree, we start to look at
+    # parents for possible function definitions.
+    node = leaf.parent
+    while node is not None:
+        if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
+            # Don't show signatures if there's stuff before it that just
+            # makes it feel strange to have a signature.
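+            # E.g. a cursor inside `def foo(` reaches the funcdef before any
+            # trailer, so no call signature applies there.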
+ return None + + additional_children = [] + for n in reversed(node.children): + if n.start_pos < position: + if n.type == 'error_node': + result = _get_signature_details_from_error_node( + n, additional_children, position + ) + if result is not None: + return result + + additional_children[0:0] = n.children + continue + additional_children.insert(0, n) + + # Find a valid trailer + if node.type == 'trailer' and node.children[0] == '(' \ + or node.type == 'decorator' and node.children[2] == '(': + # Additionally we have to check that an ending parenthesis isn't + # interpreted wrong. There are two cases: + # 1. Cursor before paren -> The current signature is good + # 2. Cursor after paren -> We need to skip the current signature + if not (leaf is node.children[-1] and position >= leaf.end_pos): + leaf = node.get_previous_leaf() + if leaf is None: + return None + return CallDetails( + node.children[0] if node.type == 'trailer' else node.children[2], + node.children, + position + ) + + node = node.parent + + return None + + +@signature_time_cache("call_signatures_validity") +def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos): + """This function calculates the cache key.""" + line_index = user_pos[0] - 1 + + before_cursor = code_lines[line_index][:user_pos[1]] + other_lines = code_lines[bracket_leaf.start_pos[0]:line_index] + whole = ''.join(other_lines + [before_cursor]) + before_bracket = re.match(r'.*\(', whole, re.DOTALL) + + module_path = context.get_root_context().py__file__() + if module_path is None: + yield None # Don't cache! + else: + yield (module_path, before_bracket, bracket_leaf.start_pos) + yield infer( + inference_state, + context, + bracket_leaf.get_previous_leaf(), + ) + + +def validate_line_column(func): + @wraps(func) + def wrapper(self, line=None, column=None, *args, **kwargs): + line = max(len(self._code_lines), 1) if line is None else line + if not (0 < line <= len(self._code_lines)): + raise ValueError('`line` parameter is not in a valid range.') + + line_string = self._code_lines[line - 1] + line_len = len(line_string) + if line_string.endswith('\r\n'): + line_len -= 2 + elif line_string.endswith('\n'): + line_len -= 1 + + column = line_len if column is None else column + if not (0 <= column <= line_len): + raise ValueError('`column` parameter (%d) is not in a valid range ' + '(0-%d) for line %d (%r).' % ( + column, line_len, line, line_string)) + return func(self, line, column, *args, **kwargs) + return wrapper + + +def get_module_names(module, all_scopes, definitions=True, references=False): + """ + Returns a dictionary with name parts as keys and their call paths as + values. + """ + def def_ref_filter(name): + is_def = name.is_definition() + return definitions and is_def or references and not is_def + + names = list(chain.from_iterable(module.get_used_names().values())) + if not all_scopes: + # We have to filter all the names that don't have the module as a + # parent_scope. There's None as a parent, because nodes in the module + # node have the parent module and not suite as all the others. + # Therefore it's important to catch that case. + + def is_module_scope_name(name): + parent_scope = get_parent_scope(name) + # async functions have an extra wrapper. Strip it. 
+            if parent_scope and parent_scope.type == 'async_stmt':
+                parent_scope = parent_scope.parent
+            return parent_scope in (module, None)
+
+        names = [n for n in names if is_module_scope_name(n)]
+    return filter(def_ref_filter, names)
+
+
+def split_search_string(name):
+    type, _, dotted_names = name.rpartition(' ')
+    if type == 'def':
+        type = 'function'
+    return type, dotted_names.split('.')
diff --git a/lib/python3.10/site-packages/jedi/api/interpreter.py b/lib/python3.10/site-packages/jedi/api/interpreter.py
new file mode 100644
index 0000000000000000000000000000000000000000..befafe5a46dca943ff1481bdc004aa2951e60434
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/interpreter.py
@@ -0,0 +1,74 @@
+"""
+TODO Some parts of this module are still not well documented.
+"""
+
+from jedi.inference import compiled
+from jedi.inference.base_value import ValueSet
+from jedi.inference.filters import ParserTreeFilter, MergedFilter
+from jedi.inference.names import TreeNameDefinition
+from jedi.inference.compiled import mixed
+from jedi.inference.compiled.access import create_access_path
+from jedi.inference.context import ModuleContext
+
+
+def _create(inference_state, obj):
+    return compiled.create_from_access_path(
+        inference_state, create_access_path(inference_state, obj)
+    )
+
+
+class NamespaceObject:
+    def __init__(self, dct):
+        self.__dict__ = dct
+
+
+class MixedTreeName(TreeNameDefinition):
+    def infer(self):
+        """
+        In an IPython notebook it is typical that some parts of the provided
+        code were already executed. In that case, if something is not properly
+        inferred, it should still infer from the variables it already knows.
+        """
+        inferred = super().infer()
+        if not inferred:
+            for compiled_value in self.parent_context.mixed_values:
+                for f in compiled_value.get_filters():
+                    values = ValueSet.from_sets(
+                        n.infer() for n in f.get(self.string_name)
+                    )
+                    if values:
+                        return values
+        return inferred
+
+
+class MixedParserTreeFilter(ParserTreeFilter):
+    name_class = MixedTreeName
+
+
+class MixedModuleContext(ModuleContext):
+    def __init__(self, tree_module_value, namespaces):
+        super().__init__(tree_module_value)
+        self.mixed_values = [
+            self._get_mixed_object(
+                _create(self.inference_state, NamespaceObject(n))
+            ) for n in namespaces
+        ]
+
+    def _get_mixed_object(self, compiled_value):
+        return mixed.MixedObject(
+            compiled_value=compiled_value,
+            tree_value=self._value
+        )
+
+    def get_filters(self, until_position=None, origin_scope=None):
+        yield MergedFilter(
+            MixedParserTreeFilter(
+                parent_context=self,
+                until_position=until_position,
+                origin_scope=origin_scope
+            ),
+            self.get_global_filter(),
+        )
+
+        for mixed_object in self.mixed_values:
+            yield from mixed_object.get_filters(until_position, origin_scope)
diff --git a/lib/python3.10/site-packages/jedi/api/keywords.py b/lib/python3.10/site-packages/jedi/api/keywords.py
new file mode 100644
index 0000000000000000000000000000000000000000..80ff13c3c08f55be529061472c41f6d83240d3e4
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/keywords.py
@@ -0,0 +1,51 @@
+import pydoc
+from contextlib import suppress
+from typing import Dict, Optional
+
+from jedi.inference.names import AbstractArbitraryName
+
+try:
+    from pydoc_data import topics
+    pydoc_topics: Optional[Dict[str, str]] = topics.topics
+except ImportError:
+    # Python 3.6.8 embeddable does not have pydoc_data.
+    pydoc_topics = None
+
+
+class KeywordName(AbstractArbitraryName):
+    api_type = 'keyword'
+
+    def py__doc__(self):
+        return imitate_pydoc(self.string_name)
+
+
+def imitate_pydoc(string):
+    """
+    It's not possible to get pydoc's help texts without starting the annoying
+    pager stuff.
+    """
+    if pydoc_topics is None:
+        return ''
+
+    h = pydoc.help
+    with suppress(KeyError):
+        # try to access symbols
+        string = h.symbols[string]
+        string, _, related = string.partition(' ')
+
+    def get_target(s):
+        return h.topics.get(s, h.keywords.get(s))
+
+    while isinstance(string, str):
+        string = get_target(string)
+
+    try:
+        # is a tuple now
+        label, related = string
+    except TypeError:
+        return ''
+
+    try:
+        return pydoc_topics[label].strip() if pydoc_topics else ''
+    except KeyError:
+        return ''
diff --git a/lib/python3.10/site-packages/jedi/api/project.py b/lib/python3.10/site-packages/jedi/api/project.py
new file mode 100644
index 0000000000000000000000000000000000000000..8927e7ea27276def440aa92c1ca7f371e405f46c
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/api/project.py
@@ -0,0 +1,448 @@
+"""
+Projects are a way to handle Python projects within Jedi. For simpler plugins
+you might not want to deal with projects, but if you want to give the user more
+flexibility to define sys paths and Python interpreters for a project,
+:class:`.Project` is the perfect way to allow for that.
+
+Projects can be saved to disk and loaded again, to allow project definitions to
+be used across repositories.
+"""
+import json
+from pathlib import Path
+from itertools import chain
+
+from jedi import debug
+from jedi.api.environment import get_cached_default_environment, create_environment
+from jedi.api.exceptions import WrongVersion
+from jedi.api.completion import search_in_module
+from jedi.api.helpers import split_search_string, get_module_names
+from jedi.inference.imports import load_module_from_path, \
+    load_namespace_from_path, iter_module_names
+from jedi.inference.sys_path import discover_buildout_paths
+from jedi.inference.cache import inference_state_as_method_param_cache
+from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
+from jedi.file_io import FolderIO
+
+_CONFIG_FOLDER = '.jedi'
+_CONTAINS_POTENTIAL_PROJECT = \
+    'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
+
+_SERIALIZER_VERSION = 1
+
+
+def _try_to_skip_duplicates(func):
+    def wrapper(*args, **kwargs):
+        found_tree_nodes = []
+        found_modules = []
+        for definition in func(*args, **kwargs):
+            tree_node = definition._name.tree_name
+            if tree_node is not None and tree_node in found_tree_nodes:
+                continue
+            if definition.type == 'module' and definition.module_path is not None:
+                if definition.module_path in found_modules:
+                    continue
+                found_modules.append(definition.module_path)
+            yield definition
+            found_tree_nodes.append(tree_node)
+    return wrapper
+
+
+def _remove_duplicates_from_path(path):
+    used = set()
+    for p in path:
+        if p in used:
+            continue
+        used.add(p)
+        yield p
+
+
+class Project:
+    """
+    Projects are a simple way to manage Python folders and define how Jedi does
+    import resolution. It is mostly used as a parameter to :class:`.Script`.
+    Additionally there are functions to search a whole project.
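+
+    Example (a minimal sketch; the project path is a placeholder)::
+
+        from jedi import Project
+
+        project = Project('/path/to/project', added_sys_path=('src',))
+        project.save()  # writes .jedi/project.json inside the project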
+ """ + _environment = None + + @staticmethod + def _get_config_folder_path(base_path): + return base_path.joinpath(_CONFIG_FOLDER) + + @staticmethod + def _get_json_path(base_path): + return Project._get_config_folder_path(base_path).joinpath('project.json') + + @classmethod + def load(cls, path): + """ + Loads a project from a specific path. You should not provide the path + to ``.jedi/project.json``, but rather the path to the project folder. + + :param path: The path of the directory you want to use as a project. + """ + if isinstance(path, str): + path = Path(path) + with open(cls._get_json_path(path)) as f: + version, data = json.load(f) + + if version == 1: + return cls(**data) + else: + raise WrongVersion( + "The Jedi version of this project seems newer than what we can handle." + ) + + def save(self): + """ + Saves the project configuration in the project in ``.jedi/project.json``. + """ + data = dict(self.__dict__) + data.pop('_environment', None) + data.pop('_django', None) # TODO make django setting public? + data = {k.lstrip('_'): v for k, v in data.items()} + data['path'] = str(data['path']) + + self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True) + with open(self._get_json_path(self._path), 'w') as f: + return json.dump((_SERIALIZER_VERSION, data), f) + + def __init__( + self, + path, + *, + environment_path=None, + load_unsafe_extensions=False, + sys_path=None, + added_sys_path=(), + smart_sys_path=True, + ) -> None: + """ + :param path: The base path for this project. + :param environment_path: The Python executable path, typically the path + of a virtual environment. + :param load_unsafe_extensions: Default False, Loads extensions that are not in the + sys path and in the local directories. With this option enabled, + this is potentially unsafe if you clone a git repository and + analyze it's code, because those compiled extensions will be + important and therefore have execution privileges. + :param sys_path: list of str. You can override the sys path if you + want. By default the ``sys.path.`` is generated by the + environment (virtualenvs, etc). + :param added_sys_path: list of str. Adds these paths at the end of the + sys path. + :param smart_sys_path: If this is enabled (default), adds paths from + local directories. Otherwise you will have to rely on your packages + being properly configured on the ``sys.path``. + """ + + if isinstance(path, str): + path = Path(path).absolute() + self._path = path + + self._environment_path = environment_path + if sys_path is not None: + # Remap potential pathlib.Path entries + sys_path = list(map(str, sys_path)) + self._sys_path = sys_path + self._smart_sys_path = smart_sys_path + self._load_unsafe_extensions = load_unsafe_extensions + self._django = False + # Remap potential pathlib.Path entries + self.added_sys_path = list(map(str, added_sys_path)) + """The sys path that is going to be added at the end of the """ + + @property + def path(self): + """ + The base path for this project. + """ + return self._path + + @property + def sys_path(self): + """ + The sys path provided to this project. This can be None and in that + case will be auto generated. + """ + return self._sys_path + + @property + def smart_sys_path(self): + """ + If the sys path is going to be calculated in a smart way, where + additional paths are added. + """ + return self._smart_sys_path + + @property + def load_unsafe_extensions(self): + """ + Wheter the project loads unsafe extensions. 
+ """ + return self._load_unsafe_extensions + + @inference_state_as_method_param_cache() + def _get_base_sys_path(self, inference_state): + # The sys path has not been set explicitly. + sys_path = list(inference_state.environment.get_sys_path()) + try: + sys_path.remove('') + except ValueError: + pass + return sys_path + + @inference_state_as_method_param_cache() + def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False): + """ + Keep this method private for all users of jedi. However internally this + one is used like a public method. + """ + suffixed = list(self.added_sys_path) + prefixed = [] + + if self._sys_path is None: + sys_path = list(self._get_base_sys_path(inference_state)) + else: + sys_path = list(self._sys_path) + + if self._smart_sys_path: + prefixed.append(str(self._path)) + + if inference_state.script_path is not None: + suffixed += map(str, discover_buildout_paths( + inference_state, + inference_state.script_path + )) + + if add_parent_paths: + # Collect directories in upward search by: + # 1. Skipping directories with __init__.py + # 2. Stopping immediately when above self._path + traversed = [] + for parent_path in inference_state.script_path.parents: + if parent_path == self._path \ + or self._path not in parent_path.parents: + break + if not add_init_paths \ + and parent_path.joinpath("__init__.py").is_file(): + continue + traversed.append(str(parent_path)) + + # AFAIK some libraries have imports like `foo.foo.bar`, which + # leads to the conclusion to by default prefer longer paths + # rather than shorter ones by default. + suffixed += reversed(traversed) + + if self._django: + prefixed.append(str(self._path)) + + path = prefixed + sys_path + suffixed + return list(_remove_duplicates_from_path(path)) + + def get_environment(self): + if self._environment is None: + if self._environment_path is not None: + self._environment = create_environment(self._environment_path, safe=False) + else: + self._environment = get_cached_default_environment() + return self._environment + + def search(self, string, *, all_scopes=False): + """ + Searches a name in the whole project. If the project is very big, + at some point Jedi will stop searching. However it's also very much + recommended to not exhaust the generator. Just display the first ten + results to the user. + + There are currently three different search patterns: + + - ``foo`` to search for a definition foo in any file or a file called + ``foo.py`` or ``foo.pyi``. + - ``foo.bar`` to search for the ``foo`` and then an attribute ``bar`` + in it. + - ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific + API type. + + :param bool all_scopes: Default False; searches not only for + definitions on the top level of a module level, but also in + functions and classes. + :yields: :class:`.Name` + """ + return self._search_func(string, all_scopes=all_scopes) + + def complete_search(self, string, **kwargs): + """ + Like :meth:`.Script.search`, but completes that string. An empty string + lists all definitions in a project, so be careful with that. + + :param bool all_scopes: Default False; searches not only for + definitions on the top level of a module level, but also in + functions and classes. + :yields: :class:`.Completion` + """ + return self._search_func(string, complete=True, **kwargs) + + @_try_to_skip_duplicates + def _search_func(self, string, complete=False, all_scopes=False): + # Using a Script is they easiest way to get an empty module context. 
+ from jedi import Script + s = Script('', project=self) + inference_state = s._inference_state + empty_module_context = s._get_module_context() + + debug.dbg('Search for string %s, complete=%s', string, complete) + wanted_type, wanted_names = split_search_string(string) + name = wanted_names[0] + stub_folder_name = name + '-stubs' + + ios = recurse_find_python_folders_and_files(FolderIO(str(self._path))) + file_ios = [] + + # 1. Search for modules in the current project + for folder_io, file_io in ios: + if file_io is None: + file_name = folder_io.get_base_name() + if file_name == name or file_name == stub_folder_name: + f = folder_io.get_file_io('__init__.py') + try: + m = load_module_from_path(inference_state, f).as_context() + except FileNotFoundError: + f = folder_io.get_file_io('__init__.pyi') + try: + m = load_module_from_path(inference_state, f).as_context() + except FileNotFoundError: + m = load_namespace_from_path(inference_state, folder_io).as_context() + else: + continue + else: + file_ios.append(file_io) + if Path(file_io.path).name in (name + '.py', name + '.pyi'): + m = load_module_from_path(inference_state, file_io).as_context() + else: + continue + + debug.dbg('Search of a specific module %s', m) + yield from search_in_module( + inference_state, + m, + names=[m.name], + wanted_type=wanted_type, + wanted_names=wanted_names, + complete=complete, + convert=True, + ignore_imports=True, + ) + + # 2. Search for identifiers in the project. + for module_context in search_in_file_ios(inference_state, file_ios, + name, complete=complete): + names = get_module_names(module_context.tree_node, all_scopes=all_scopes) + names = [module_context.create_name(n) for n in names] + names = _remove_imports(names) + yield from search_in_module( + inference_state, + module_context, + names=names, + wanted_type=wanted_type, + wanted_names=wanted_names, + complete=complete, + ignore_imports=True, + ) + + # 3. Search for modules on sys.path + sys_path = [ + p for p in self._get_sys_path(inference_state) + # Exclude the current folder which is handled by recursing the folders. + if p != self._path + ] + names = list(iter_module_names(inference_state, empty_module_context, sys_path)) + yield from search_in_module( + inference_state, + empty_module_context, + names=names, + wanted_type=wanted_type, + wanted_names=wanted_names, + complete=complete, + convert=True, + ) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._path) + + +def _is_potential_project(path): + for name in _CONTAINS_POTENTIAL_PROJECT: + try: + if path.joinpath(name).exists(): + return True + except OSError: + continue + return False + + +def _is_django_path(directory): + """ Detects whether a directory is the root of a Django project. """ + try: + with open(directory.joinpath('manage.py'), 'rb') as f: + return b"DJANGO_SETTINGS_MODULE" in f.read() + except (FileNotFoundError, IsADirectoryError, PermissionError): + return False + + +def get_default_project(path=None): + """ + If a project is not defined by the user, Jedi tries to define a project by + itself as best as possible. Jedi traverses folders until it finds one of + the following: + + 1. A ``.jedi/project.json`` configuration file + 2. One of the following files or folders: ``setup.py``, ``.git``, ``.hg``, + ``requirements.txt``, ``MANIFEST.in`` and ``pyproject.toml``. 
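+ + A minimal sketch of the intended use (the path below is hypothetical):: + + >>> from jedi.api.project import get_default_project + >>> get_default_project('/path/to/repo/pkg/subpkg').path # doctest: +SKIP + PosixPath('/path/to/repo') 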
+ """ + if path is None: + path = Path.cwd() + elif isinstance(path, str): + path = Path(path) + + check = path.absolute() + probable_path = None + first_no_init_file = None + for dir in chain([check], check.parents): + try: + return Project.load(dir) + except (FileNotFoundError, IsADirectoryError, PermissionError): + pass + except NotADirectoryError: + continue + + if first_no_init_file is None: + if dir.joinpath('__init__.py').exists(): + # In the case that a __init__.py exists, it's in 99% just a + # Python package and the project sits at least one level above. + continue + elif not dir.is_file(): + first_no_init_file = dir + + if _is_django_path(dir): + project = Project(dir) + project._django = True + return project + + if probable_path is None and _is_potential_project(dir): + probable_path = dir + + if probable_path is not None: + return Project(probable_path) + + if first_no_init_file is not None: + return Project(first_no_init_file) + + curdir = path if path.is_dir() else path.parent + return Project(curdir) + + +def _remove_imports(names): + return [ + n for n in names + if n.tree_name is None or n.api_type not in ('module', 'namespace') + ] diff --git a/lib/python3.10/site-packages/jedi/api/replstartup.py b/lib/python3.10/site-packages/jedi/api/replstartup.py new file mode 100644 index 0000000000000000000000000000000000000000..e0f23d19b71082fad3b92cc760b069dce92faa21 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/replstartup.py @@ -0,0 +1,29 @@ +""" +To use Jedi completion in Python interpreter, add the following in your shell +setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is +not available on Windows. If you still want Jedi autocompletion in your REPL, +just use IPython instead:: + + export PYTHONSTARTUP="$(python -m jedi repl)" + +Then you will be able to use Jedi completer in your Python interpreter:: + + $ python + Python 3.9.2+ (default, Jul 20 2020, 22:15:08) + [GCC 4.6.1] on linux2 + Type "help", "copyright", "credits" or "license" for more information. + >>> import os + >>> os.path.join('a', 'b').split().in # doctest: +SKIP + ..dex ..sert + +""" +import jedi.utils +from jedi import __version__ as __jedi_version__ + +print('REPL completion using Jedi %s' % __jedi_version__) +jedi.utils.setup_readline(fuzzy=False) + +del jedi + +# Note: try not to do many things here, as it will contaminate global +# namespace of the interpreter. diff --git a/lib/python3.10/site-packages/jedi/api/strings.py b/lib/python3.10/site-packages/jedi/api/strings.py new file mode 100644 index 0000000000000000000000000000000000000000..7850142c5e1470a0c6c2a3313f205defbd35fb85 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/api/strings.py @@ -0,0 +1,111 @@ +""" +This module is here for string completions. This means mostly stuff where +strings are returned, like `foo = dict(bar=3); foo["ba` would complete to +`"bar"]`. + +It however does the same for numbers. The difference between string completions +and other completions is mostly that this module doesn't return defined +names in a module, but pretty much an arbitrary string. 
+""" +import re + +from jedi.inference.names import AbstractArbitraryName +from jedi.inference.helpers import infer_call_of_leaf +from jedi.api.classes import Completion +from jedi.parser_utils import cut_value_at_position + +_sentinel = object() + + +class StringName(AbstractArbitraryName): + api_type = 'string' + is_value_name = False + + +def complete_dict(module_context, code_lines, leaf, position, string, fuzzy): + bracket_leaf = leaf + if bracket_leaf != '[': + bracket_leaf = leaf.get_previous_leaf() + + cut_end_quote = '' + if string: + cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True) + + if bracket_leaf == '[': + if string is None and leaf is not bracket_leaf: + string = cut_value_at_position(leaf, position) + + context = module_context.create_context(bracket_leaf) + + before_node = before_bracket_leaf = bracket_leaf.get_previous_leaf() + if before_node in (')', ']', '}'): + before_node = before_node.parent + if before_node.type in ('atom', 'trailer', 'name'): + values = infer_call_of_leaf(context, before_bracket_leaf) + return list(_completions_for_dicts( + module_context.inference_state, + values, + '' if string is None else string, + cut_end_quote, + fuzzy=fuzzy, + )) + return [] + + +def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy): + for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)): + dict_key_str = _create_repr_string(literal_string, dict_key) + if dict_key_str.startswith(literal_string): + name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None]) + yield Completion( + inference_state, + name, + stack=None, + like_name_length=len(literal_string), + is_fuzzy=fuzzy + ) + + +def _create_repr_string(literal_string, dict_key): + if not isinstance(dict_key, (str, bytes)) or not literal_string: + return repr(dict_key) + + r = repr(dict_key) + prefix, quote = _get_string_prefix_and_quote(literal_string) + if quote is None: + return r + if quote == r[0]: + return prefix + r + return prefix + quote + r[1:-1] + quote + + +def _get_python_keys(dicts): + for dct in dicts: + if dct.array_type == 'dict': + for key in dct.get_key_values(): + dict_key = key.get_safe_value(default=_sentinel) + if dict_key is not _sentinel: + yield dict_key + + +def _get_string_prefix_and_quote(string): + match = re.match(r'(\w*)("""|\'{3}|"|\')', string) + if match is None: + return None, None + return match.group(1), match.group(2) + + +def _matches_quote_at_position(code_lines, quote, position): + string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)] + return string == quote + + +def get_quote_ending(string, code_lines, position, invert_result=False): + _, quote = _get_string_prefix_and_quote(string) + if quote is None: + return '' + + # Add a quote only if it's not already there. + if _matches_quote_at_position(code_lines, quote, position) != invert_result: + return '' + return quote diff --git a/lib/python3.10/site-packages/jedi/inference/__init__.py b/lib/python3.10/site-packages/jedi/inference/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd31cbd398a930f72e42148c6e14ec972c2b3f86 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/__init__.py @@ -0,0 +1,199 @@ +""" +Type inference of Python code in |jedi| is based on three assumptions: + +* The code uses as least side effects as possible. 
Jedi understands certain + list/tuple/set modifications, but there's no guarantee that Jedi detects + everything (list.append in different modules for example). +* No magic is being used: + + - metaclasses + - ``setattr()`` / ``__import__()`` + - writing to ``globals()``, ``locals()``, ``object.__dict__`` +* The programmer is not a total dick, e.g. like `this + `_ :-) + +The actual algorithm is based on a principle I call lazy type inference. That +said, the typical entry point for static analysis is calling +``infer_expr_stmt``. There's separate logic for autocompletion in the API; the +inference_state is all about inferring an expression. + +TODO this paragraph is not what jedi does anymore, it's similar, but not the +same. + +Now you need to understand what follows after ``infer_expr_stmt``. Let's +make an example:: + + import datetime + datetime.date.toda# <-- cursor here + +First of all, this module doesn't care about completion. It really just cares +about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will +return the ``date`` class. + +To *visualize* this (simplified): + +- ``InferenceState.infer_expr_stmt`` doesn't do much, because there's no assignment. +- ``Context.infer_node`` takes care of resolving the dotted path. +- ``InferenceState.find_types`` searches for global definitions of datetime, which + it finds in the definition of an import, by scanning the syntax tree. +- Using the import logic, the datetime module is found. +- Now ``find_types`` is called again by ``infer_node`` to find ``date`` + inside the datetime module. + +Now what would happen if we wanted ``datetime.date.foo.bar``? Two more +calls to ``find_types``. However the second call would be ignored, because the +first one would return nothing (there's no foo attribute in ``date``). + +What if the import contained another ``ExprStmt`` like this:: + + from foo import bar + Date = bar.baz + +Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really +easy. Python can obviously get way more complicated than this. To understand +tuple assignments, list comprehensions and everything else, a lot more code had +to be written. + +Jedi has been tested very well, so you can just start modifying code. It's best +to write your own test first for your "new" feature. Don't be scared of +breaking stuff. As long as the tests pass, you're most likely to be fine. + +I need to mention now that lazy type inference is really good because it +only *infers* what needs to be *inferred*. All the statements and modules +that are not used are just being ignored. 
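+ +Through the public API, the walkthrough above boils down to this minimal +sketch (output abbreviated):: + + >>> import jedi + >>> script = jedi.Script('import datetime\ndatetime.date.toda') + >>> script.complete() # doctest: +SKIP + [<Completion: today>] 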
+""" +import parso +from jedi.file_io import FileIO + +from jedi import debug +from jedi import settings +from jedi.inference import imports +from jedi.inference import recursion +from jedi.inference.cache import inference_state_function_cache +from jedi.inference import helpers +from jedi.inference.names import TreeNameDefinition +from jedi.inference.base_value import ContextualizedNode, \ + ValueSet, iterate_values +from jedi.inference.value import ClassValue, FunctionValue +from jedi.inference.syntax_tree import infer_expr_stmt, \ + check_tuple_assignments, tree_name_to_values +from jedi.inference.imports import follow_error_node_imports_if_possible +from jedi.plugins import plugin_manager + + +class InferenceState: + def __init__(self, project, environment=None, script_path=None): + if environment is None: + environment = project.get_environment() + self.environment = environment + self.script_path = script_path + self.compiled_subprocess = environment.get_inference_state_subprocess(self) + self.grammar = environment.get_grammar() + + self.latest_grammar = parso.load_grammar(version='3.13') + self.memoize_cache = {} # for memoize decorators + self.module_cache = imports.ModuleCache() # does the job of `sys.modules`. + self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleValue]] + self.compiled_cache = {} # see `inference.compiled.create()` + self.inferred_element_counts = {} + self.mixed_cache = {} # see `inference.compiled.mixed._create()` + self.analysis = [] + self.dynamic_params_depth = 0 + self.do_dynamic_params_search = settings.dynamic_params + self.is_analysis = False + self.project = project + self.access_cache = {} + self.allow_unsafe_executions = False + self.flow_analysis_enabled = True + + self.reset_recursion_limitations() + + def import_module(self, import_names, sys_path=None, prefer_stubs=True): + return imports.import_module_by_names( + self, import_names, sys_path, prefer_stubs=prefer_stubs) + + @staticmethod + @plugin_manager.decorate() + def execute(value, arguments): + debug.dbg('execute: %s %s', value, arguments) + with debug.increase_indent_cm(): + value_set = value.py__call__(arguments=arguments) + debug.dbg('execute result: %s in %s', value_set, value) + return value_set + + # mypy doesn't suppport decorated propeties (https://github.com/python/mypy/issues/1362) + @property # type: ignore[misc] + @inference_state_function_cache() + def builtins_module(self): + module_name = 'builtins' + builtins_module, = self.import_module((module_name,), sys_path=[]) + return builtins_module + + @property # type: ignore[misc] + @inference_state_function_cache() + def typing_module(self): + typing_module, = self.import_module(('typing',)) + return typing_module + + def reset_recursion_limitations(self): + self.recursion_detector = recursion.RecursionDetector() + self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self) + + def get_sys_path(self, **kwargs): + """Convenience function""" + return self.project._get_sys_path(self, **kwargs) + + def infer(self, context, name): + def_ = name.get_definition(import_name_always=True) + if def_ is not None: + type_ = def_.type + is_classdef = type_ == 'classdef' + if is_classdef or type_ == 'funcdef': + if is_classdef: + c = ClassValue(self, context, name.parent) + else: + c = FunctionValue.from_context(context, name.parent) + return ValueSet([c]) + + if type_ == 'expr_stmt': + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return infer_expr_stmt(context, def_, 
name) + if type_ == 'for_stmt': + container_types = context.infer_node(def_.children[3]) + cn = ContextualizedNode(context, def_.children[3]) + for_types = iterate_values(container_types, cn) + n = TreeNameDefinition(context, name) + return check_tuple_assignments(n, for_types) + if type_ in ('import_from', 'import_name'): + return imports.infer_import(context, name) + if type_ == 'with_stmt': + return tree_name_to_values(self, context, name) + elif type_ == 'param': + return context.py__getattribute__(name.value, position=name.end_pos) + elif type_ == 'namedexpr_test': + return context.infer_node(def_) + else: + result = follow_error_node_imports_if_possible(context, name) + if result is not None: + return result + + return helpers.infer_call_of_leaf(context, name) + + def parse_and_get_code(self, code=None, path=None, + use_latest_grammar=False, file_io=None, **kwargs): + if code is None: + if file_io is None: + file_io = FileIO(path) + code = file_io.read() + # We cannot just use parso, because it doesn't use errors='replace'. + code = parso.python_bytes_to_unicode(code, encoding='utf-8', errors='replace') + + if len(code) > settings._cropped_file_size: + code = code[:settings._cropped_file_size] + + grammar = self.latest_grammar if use_latest_grammar else self.grammar + return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code + + def parse(self, *args, **kwargs): + return self.parse_and_get_code(*args, **kwargs)[0] diff --git a/lib/python3.10/site-packages/jedi/inference/analysis.py b/lib/python3.10/site-packages/jedi/inference/analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..c272a9cb86a1f2522d9563fa6a7e2e6af8344049 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/analysis.py @@ -0,0 +1,213 @@ +""" +Module for static analysis. 
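+ +Issues are identified by the keys of ``CODES`` below. The reported code is +the first letter of the class name (``E`` for ``Error``, ``W`` for +``Warning``) followed by the numeric id, so a potential ``AttributeError`` +is rendered as ``E1``, or ``W1`` if it is only reported as a warning. 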
+""" +from parso.python import tree + +from jedi import debug +from jedi.inference.helpers import is_string + + +CODES = { + 'attribute-error': (1, AttributeError, 'Potential AttributeError.'), + 'name-error': (2, NameError, 'Potential NameError.'), + 'import-error': (3, ImportError, 'Potential ImportError.'), + 'type-error-too-many-arguments': (4, TypeError, None), + 'type-error-too-few-arguments': (5, TypeError, None), + 'type-error-keyword-argument': (6, TypeError, None), + 'type-error-multiple-values': (7, TypeError, None), + 'type-error-star-star': (8, TypeError, None), + 'type-error-star': (9, TypeError, None), + 'type-error-operation': (10, TypeError, None), + 'type-error-not-iterable': (11, TypeError, None), + 'type-error-isinstance': (12, TypeError, None), + 'type-error-not-subscriptable': (13, TypeError, None), + 'value-error-too-many-values': (14, ValueError, None), + 'value-error-too-few-values': (15, ValueError, None), +} + + +class Error: + def __init__(self, name, module_path, start_pos, message=None): + self.path = module_path + self._start_pos = start_pos + self.name = name + if message is None: + message = CODES[self.name][2] + self.message = message + + @property + def line(self): + return self._start_pos[0] + + @property + def column(self): + return self._start_pos[1] + + @property + def code(self): + # The class name start + first = self.__class__.__name__[0] + return first + str(CODES[self.name][0]) + + def __str__(self): + return '%s:%s:%s: %s %s' % (self.path, self.line, self.column, + self.code, self.message) + + def __eq__(self, other): + return (self.path == other.path and self.name == other.name + and self._start_pos == other._start_pos) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash((self.path, self._start_pos, self.name)) + + def __repr__(self): + return '<%s %s: %s@%s,%s>' % (self.__class__.__name__, + self.name, self.path, + self._start_pos[0], self._start_pos[1]) + + +class Warning(Error): + pass + + +def add(node_context, error_name, node, message=None, typ=Error, payload=None): + exception = CODES[error_name][1] + if _check_for_exception_catch(node_context, node, exception, payload): + return + + # TODO this path is probably not right + module_context = node_context.get_root_context() + module_path = module_context.py__file__() + issue_instance = typ(error_name, module_path, node.start_pos, message) + debug.warning(str(issue_instance), format=False) + node_context.inference_state.analysis.append(issue_instance) + return issue_instance + + +def _check_for_setattr(instance): + """ + Check if there's any setattr method inside an instance. If so, return True. + """ + module = instance.get_root_context() + node = module.tree_node + if node is None: + # If it's a compiled module or doesn't have a tree_node + return False + + try: + stmt_names = node.get_used_names()['setattr'] + except KeyError: + return False + + return any(node.start_pos < n.start_pos < node.end_pos + # Check if it's a function called setattr. + and not (n.parent.type == 'funcdef' and n.parent.name == n) + for n in stmt_names) + + +def add_attribute_error(name_context, lookup_value, name): + message = ('AttributeError: %s has no attribute %s.' % (lookup_value, name)) + # Check for __getattr__/__getattribute__ existance and issue a warning + # instead of an error, if that happens. 
+ typ = Error + if lookup_value.is_instance() and not lookup_value.is_compiled(): + # TODO maybe make a warning for __getattr__/__getattribute__ + + if _check_for_setattr(lookup_value): + typ = Warning + + payload = lookup_value, name + add(name_context, 'attribute-error', name, message, typ, payload) + + +def _check_for_exception_catch(node_context, jedi_name, exception, payload=None): + """ + Checks if a jedi object (e.g. `Statement`) sits inside a try/except block + and therefore doesn't count as an error (if it matches `exception`). + Also checks `hasattr` for AttributeErrors and uses the `payload` to compare + it. + Returns True if the exception was caught. + """ + def check_match(cls, exception): + if not cls.is_class(): + return False + + for python_cls in exception.mro(): + if cls.py__name__() == python_cls.__name__ \ + and cls.parent_context.is_builtins_module(): + return True + return False + + def check_try_for_except(obj, exception): + # Only nodes in try + iterator = iter(obj.children) + for branch_type in iterator: + next(iterator) # The colon + suite = next(iterator) + if branch_type == 'try' \ + and not (branch_type.start_pos < jedi_name.start_pos <= suite.end_pos): + return False + + for node in obj.get_except_clause_tests(): + if node is None: + return True # An exception block that catches everything. + else: + except_classes = node_context.infer_node(node) + for cls in except_classes: + from jedi.inference.value import iterable + if isinstance(cls, iterable.Sequence) and \ + cls.array_type == 'tuple': + # multiple exceptions + for lazy_value in cls.py__iter__(): + for typ in lazy_value.infer(): + if check_match(typ, exception): + return True + else: + if check_match(cls, exception): + return True + + def check_hasattr(node, suite): + try: + assert suite.start_pos <= jedi_name.start_pos < suite.end_pos + assert node.type in ('power', 'atom_expr') + base = node.children[0] + assert base.type == 'name' and base.value == 'hasattr' + trailer = node.children[1] + assert trailer.type == 'trailer' + arglist = trailer.children[1] + assert arglist.type == 'arglist' + from jedi.inference.arguments import TreeArguments + args = TreeArguments(node_context.inference_state, node_context, arglist) + unpacked_args = list(args.unpack()) + # Arguments should be very simple + assert len(unpacked_args) == 2 + + # Check name + key, lazy_value = unpacked_args[1] + names = list(lazy_value.infer()) + assert len(names) == 1 and is_string(names[0]) + assert names[0].get_safe_value() == payload[1].value + + # Check objects + key, lazy_value = unpacked_args[0] + objects = lazy_value.infer() + return payload[0] in objects + except AssertionError: + return False + + obj = jedi_name + while obj is not None and not isinstance(obj, (tree.Function, tree.Class)): + if isinstance(obj, tree.Flow): + # try/except catch check + if obj.type == 'try_stmt' and check_try_for_except(obj, exception): + return True + # hasattr check + if exception == AttributeError and obj.type in ('if_stmt', 'while_stmt'): + if check_hasattr(obj.children[1], obj.children[3]): + return True + obj = obj.parent + + return False diff --git a/lib/python3.10/site-packages/jedi/inference/arguments.py b/lib/python3.10/site-packages/jedi/inference/arguments.py new file mode 100644 index 0000000000000000000000000000000000000000..8602f494d8bd3dcfb46bcd9f7ad4e4341f29a954 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/arguments.py @@ -0,0 +1,335 @@ +import re +from itertools import zip_longest + +from parso.python import tree + +from jedi import 
debug +from jedi.inference.utils import PushBackIterator +from jedi.inference import analysis +from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \ + LazyTreeValue, get_merged_lazy_value +from jedi.inference.names import ParamName, TreeNameDefinition, AnonymousParamName +from jedi.inference.base_value import NO_VALUES, ValueSet, ContextualizedNode +from jedi.inference.value import iterable +from jedi.inference.cache import inference_state_as_method_param_cache + + +def try_iter_content(types, depth=0): + """Helper method for static analysis.""" + if depth > 10: + # It's possible that a loop has references to itself (especially with + # CompiledValue). Therefore don't loop infinitely. + return + + for typ in types: + try: + f = typ.py__iter__ + except AttributeError: + pass + else: + for lazy_value in f(): + try_iter_content(lazy_value.infer(), depth + 1) + + +class ParamIssue(Exception): + pass + + +def repack_with_argument_clinic(clinic_string): + """ + Transforms a function or method with arguments to the signature that is + given in argument clinic notation. + + Argument clinic is part of CPython and used for all the functions that are + implemented in C (Python 3.7): + + str.split.__text_signature__ + # Results in: '($self, /, sep=None, maxsplit=-1)' + """ + def decorator(func): + def wrapper(value, arguments): + try: + args = tuple(iterate_argument_clinic( + value.inference_state, + arguments, + clinic_string, + )) + except ParamIssue: + return NO_VALUES + else: + return func(value, *args) + + return wrapper + return decorator + + +def iterate_argument_clinic(inference_state, arguments, clinic_string): + """Uses a list with argument clinic information (see PEP 436).""" + clinic_args = list(_parse_argument_clinic(clinic_string)) + + iterator = PushBackIterator(arguments.unpack()) + for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args): + if stars == 1: + lazy_values = [] + for key, argument in iterator: + if key is not None: + iterator.push_back((key, argument)) + break + + lazy_values.append(argument) + yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)]) + continue + elif stars == 2: + raise NotImplementedError() + key, argument = next(iterator, (None, None)) + if key is not None: + debug.warning('Keyword arguments in argument clinic are currently not supported.') + raise ParamIssue + if argument is None and not optional: + debug.warning('TypeError: %s expected at least %s arguments, got %s', + name, len(clinic_args), i) + raise ParamIssue + + value_set = NO_VALUES if argument is None else argument.infer() + + if not value_set and not optional: + # For the stdlib we always want values. If we don't get them, + # that's ok, maybe something is too hard to resolve, however, + # we will not proceed with the type inference of that function. + debug.warning('argument_clinic "%s" not resolvable.', name) + raise ParamIssue + yield value_set + + +def _parse_argument_clinic(string): + allow_kwargs = False + optional = False + while string: + # Optional arguments have to begin with a bracket and should always be + # at the end of the arguments. This is therefore not a proper argument + # clinic implementation. `range()` for example allows an optional start + # value at the beginning. + match = re.match(r'(?:(?:(\[),? 
?|, ?|)(\**\w+)|, ?/)\]*', string) + string = string[len(match.group(0)):] + if not match.group(2): # A slash -> allow named arguments + allow_kwargs = True + continue + optional = optional or bool(match.group(1)) + word = match.group(2) + stars = word.count('*') + word = word[stars:] + yield (word, optional, allow_kwargs, stars) + if stars: + allow_kwargs = True + + +class _AbstractArgumentsMixin: + def unpack(self, funcdef=None): + raise NotImplementedError + + def get_calling_nodes(self): + return [] + + +class AbstractArguments(_AbstractArgumentsMixin): + context = None + argument_node = None + trailer = None + + +def unpack_arglist(arglist): + if arglist is None: + return + + if arglist.type != 'arglist' and not ( + arglist.type == 'argument' and arglist.children[0] in ('*', '**')): + yield 0, arglist + return + + iterator = iter(arglist.children) + for child in iterator: + if child == ',': + continue + elif child in ('*', '**'): + c = next(iterator, None) + assert c is not None + yield len(child.value), c + elif child.type == 'argument' and \ + child.children[0] in ('*', '**'): + assert len(child.children) == 2 + yield len(child.children[0].value), child.children[1] + else: + yield 0, child + + +class TreeArguments(AbstractArguments): + def __init__(self, inference_state, context, argument_node, trailer=None): + """ + :param argument_node: May be an argument_node or a list of nodes. + """ + self.argument_node = argument_node + self.context = context + self._inference_state = inference_state + self.trailer = trailer # Can be None, e.g. in a class definition. + + @classmethod + @inference_state_as_method_param_cache() + def create_cached(cls, *args, **kwargs): + return cls(*args, **kwargs) + + def unpack(self, funcdef=None): + named_args = [] + for star_count, el in unpack_arglist(self.argument_node): + if star_count == 1: + arrays = self.context.infer_node(el) + iterators = [_iterate_star_args(self.context, a, el, funcdef) + for a in arrays] + for values in list(zip_longest(*iterators)): + yield None, get_merged_lazy_value( + [v for v in values if v is not None] + ) + elif star_count == 2: + arrays = self.context.infer_node(el) + for dct in arrays: + yield from _star_star_dict(self.context, dct, el, funcdef) + else: + if el.type == 'argument': + c = el.children + if len(c) == 3: # Keyword argument. + named_args.append((c[0].value, LazyTreeValue(self.context, c[2]),)) + else: # Generator comprehension. + # Include the brackets with the parent. + sync_comp_for = el.children[1] + if sync_comp_for.type == 'comp_for': + sync_comp_for = sync_comp_for.children[1] + comp = iterable.GeneratorComprehension( + self._inference_state, + defining_context=self.context, + sync_comp_for_node=sync_comp_for, + entry_node=el.children[0], + ) + yield None, LazyKnownValue(comp) + else: + yield None, LazyTreeValue(self.context, el) + + # Reordering arguments is necessary, because star args sometimes appear + # after named arguments, but in the actual order they are prepended. + yield from named_args + + def _as_tree_tuple_objects(self): + for star_count, argument in unpack_arglist(self.argument_node): + default = None + if argument.type == 'argument': + if len(argument.children) == 3: # Keyword argument. + argument, default = argument.children[::2] + yield argument, default, star_count + + def iter_calling_names_with_star(self): + for name, default, star_count in self._as_tree_tuple_objects(): + # TODO this function is a bit strange. probably refactor? 
+ if not star_count or not isinstance(name, tree.Name): + continue + + yield TreeNameDefinition(self.context, name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.argument_node) + + def get_calling_nodes(self): + old_arguments_list = [] + arguments = self + + while arguments not in old_arguments_list: + if not isinstance(arguments, TreeArguments): + break + + old_arguments_list.append(arguments) + for calling_name in reversed(list(arguments.iter_calling_names_with_star())): + names = calling_name.goto() + if len(names) != 1: + break + if isinstance(names[0], AnonymousParamName): + # Dynamic parameters should not have calling nodes, because + # they are dynamic and extremely random. + return [] + if not isinstance(names[0], ParamName): + break + executed_param_name = names[0].get_executed_param_name() + arguments = executed_param_name.arguments + break + + if arguments.argument_node is not None: + return [ContextualizedNode(arguments.context, arguments.argument_node)] + if arguments.trailer is not None: + return [ContextualizedNode(arguments.context, arguments.trailer)] + return [] + + +class ValuesArguments(AbstractArguments): + def __init__(self, values_list): + self._values_list = values_list + + def unpack(self, funcdef=None): + for values in self._values_list: + yield None, LazyKnownValues(values) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._values_list) + + +class TreeArgumentsWrapper(_AbstractArgumentsMixin): + def __init__(self, arguments): + self._wrapped_arguments = arguments + + @property + def context(self): + return self._wrapped_arguments.context + + @property + def argument_node(self): + return self._wrapped_arguments.argument_node + + @property + def trailer(self): + return self._wrapped_arguments.trailer + + def unpack(self, func=None): + raise NotImplementedError + + def get_calling_nodes(self): + return self._wrapped_arguments.get_calling_nodes() + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_arguments) + + +def _iterate_star_args(context, array, input_node, funcdef=None): + if not array.py__getattribute__('__iter__'): + if funcdef is not None: + # TODO this funcdef should not be needed. + m = "TypeError: %s() argument after * must be a sequence, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star', input_node, message=m) + try: + iter_ = array.py__iter__ + except AttributeError: + pass + else: + yield from iter_() + + +def _star_star_dict(context, array, input_node, funcdef): + from jedi.inference.value.instance import CompiledInstance + if isinstance(array, CompiledInstance) and array.name.string_name == 'dict': + # For now ignore this case. In the future add proper iterators and just + # make one call without crazy isinstance checks. 
+ return {} + elif isinstance(array, iterable.Sequence) and array.array_type == 'dict': + return array.exact_key_items() + else: + if funcdef is not None: + m = "TypeError: %s argument after ** must be a mapping, not %s" \ + % (funcdef.name.value, array) + analysis.add(context, 'type-error-star-star', input_node, message=m) + return {} diff --git a/lib/python3.10/site-packages/jedi/inference/base_value.py b/lib/python3.10/site-packages/jedi/inference/base_value.py new file mode 100644 index 0000000000000000000000000000000000000000..9a789a4ef040c9298ba63628fdcceedd29364c95 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/base_value.py @@ -0,0 +1,558 @@ +""" +Values are the "values" that Python would return. However, Values are at the +same time also the "values" that a user is currently sitting in. + +A ValueSet is typically used to specify the return of a function or any other +static analysis operation. In jedi there are always multiple returns and not +just one. +""" +from functools import reduce +from operator import add +from itertools import zip_longest + +from parso.python.tree import Name + +from jedi import debug +from jedi.parser_utils import clean_scope_docstring +from jedi.inference.helpers import SimpleGetItemNotFound +from jedi.inference.utils import safe_property +from jedi.inference.cache import inference_state_as_method_param_cache +from jedi.cache import memoize_method + +sentinel = object() + + +class HasNoContext(Exception): + pass + + +class HelperValueMixin: + def get_root_context(self): + value = self + if value.parent_context is None: + return value.as_context() + + while True: + if value.parent_context is None: + return value + value = value.parent_context + + def execute(self, arguments): + return self.inference_state.execute(self, arguments=arguments) + + def execute_with_values(self, *value_list): + from jedi.inference.arguments import ValuesArguments + arguments = ValuesArguments([ValueSet([value]) for value in value_list]) + return self.inference_state.execute(self, arguments) + + def execute_annotation(self): + return self.execute_with_values() + + def gather_annotation_classes(self): + return ValueSet([self]) + + def merge_types_of_iterate(self, contextualized_node=None, is_async=False): + return ValueSet.from_sets( + lazy_value.infer() + for lazy_value in self.iterate(contextualized_node, is_async) + ) + + def _get_value_filters(self, name_or_str): + origin_scope = name_or_str if isinstance(name_or_str, Name) else None + yield from self.get_filters(origin_scope=origin_scope) + # This covers the case where stub files are incomplete. 
+ if self.is_stub(): + from jedi.inference.gradual.conversion import convert_values + for c in convert_values(ValueSet({self})): + yield from c.get_filters() + + def goto(self, name_or_str, name_context=None, analysis_errors=True): + from jedi.inference import finder + filters = self._get_value_filters(name_or_str) + names = finder.filter_name(filters, name_or_str) + debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names) + return names + + def py__getattribute__(self, name_or_str, name_context=None, position=None, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + names = self.goto(name_or_str, name_context, analysis_errors) + values = ValueSet.from_sets(name.infer() for name in names) + if not values: + n = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + values = self.py__getattribute__alternatives(n) + + if not names and not values and analysis_errors: + if isinstance(name_or_str, Name): + from jedi.inference import analysis + analysis.add_attribute_error( + name_context, self, name_or_str) + debug.dbg('context.names_to_types: %s -> %s', names, values) + return values + + def py__await__(self): + await_value_set = self.py__getattribute__("__await__") + if not await_value_set: + debug.warning('Tried to run __await__ on value %s', self) + return await_value_set.execute_with_values() + + def py__name__(self): + return self.name.string_name + + def iterate(self, contextualized_node=None, is_async=False): + debug.dbg('iterate %s', self) + if is_async: + from jedi.inference.lazy_value import LazyKnownValues + # TODO if no __aiter__ values are there, error should be: + # TypeError: 'async for' requires an object with __aiter__ method, got int + return iter([ + LazyKnownValues( + self.py__getattribute__('__aiter__').execute_with_values() + .py__getattribute__('__anext__').execute_with_values() + .py__getattribute__('__await__').execute_with_values() + .py__stop_iteration_returns() + ) # noqa: E124 + ]) + return self.py__iter__(contextualized_node) + + def is_sub_class_of(self, class_value): + with debug.increase_indent_cm('subclass matching of %s <=> %s' % (self, class_value), + color='BLUE'): + for cls in self.py__mro__(): + if cls.is_same_class(class_value): + debug.dbg('matched subclass True', color='BLUE') + return True + debug.dbg('matched subclass False', color='BLUE') + return False + + def is_same_class(self, class2): + # Class matching should prefer comparisons that are not this function. + if type(class2).is_same_class != HelperValueMixin.is_same_class: + return class2.is_same_class(self) + return self == class2 + + @memoize_method + def as_context(self, *args, **kwargs): + return self._as_context(*args, **kwargs) + + +class Value(HelperValueMixin): + """ + To be implemented by subclasses. + """ + tree_node = None + # Possible values: None, tuple, list, dict and set. Here to deal with these + # very important containers. + array_type = None + api_type = 'not_defined_please_report_bug' + + def __init__(self, inference_state, parent_context=None): + self.inference_state = inference_state + self.parent_context = parent_context + + def py__getitem__(self, index_value_set, contextualized_node): + from jedi.inference import analysis + # TODO this value is probably not right. 
+ analysis.add( + contextualized_node.context, + 'type-error-not-subscriptable', + contextualized_node.node, + message="TypeError: '%s' object is not subscriptable" % self + ) + return NO_VALUES + + def py__simple_getitem__(self, index): + raise SimpleGetItemNotFound + + def py__iter__(self, contextualized_node=None): + if contextualized_node is not None: + from jedi.inference import analysis + analysis.add( + contextualized_node.context, + 'type-error-not-iterable', + contextualized_node.node, + message="TypeError: '%s' object is not iterable" % self) + return iter([]) + + def py__next__(self, contextualized_node=None): + return self.py__iter__(contextualized_node) + + def get_signatures(self): + return [] + + def is_class(self): + return False + + def is_class_mixin(self): + return False + + def is_instance(self): + return False + + def is_function(self): + return False + + def is_module(self): + return False + + def is_namespace(self): + return False + + def is_compiled(self): + return False + + def is_bound_method(self): + return False + + def is_builtins_module(self): + return False + + def py__bool__(self): + """ + Since Wrapper is a superclass for classes, functions and modules, + the return value will always be true. + """ + return True + + def py__doc__(self): + try: + self.tree_node.get_doc_node + except AttributeError: + return '' + else: + return clean_scope_docstring(self.tree_node) + + def get_safe_value(self, default=sentinel): + if default is sentinel: + raise ValueError("There exists no safe value for value %s" % self) + return default + + def execute_operation(self, other, operator): + debug.warning("%s not possible between %s and %s", operator, self, other) + return NO_VALUES + + def py__call__(self, arguments): + debug.warning("no execution possible %s", self) + return NO_VALUES + + def py__stop_iteration_returns(self): + debug.warning("Not possible to return the stop iterations of %s", self) + return NO_VALUES + + def py__getattribute__alternatives(self, name_or_str): + """ + For now a way to add values in cases like __getattr__. + """ + return NO_VALUES + + def py__get__(self, instance, class_value): + debug.warning("No __get__ defined on %s", self) + return ValueSet([self]) + + def py__get__on_class(self, calling_instance, instance, class_value): + return NotImplemented + + def get_qualified_names(self): + # Returns Optional[Tuple[str, ...]] + return None + + def is_stub(self): + # The root value knows if it's a stub or not. + return self.parent_context.is_stub() + + def _as_context(self): + raise HasNoContext + + @property + def name(self): + raise NotImplementedError + + def get_type_hint(self, add_class_info=True): + return None + + def infer_type_vars(self, value_set): + """ + When the current instance represents a type annotation, this method + tries to find information about undefined type vars and returns a dict + from type var name to value set. + + This is for example important to understand what `iter([1])` returns. + According to typeshed, `iter` returns an `Iterator[_T]`: + + def iter(iterable: Iterable[_T]) -> Iterator[_T]: ... + + This function would generate `int` for `_T` in this case, because it + unpacks the `Iterable`. + + Parameters + ---------- + + `self`: represents the annotation of the current parameter to infer the + value for. In the above example, this would initially be the + `Iterable[_T]` of the `iterable` parameter and then, when recursing, + just the `_T` generic parameter. 
+ + `value_set`: represents the actual argument passed to the parameter we're + inferring for, or (for recursive calls) their types. In the + above example this would first be the representation of the list + `[1]` and then, when recursing, just of `1`. + """ + return {} + + +def iterate_values(values, contextualized_node=None, is_async=False): + """ + Calls `iterate` on all values, ignores the ordering, and just returns + all the values that the iterate functions yield. + """ + return ValueSet.from_sets( + lazy_value.infer() + for lazy_value in values.iterate(contextualized_node, is_async=is_async) + ) + + +class _ValueWrapperBase(HelperValueMixin): + @safe_property + def name(self): + from jedi.inference.names import ValueName + wrapped_name = self._wrapped_value.name + if wrapped_name.tree_name is not None: + return ValueName(self, wrapped_name.tree_name) + else: + from jedi.inference.compiled import CompiledValueName + return CompiledValueName(self, wrapped_name.string_name) + + @classmethod + @inference_state_as_method_param_cache() + def create_cached(cls, inference_state, *args, **kwargs): + return cls(*args, **kwargs) + + def __getattr__(self, name): + assert name != '_wrapped_value', 'Problem with _get_wrapped_value' + return getattr(self._wrapped_value, name) + + +class LazyValueWrapper(_ValueWrapperBase): + @safe_property + @memoize_method + def _wrapped_value(self): + with debug.increase_indent_cm('Resolve lazy value wrapper'): + return self._get_wrapped_value() + + def __repr__(self): + return '<%s>' % (self.__class__.__name__) + + def _get_wrapped_value(self): + raise NotImplementedError + + +class ValueWrapper(_ValueWrapperBase): + def __init__(self, wrapped_value): + self._wrapped_value = wrapped_value + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_value) + + +class TreeValue(Value): + def __init__(self, inference_state, parent_context, tree_node): + super().__init__(inference_state, parent_context) + self.tree_node = tree_node + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.tree_node) + + +class ContextualizedNode: + def __init__(self, context, node): + self.context = context + self.node = node + + def get_root_context(self): + return self.context.get_root_context() + + def infer(self): + return self.context.infer_node(self.node) + + def __repr__(self): + return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context) + + +def _getitem(value, index_values, contextualized_node): + # The actual getitem call. + result = NO_VALUES + unused_values = set() + for index_value in index_values: + index = index_value.get_safe_value(default=None) + if type(index) in (float, int, str, slice, bytes): + try: + result |= value.py__simple_getitem__(index) + continue + except SimpleGetItemNotFound: + pass + + unused_values.add(index_value) + + # The index was somehow not good enough or simply a wrong type. + # Therefore we now iterate through all the values and just take + # all results. 
+ if unused_values or not index_values: + result |= value.py__getitem__( + ValueSet(unused_values), + contextualized_node + ) + debug.dbg('py__getitem__ result: %s', result) + return result + + +class ValueSet: + def __init__(self, iterable): + self._set = frozenset(iterable) + for value in iterable: + assert not isinstance(value, ValueSet) + + @classmethod + def _from_frozen_set(cls, frozenset_): + self = cls.__new__(cls) + self._set = frozenset_ + return self + + @classmethod + def from_sets(cls, sets): + """ + Used to work with an iterable of sets. + """ + aggregated = set() + for set_ in sets: + if isinstance(set_, ValueSet): + aggregated |= set_._set + else: + aggregated |= frozenset(set_) + return cls._from_frozen_set(frozenset(aggregated)) + + def __or__(self, other): + return self._from_frozen_set(self._set | other._set) + + def __and__(self, other): + return self._from_frozen_set(self._set & other._set) + + def __iter__(self): + return iter(self._set) + + def __bool__(self): + return bool(self._set) + + def __len__(self): + return len(self._set) + + def __repr__(self): + return 'S{%s}' % (', '.join(str(s) for s in self._set)) + + def filter(self, filter_func): + return self.__class__(filter(filter_func, self._set)) + + def __getattr__(self, name): + def mapper(*args, **kwargs): + return self.from_sets( + getattr(value, name)(*args, **kwargs) + for value in self._set + ) + return mapper + + def __eq__(self, other): + return self._set == other._set + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self._set) + + def py__class__(self): + return ValueSet(c.py__class__() for c in self._set) + + def iterate(self, contextualized_node=None, is_async=False): + from jedi.inference.lazy_value import get_merged_lazy_value + type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set] + for lazy_values in zip_longest(*type_iters): + yield get_merged_lazy_value( + [l for l in lazy_values if l is not None] + ) + + def execute(self, arguments): + return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set) + + def execute_with_values(self, *args, **kwargs): + return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set) + + def goto(self, *args, **kwargs): + return reduce(add, [c.goto(*args, **kwargs) for c in self._set], []) + + def py__getattribute__(self, *args, **kwargs): + return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set) + + def get_item(self, *args, **kwargs): + return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set) + + def try_merge(self, function_name): + value_set = self.__class__([]) + for c in self._set: + try: + method = getattr(c, function_name) + except AttributeError: + pass + else: + value_set |= method() + return value_set + + def gather_annotation_classes(self): + return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set]) + + def get_signatures(self): + return [sig for c in self._set for sig in c.get_signatures()] + + def get_type_hint(self, add_class_info=True): + t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set] + type_hints = sorted(filter(None, t)) + if len(type_hints) == 1: + return type_hints[0] + + optional = 'None' in type_hints + if optional: + type_hints.remove('None') + + if len(type_hints) == 0: + return None + elif len(type_hints) == 1: + s = type_hints[0] + else: + s = 'Union[%s]' % ', '.join(type_hints) + if optional: + s = 'Optional[%s]' % s + return s 
+ + def infer_type_vars(self, value_set): + # Circular + from jedi.inference.gradual.annotation import merge_type_var_dicts + + type_var_dict = {} + for value in self._set: + merge_type_var_dicts( + type_var_dict, + value.infer_type_vars(value_set), + ) + return type_var_dict + + +NO_VALUES = ValueSet([]) + + +def iterator_to_value_set(func): + def wrapper(*args, **kwargs): + return ValueSet(func(*args, **kwargs)) + + return wrapper diff --git a/lib/python3.10/site-packages/jedi/inference/cache.py b/lib/python3.10/site-packages/jedi/inference/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..8360a42a4f51210657dc1c7ad54b7926183f1911 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/cache.py @@ -0,0 +1,126 @@ +""" +- the popular ``_memoize_default`` works like a typical memoize and returns the + default otherwise. +- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes. +""" +from functools import wraps + +from jedi import debug + +_NO_DEFAULT = object() +_RECURSION_SENTINEL = object() + + +def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False, + second_arg_is_inference_state=False): + """ This is a typical memoization decorator, BUT there is one difference: + To prevent recursion it sets defaults. + + In this case, preventing recursion is a much bigger use case than speed. I + don't think there is a big speed difference, but there are many cases + where recursion could happen (think about a = b; b = a). + """ + def func(function): + def wrapper(obj, *args, **kwargs): + # TODO These checks are kind of ugly and slow. + if inference_state_is_first_arg: + cache = obj.memoize_cache + elif second_arg_is_inference_state: + cache = args[0].memoize_cache # needed for meta classes + else: + cache = obj.inference_state.memoize_cache + + try: + memo = cache[function] + except KeyError: + cache[function] = memo = {} + + key = (obj, args, frozenset(kwargs.items())) + if key in memo: + return memo[key] + else: + if default is not _NO_DEFAULT: + memo[key] = default + rv = function(obj, *args, **kwargs) + memo[key] = rv + return rv + return wrapper + + return func + + +def inference_state_function_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default, inference_state_is_first_arg=True)(func) + + return decorator + + +def inference_state_method_cache(default=_NO_DEFAULT): + def decorator(func): + return _memoize_default(default=default)(func) + + return decorator + + +def inference_state_as_method_param_cache(): + def decorator(call): + return _memoize_default(second_arg_is_inference_state=True)(call) + + return decorator + + +class CachedMetaClass(type): + """ + This is basically the same as the decorator above; it just caches + class initializations. Either you do it this way or with decorators, but + with decorators you lose class access (isinstance, etc). + """ + @inference_state_as_method_param_cache() + def __call__(self, *args, **kwargs): + return super().__call__(*args, **kwargs) + + +def inference_state_method_generator_cache(): + """ + This is a special memoizer. It memoizes generators and also checks for + recursion errors and returns no further iterator elements in that case. 
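+ + The cached value is a pair of the underlying generator and the list of + elements produced so far; a ``_RECURSION_SENTINEL`` placed in that list + while the next element is being computed is how re-entrant calls are + detected. 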
+ """ + def func(function): + @wraps(function) + def wrapper(obj, *args, **kwargs): + cache = obj.inference_state.memoize_cache + try: + memo = cache[function] + except KeyError: + cache[function] = memo = {} + + key = (obj, args, frozenset(kwargs.items())) + + if key in memo: + actual_generator, cached_lst = memo[key] + else: + actual_generator = function(obj, *args, **kwargs) + cached_lst = [] + memo[key] = actual_generator, cached_lst + + i = 0 + while True: + try: + next_element = cached_lst[i] + if next_element is _RECURSION_SENTINEL: + debug.warning('Found a generator recursion for %s' % obj) + # This means we have hit a recursion. + return + except IndexError: + cached_lst.append(_RECURSION_SENTINEL) + next_element = next(actual_generator, None) + if next_element is None: + cached_lst.pop() + return + cached_lst[-1] = next_element + yield next_element + i += 1 + return wrapper + + return func diff --git a/lib/python3.10/site-packages/jedi/inference/context.py b/lib/python3.10/site-packages/jedi/inference/context.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc6b99412810e9dab5a365465bd13c04c8f9eda --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/context.py @@ -0,0 +1,499 @@ +from abc import abstractmethod +from contextlib import contextmanager +from pathlib import Path +from typing import Optional + +from parso.tree import search_ancestor +from parso.python.tree import Name + +from jedi.inference.filters import ParserTreeFilter, MergedFilter, \ + GlobalNameFilter +from jedi.inference.names import AnonymousParamName, TreeNameDefinition +from jedi.inference.base_value import NO_VALUES, ValueSet +from jedi.parser_utils import get_parent_scope +from jedi import debug +from jedi import parser_utils + + +class AbstractContext: + # Must be defined: inference_state and tree_node and parent_context as an attribute/property + + def __init__(self, inference_state): + self.inference_state = inference_state + self.predefined_names = {} + + @abstractmethod + def get_filters(self, until_position=None, origin_scope=None): + raise NotImplementedError + + def goto(self, name_or_str, position): + from jedi.inference import finder + filters = _get_global_filters_for_name( + self, name_or_str if isinstance(name_or_str, Name) else None, position, + ) + names = finder.filter_name(filters, name_or_str) + debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names) + return names + + def py__getattribute__(self, name_or_str, name_context=None, position=None, + analysis_errors=True): + """ + :param position: Position of the last statement -> tuple of line, column + """ + if name_context is None: + name_context = self + names = self.goto(name_or_str, position) + + string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + + # This paragraph is currently needed for proper branch type inference + # (static analysis). 
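+        # Hypothetical illustration: while the body of `for x in [1, 'a']:`
+        # is inferred, the surrounding for_stmt node can be predefined (see
+        # predefine_names below) to map 'x' to the int/str values, and the
+        # lookup here then short-circuits the normal name search.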
+ found_predefined_types = None + if self.predefined_names and isinstance(name_or_str, Name): + node = name_or_str + while node is not None and not parser_utils.is_scope(node): + node = node.parent + if node.type in ("if_stmt", "for_stmt", "comp_for", 'sync_comp_for'): + try: + name_dict = self.predefined_names[node] + types = name_dict[string_name] + except KeyError: + continue + else: + found_predefined_types = types + break + if found_predefined_types is not None and names: + from jedi.inference import flow_analysis + check = flow_analysis.reachability_check( + context=self, + value_scope=self.tree_node, + node=name_or_str, + ) + if check is flow_analysis.UNREACHABLE: + values = NO_VALUES + else: + values = found_predefined_types + else: + values = ValueSet.from_sets(name.infer() for name in names) + + if not names and not values and analysis_errors: + if isinstance(name_or_str, Name): + from jedi.inference import analysis + message = ("NameError: name '%s' is not defined." % string_name) + analysis.add(name_context, 'name-error', name_or_str, message) + + debug.dbg('context.names_to_types: %s -> %s', names, values) + if values: + return values + return self._check_for_additional_knowledge(name_or_str, name_context, position) + + def _check_for_additional_knowledge(self, name_or_str, name_context, position): + name_context = name_context or self + # Add isinstance and other if/assert knowledge. + if isinstance(name_or_str, Name) and not name_context.is_instance(): + flow_scope = name_or_str + base_nodes = [name_context.tree_node] + + if any(b.type in ('comp_for', 'sync_comp_for') for b in base_nodes): + return NO_VALUES + from jedi.inference.finder import check_flow_information + while True: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + n = check_flow_information(name_context, flow_scope, + name_or_str, position) + if n is not None: + return n + if flow_scope in base_nodes: + break + return NO_VALUES + + def get_root_context(self): + parent_context = self.parent_context + if parent_context is None: + return self + return parent_context.get_root_context() + + def is_module(self): + return False + + def is_builtins_module(self): + return False + + def is_class(self): + return False + + def is_stub(self): + return False + + def is_instance(self): + return False + + def is_compiled(self): + return False + + def is_bound_method(self): + return False + + @abstractmethod + def py__name__(self): + raise NotImplementedError + + def get_value(self): + raise NotImplementedError + + @property + def name(self): + return None + + def get_qualified_names(self): + return () + + def py__doc__(self): + return '' + + @contextmanager + def predefine_names(self, flow_scope, dct): + predefined = self.predefined_names + predefined[flow_scope] = dct + try: + yield + finally: + del predefined[flow_scope] + + +class ValueContext(AbstractContext): + """ + Should be defined, otherwise the API returns empty types. 
+ """ + def __init__(self, value): + super().__init__(value.inference_state) + self._value = value + + @property + def tree_node(self): + return self._value.tree_node + + @property + def parent_context(self): + return self._value.parent_context + + def is_module(self): + return self._value.is_module() + + def is_builtins_module(self): + return self._value == self.inference_state.builtins_module + + def is_class(self): + return self._value.is_class() + + def is_stub(self): + return self._value.is_stub() + + def is_instance(self): + return self._value.is_instance() + + def is_compiled(self): + return self._value.is_compiled() + + def is_bound_method(self): + return self._value.is_bound_method() + + def py__name__(self): + return self._value.py__name__() + + @property + def name(self): + return self._value.name + + def get_qualified_names(self): + return self._value.get_qualified_names() + + def py__doc__(self): + return self._value.py__doc__() + + def get_value(self): + return self._value + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._value) + + +class TreeContextMixin: + def infer_node(self, node): + from jedi.inference.syntax_tree import infer_node + return infer_node(self, node) + + def create_value(self, node): + from jedi.inference import value + + if node == self.tree_node: + assert self.is_module() + return self.get_value() + + parent_context = self.create_context(node) + + if node.type in ('funcdef', 'lambdef'): + func = value.FunctionValue.from_context(parent_context, node) + if parent_context.is_class(): + class_value = parent_context.parent_context.create_value(parent_context.tree_node) + instance = value.AnonymousInstance( + self.inference_state, parent_context.parent_context, class_value) + func = value.BoundMethod( + instance=instance, + class_context=class_value.as_context(), + function=func + ) + return func + elif node.type == 'classdef': + return value.ClassValue(self.inference_state, parent_context, node) + else: + raise NotImplementedError("Probably shouldn't happen: %s" % node) + + def create_context(self, node): + def from_scope_node(scope_node, is_nested=True): + if scope_node == self.tree_node: + return self + + if scope_node.type in ('funcdef', 'lambdef', 'classdef'): + return self.create_value(scope_node).as_context() + elif scope_node.type in ('comp_for', 'sync_comp_for'): + parent_context = from_scope_node(parent_scope(scope_node.parent)) + if node.start_pos >= scope_node.children[-1].start_pos: + return parent_context + return CompForContext(parent_context, scope_node) + raise Exception("There's a scope that was not managed: %s" % scope_node) + + def parent_scope(node): + while True: + node = node.parent + + if parser_utils.is_scope(node): + return node + elif node.type in ('argument', 'testlist_comp'): + if node.children[1].type in ('comp_for', 'sync_comp_for'): + return node.children[1] + elif node.type == 'dictorsetmaker': + for n in node.children[1:4]: + # In dictionaries it can be pretty much anything. 
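+                        # e.g. in `{k: v for k, v in it}` the comp_for node is
+                        # the fourth child of the dictorsetmaker, while in
+                        # `{x for x in it}` it is the second; slicing 1:4
+                        # covers both shapes.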
+ if n.type in ('comp_for', 'sync_comp_for'): + return n + + scope_node = parent_scope(node) + if scope_node.type in ('funcdef', 'classdef'): + colon = scope_node.children[scope_node.children.index(':')] + if node.start_pos < colon.start_pos: + parent = node.parent + if not (parent.type == 'param' and parent.name == node): + scope_node = parent_scope(scope_node) + return from_scope_node(scope_node, is_nested=True) + + def create_name(self, tree_name): + definition = tree_name.get_definition() + if definition and definition.type == 'param' and definition.name == tree_name: + funcdef = search_ancestor(definition, 'funcdef', 'lambdef') + func = self.create_value(funcdef) + return AnonymousParamName(func, tree_name) + else: + context = self.create_context(tree_name) + return TreeNameDefinition(context, tree_name) + + +class FunctionContext(TreeContextMixin, ValueContext): + def get_filters(self, until_position=None, origin_scope=None): + yield ParserTreeFilter( + self.inference_state, + parent_context=self, + until_position=until_position, + origin_scope=origin_scope + ) + + +class ModuleContext(TreeContextMixin, ValueContext): + def py__file__(self) -> Optional[Path]: + return self._value.py__file__() # type: ignore[no-any-return] + + def get_filters(self, until_position=None, origin_scope=None): + filters = self._value.get_filters(origin_scope) + # Skip the first filter and replace it. + next(filters, None) + yield MergedFilter( + ParserTreeFilter( + parent_context=self, + until_position=until_position, + origin_scope=origin_scope + ), + self.get_global_filter(), + ) + yield from filters + + def get_global_filter(self): + return GlobalNameFilter(self) + + @property + def string_names(self): + return self._value.string_names + + @property + def code_lines(self): + return self._value.code_lines + + def get_value(self): + """ + This is the only function that converts a context back to a value. + This is necessary for stub -> python conversion and vice versa. However + this method shouldn't be moved to AbstractContext. 
+ """ + return self._value + + +class NamespaceContext(TreeContextMixin, ValueContext): + def get_filters(self, until_position=None, origin_scope=None): + return self._value.get_filters() + + def get_value(self): + return self._value + + @property + def string_names(self): + return self._value.string_names + + def py__file__(self) -> Optional[Path]: + return self._value.py__file__() # type: ignore[no-any-return] + + +class ClassContext(TreeContextMixin, ValueContext): + def get_filters(self, until_position=None, origin_scope=None): + yield self.get_global_filter(until_position, origin_scope) + + def get_global_filter(self, until_position=None, origin_scope=None): + return ParserTreeFilter( + parent_context=self, + until_position=until_position, + origin_scope=origin_scope + ) + + +class CompForContext(TreeContextMixin, AbstractContext): + def __init__(self, parent_context, comp_for): + super().__init__(parent_context.inference_state) + self.tree_node = comp_for + self.parent_context = parent_context + + def get_filters(self, until_position=None, origin_scope=None): + yield ParserTreeFilter(self) + + def get_value(self): + return None + + def py__name__(self): + return '' + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.tree_node) + + +class CompiledContext(ValueContext): + def get_filters(self, until_position=None, origin_scope=None): + return self._value.get_filters() + + +class CompiledModuleContext(CompiledContext): + code_lines = None + + def get_value(self): + return self._value + + @property + def string_names(self): + return self._value.string_names + + def py__file__(self) -> Optional[Path]: + return self._value.py__file__() # type: ignore[no-any-return] + + +def _get_global_filters_for_name(context, name_or_none, position): + # For functions and classes the defaults don't belong to the + # function and get inferred in the value before the function. So + # make sure to exclude the function/class name. + if name_or_none is not None: + ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef') + lambdef = None + if ancestor == 'lambdef': + # For lambdas it's even more complicated since parts will + # be inferred later. + lambdef = ancestor + ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef') + if ancestor is not None: + colon = ancestor.children[-2] + if position is not None and position < colon.start_pos: + if lambdef is None or position < lambdef.children[-2].start_pos: + position = ancestor.start_pos + + return get_global_filters(context, position, name_or_none) + + +def get_global_filters(context, until_position, origin_scope): + """ + Returns all filters in order of priority for name resolution. + + For global name lookups. The filters will handle name resolution + themselves, but here we gather possible filters downwards. + + >>> from jedi import Script + >>> script = Script(''' + ... x = ['a', 'b', 'c'] + ... def func(): + ... y = None + ... ''') + >>> module_node = script._module_node + >>> scope = next(module_node.iter_funcdefs()) + >>> scope + + >>> context = script._get_module_context().create_context(scope) + >>> filters = list(get_global_filters(context, (4, 0), None)) + + First we get the names from the function scope. 
+
+    >>> print(filters[0])  # doctest: +ELLIPSIS
+    MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>)
+    >>> sorted(str(n) for n in filters[0].values())  # doctest: +NORMALIZE_WHITESPACE
+    ['<TreeNameDefinition: string_name=func start_pos=(3, 4)>',
+     '<TreeNameDefinition: string_name=x start_pos=(2, 0)>']
+    >>> filters[0]._filters[0]._until_position
+    (4, 0)
+    >>> filters[0]._filters[1]._until_position
+
+    Then it yields the names from one level "lower". In this example, this is
+    the module scope (including globals).
+    As a side note, you can see that the position in the filter is None on the
+    globals filter, because there the whole module is searched.
+
+    >>> list(filters[1].values())  # package modules -> Also empty.
+    []
+    >>> sorted(name.string_name for name in filters[2].values())  # Module attributes
+    ['__doc__', '__name__', '__package__']
+
+    Finally, it yields the builtin filter, if `include_builtin` is
+    true (default).
+
+    >>> list(filters[3].values())  # doctest: +ELLIPSIS
+    [...]
+    """
+    base_context = context
+    from jedi.inference.value.function import BaseFunctionExecutionContext
+    while context is not None:
+        # Names in methods cannot be resolved within the class.
+        yield from context.get_filters(
+            until_position=until_position,
+            origin_scope=origin_scope
+        )
+        if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)):
+            # The position should be reset if the current scope is a function.
+            until_position = None
+
+        context = context.parent_context
+
+    b = next(base_context.inference_state.builtins_module.get_filters(), None)
+    assert b is not None
+    # Add builtins to the global scope.
+    yield b
diff --git a/lib/python3.10/site-packages/jedi/inference/docstring_utils.py b/lib/python3.10/site-packages/jedi/inference/docstring_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bee0d75ec155518a4d41f8f4072d3fc7c72f5f69
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/inference/docstring_utils.py
@@ -0,0 +1,21 @@
+from jedi.inference.value import ModuleValue
+from jedi.inference.context import ModuleContext
+
+
+class DocstringModule(ModuleValue):
+    def __init__(self, in_module_context, **kwargs):
+        super().__init__(**kwargs)
+        self._in_module_context = in_module_context
+
+    def _as_context(self):
+        return DocstringModuleContext(self, self._in_module_context)
+
+
+class DocstringModuleContext(ModuleContext):
+    def __init__(self, module_value, in_module_context):
+        super().__init__(module_value)
+        self._in_module_context = in_module_context
+
+    def get_filters(self, origin_scope=None, until_position=None):
+        yield from super().get_filters(until_position=until_position)
+        yield from self._in_module_context.get_filters()
diff --git a/lib/python3.10/site-packages/jedi/inference/docstrings.py b/lib/python3.10/site-packages/jedi/inference/docstrings.py
new file mode 100644
index 0000000000000000000000000000000000000000..809974f3778a64c6d28d1f420fad1dce7504c478
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/inference/docstrings.py
@@ -0,0 +1,286 @@
+"""
+Docstrings are another source of information for functions and classes.
+:mod:`jedi.inference.dynamic_params` tries to find all executions of functions,
+while the docstring parsing is much easier. There are three different types of
+docstrings that |jedi| understands:
+
+- Sphinx
+- Epydoc
+- Numpydoc
+
+For example, the sphinx annotation ``:type foo: str`` clearly states that the
+type of ``foo`` is ``str``.
+
+As an addition to parameter searching, this module also provides return
+annotations.
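+
+A rough side-by-side of the parameter annotations this module looks for, for a
+parameter ``foo`` of type ``str`` (the numpydoc variant lives in a
+``Parameters`` section)::
+
+    :type foo: str      # Sphinx
+    @type foo: str      # Epydoc
+
+    Parameters
+    ----------
+    foo : str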
+""" + +import re +import warnings + +from parso import parse, ParserSyntaxError + +from jedi import debug +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import iterator_to_value_set, ValueSet, \ + NO_VALUES +from jedi.inference.lazy_value import LazyKnownValues + + +DOCSTRING_PARAM_PATTERNS = [ + r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx + r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type + r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc +] + +DOCSTRING_RETURN_PATTERNS = [ + re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx + re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc +] + +REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') + + +_numpy_doc_string_cache = None + + +def _get_numpy_doc_string_cls(): + global _numpy_doc_string_cache + if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)): + raise _numpy_doc_string_cache + from numpydoc.docscrape import NumpyDocString # type: ignore[import] + _numpy_doc_string_cache = NumpyDocString + return _numpy_doc_string_cache + + +def _search_param_in_numpydocstr(docstr, param_str): + """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters'] + except Exception: + return [] + for p_name, p_type, p_descr in params: + if p_name == param_str: + m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) + if m: + p_type = m.group(1) + return list(_expand_typestr(p_type)) + return [] + + +def _search_return_in_numpydocstr(docstr): + """ + Search `docstr` (in numpydoc format) for type(-s) of function returns. + """ + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + try: + doc = _get_numpy_doc_string_cls()(docstr) + except Exception: + return + try: + # This is a non-public API. If it ever changes we should be + # prepared and return gracefully. + returns = doc._parsed_data['Returns'] + returns += doc._parsed_data['Yields'] + except Exception: + return + for r_name, r_type, r_descr in returns: + # Return names are optional and if so the type is in the name + if not r_type: + r_type = r_name + yield from _expand_typestr(r_type) + + +def _expand_typestr(type_str): + """ + Attempts to interpret the possible types in `type_str` + """ + # Check if alternative types are specified with 'or' + if re.search(r'\bor\b', type_str): + for t in type_str.split('or'): + yield t.split('of')[0].strip() + # Check if like "list of `type`" and set type to list + elif re.search(r'\bof\b', type_str): + yield type_str.split('of')[0] + # Check if type has is a set of valid literal values eg: {'C', 'F', 'A'} + elif type_str.startswith('{'): + node = parse(type_str, version='3.7').children[0] + if node.type == 'atom': + for leaf in getattr(node.children[1], "children", []): + if leaf.type == 'number': + if '.' in leaf.value: + yield 'float' + else: + yield 'int' + elif leaf.type == 'string': + if 'b' in leaf.string_prefix.lower(): + yield 'bytes' + else: + yield 'str' + # Ignore everything else. + + # Otherwise just work with what we have. + else: + yield type_str + + +def _search_param_in_docstr(docstr, param_str): + """ + Search `docstr` for type(-s) of `param_str`. + + >>> _search_param_in_docstr(':type param: int', 'param') + ['int'] + >>> _search_param_in_docstr('@type param: int', 'param') + ['int'] + >>> _search_param_in_docstr( + ... 
':type param: :class:`threading.Thread`', 'param') + ['threading.Thread'] + >>> bool(_search_param_in_docstr('no document', 'param')) + False + >>> _search_param_in_docstr(':param int param: some description', 'param') + ['int'] + + """ + # look at #40 to see definitions of those params + patterns = [re.compile(p % re.escape(param_str)) + for p in DOCSTRING_PARAM_PATTERNS] + for pattern in patterns: + match = pattern.search(docstr) + if match: + return [_strip_rst_role(match.group(1))] + + return _search_param_in_numpydocstr(docstr, param_str) + + +def _strip_rst_role(type_str): + """ + Strip off the part looks like a ReST role in `type_str`. + + >>> _strip_rst_role(':class:`ClassName`') # strip off :class: + 'ClassName' + >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain + 'module.Object' + >>> _strip_rst_role('ClassName') # do nothing when not ReST role + 'ClassName' + + See also: + http://sphinx-doc.org/domains.html#cross-referencing-python-objects + + """ + match = REST_ROLE_PATTERN.match(type_str) + if match: + return match.group(1) + else: + return type_str + + +def _infer_for_statement_string(module_context, string): + if string is None: + return [] + + potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string) + # Try to import module part in dotted name. + # (e.g., 'threading' in 'threading.Thread'). + imports = "\n".join(f"import {p}" for p in potential_imports) + string = f'{imports}\n{string}' + + debug.dbg('Parse docstring code %s', string, color='BLUE') + grammar = module_context.inference_state.grammar + try: + module = grammar.parse(string, error_recovery=False) + except ParserSyntaxError: + return [] + try: + # It's not the last item, because that's an end marker. + stmt = module.children[-2] + except (AttributeError, IndexError): + return [] + + if stmt.type not in ('name', 'atom', 'atom_expr'): + return [] + + # Here we basically use a fake module that also uses the filters in + # the actual module. + from jedi.inference.docstring_utils import DocstringModule + m = DocstringModule( + in_module_context=module_context, + inference_state=module_context.inference_state, + module_node=module, + code_lines=[], + ) + return list(_execute_types_in_stmt(m.as_context(), stmt)) + + +def _execute_types_in_stmt(module_context, stmt): + """ + Executing all types or general elements that we find in a statement. This + doesn't include tuple, list and dict literals, because the stuff they + contain is executed. (Used as type information). + """ + definitions = module_context.infer_node(stmt) + return ValueSet.from_sets( + _execute_array_values(module_context.inference_state, d) + for d in definitions + ) + + +def _execute_array_values(inference_state, array): + """ + Tuples indicate that there's not just one return value, but the listed + ones. `(str, int)` means that it returns a tuple with both types. 
+    """
+    from jedi.inference.value.iterable import SequenceLiteralValue, FakeTuple, FakeList
+    if isinstance(array, SequenceLiteralValue) and array.array_type in ('tuple', 'list'):
+        values = []
+        for lazy_value in array.py__iter__():
+            objects = ValueSet.from_sets(
+                _execute_array_values(inference_state, typ)
+                for typ in lazy_value.infer()
+            )
+            values.append(LazyKnownValues(objects))
+        cls = FakeTuple if array.array_type == 'tuple' else FakeList
+        return {cls(inference_state, values)}
+    else:
+        return array.execute_annotation()
+
+
+@inference_state_method_cache()
+def infer_param(function_value, param):
+    def infer_docstring(docstring):
+        return ValueSet(
+            p
+            for param_str in _search_param_in_docstr(docstring, param.name.value)
+            for p in _infer_for_statement_string(module_context, param_str)
+        )
+    module_context = function_value.get_root_context()
+    func = param.get_parent_function()
+    if func.type == 'lambdef':
+        return NO_VALUES
+
+    types = infer_docstring(function_value.py__doc__())
+    if function_value.is_bound_method() \
+            and function_value.py__name__() == '__init__':
+        types |= infer_docstring(function_value.class_context.py__doc__())
+
+    debug.dbg('Found param types for docstring: %s', types, color='BLUE')
+    return types
+
+
+@inference_state_method_cache()
+@iterator_to_value_set
+def infer_return_types(function_value):
+    def search_return_in_docstr(code):
+        for p in DOCSTRING_RETURN_PATTERNS:
+            match = p.search(code)
+            if match:
+                yield _strip_rst_role(match.group(1))
+        # Check for numpy style return hint
+        yield from _search_return_in_numpydocstr(code)
+
+    for type_str in search_return_in_docstr(function_value.py__doc__()):
+        yield from _infer_for_statement_string(function_value.get_root_context(), type_str)
diff --git a/lib/python3.10/site-packages/jedi/inference/dynamic_params.py b/lib/python3.10/site-packages/jedi/inference/dynamic_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..e759111a67311e685f2a947e26c2e7a891143ec8
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/inference/dynamic_params.py
@@ -0,0 +1,224 @@
+"""
+One of the really important features of |jedi| is to have an option to
+understand code like this::
+
+    def foo(bar):
+        bar. # completion here
+    foo(1)
+
+There's no doubt whether bar is an ``int`` or not, but if there's also a call
+like ``foo('str')``, what would happen? Well, we'll just show both. Because
+that's what a human would expect.
+
+It works as follows:
+
+- |Jedi| sees a param
+- search for function calls named ``foo``
+- execute these calls and check the input.
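+
+For the example above that means completing ``bar.`` offers the attributes of
+``int``, because ``foo(1)`` is the only call found; with an additional
+``foo('str')`` the attributes of ``str`` would show up as well.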
+"""
+
+from jedi import settings
+from jedi import debug
+from jedi.parser_utils import get_parent_scope
+from jedi.inference.cache import inference_state_method_cache
+from jedi.inference.arguments import TreeArguments
+from jedi.inference.param import get_executed_param_names
+from jedi.inference.helpers import is_stdlib_path
+from jedi.inference.utils import to_list
+from jedi.inference.value import instance
+from jedi.inference.base_value import ValueSet, NO_VALUES
+from jedi.inference.references import get_module_contexts_containing_name
+from jedi.inference import recursion
+
+
+MAX_PARAM_SEARCHES = 20
+
+
+def _avoid_recursions(func):
+    def wrapper(function_value, param_index):
+        inf = function_value.inference_state
+        with recursion.execution_allowed(inf, function_value.tree_node) as allowed:
+            # We need to catch recursions that may occur, because
+            # anonymous functions can create an anonymous parameter that is
+            # more or less self-referencing.
+            if allowed:
+                inf.dynamic_params_depth += 1
+                try:
+                    return func(function_value, param_index)
+                finally:
+                    inf.dynamic_params_depth -= 1
+            return NO_VALUES
+    return wrapper
+
+
+@debug.increase_indent
+@_avoid_recursions
+def dynamic_param_lookup(function_value, param_index):
+    """
+    A dynamic search for param values. If you try to complete a type:
+
+    >>> def func(foo):
+    ...     foo
+    >>> func(1)
+    >>> func("")
+
+    It is not known what the type of ``foo`` is without analysing the whole
+    code. You have to look for all calls to ``func`` to find out what ``foo``
+    possibly is.
+    """
+    if not function_value.inference_state.do_dynamic_params_search:
+        return NO_VALUES
+
+    funcdef = function_value.tree_node
+
+    path = function_value.get_root_context().py__file__()
+    if path is not None and is_stdlib_path(path):
+        # We don't want to search for references in the stdlib. Usually people
+        # don't work with it (except if you are a core maintainer, sorry).
+        # This makes everything slower. Just disable it and run the tests,
+        # you will see the slowdown, especially in 3.6.
+        return NO_VALUES
+
+    if funcdef.type == 'lambdef':
+        string_name = _get_lambda_name(funcdef)
+        if string_name is None:
+            return NO_VALUES
+    else:
+        string_name = funcdef.name.value
+    debug.dbg('Dynamic param search in %s.', string_name, color='MAGENTA')
+
+    module_context = function_value.get_root_context()
+    arguments_list = _search_function_arguments(module_context, funcdef, string_name)
+    values = ValueSet.from_sets(
+        get_executed_param_names(
+            function_value, arguments
+        )[param_index].infer()
+        for arguments in arguments_list
+    )
+    debug.dbg('Dynamic param result finished', color='MAGENTA')
+    return values
+
+
+@inference_state_method_cache(default=None)
+@to_list
+def _search_function_arguments(module_context, funcdef, string_name):
+    """
+    Searches for calls to the given function and yields their arguments.
+    """
+    compare_node = funcdef
+    if string_name == '__init__':
+        cls = get_parent_scope(funcdef)
+        if cls.type == 'classdef':
+            string_name = cls.name.value
+            compare_node = cls
+
+    found_arguments = False
+    i = 0
+    inference_state = module_context.inference_state
+
+    if settings.dynamic_params_for_other_modules:
+        module_contexts = get_module_contexts_containing_name(
+            inference_state, [module_context], string_name,
+            # Limit the amount of files to be opened massively.
+ limit_reduction=5, + ) + else: + module_contexts = [module_context] + + for for_mod_context in module_contexts: + for name, trailer in _get_potential_nodes(for_mod_context, string_name): + i += 1 + + # This is a simple way to stop Jedi's dynamic param recursion + # from going wild: The deeper Jedi's in the recursion, the less + # code should be inferred. + if i * inference_state.dynamic_params_depth > MAX_PARAM_SEARCHES: + return + + random_context = for_mod_context.create_context(name) + for arguments in _check_name_for_execution( + inference_state, random_context, compare_node, name, trailer): + found_arguments = True + yield arguments + + # If there are results after processing a module, we're probably + # good to process. This is a speed optimization. + if found_arguments: + return + + +def _get_lambda_name(node): + stmt = node.parent + if stmt.type == 'expr_stmt': + first_operator = next(stmt.yield_operators(), None) + if first_operator == '=': + first = stmt.children[0] + if first.type == 'name': + return first.value + + return None + + +def _get_potential_nodes(module_value, func_string_name): + try: + names = module_value.tree_node.get_used_names()[func_string_name] + except KeyError: + return + + for name in names: + bracket = name.get_next_leaf() + trailer = bracket.parent + if trailer.type == 'trailer' and bracket == '(': + yield name, trailer + + +def _check_name_for_execution(inference_state, context, compare_node, name, trailer): + from jedi.inference.value.function import BaseFunctionExecutionContext + + def create_args(value): + arglist = trailer.children[1] + if arglist == ')': + arglist = None + args = TreeArguments(inference_state, context, arglist, trailer) + from jedi.inference.value.instance import InstanceArguments + if value.tree_node.type == 'classdef': + created_instance = instance.TreeInstance( + inference_state, + value.parent_context, + value, + args + ) + return InstanceArguments(created_instance, args) + else: + if value.is_bound_method(): + args = InstanceArguments(value.instance, args) + return args + + for value in inference_state.infer(context, name): + value_node = value.tree_node + if compare_node == value_node: + yield create_args(value) + elif isinstance(value.parent_context, BaseFunctionExecutionContext) \ + and compare_node.type == 'funcdef': + # Here we're trying to find decorators by checking the first + # parameter. It's not very generic though. Should find a better + # solution that also applies to nested decorators. + param_names = value.parent_context.get_param_names() + if len(param_names) != 1: + continue + values = param_names[0].infer() + if [v.tree_node for v in values] == [compare_node]: + # Found a decorator. 
+ module_context = context.get_root_context() + execution_context = value.as_context(create_args(value)) + potential_nodes = _get_potential_nodes(module_context, param_names[0].string_name) + for name, trailer in potential_nodes: + if value_node.start_pos < name.start_pos < value_node.end_pos: + random_context = execution_context.create_context(name) + yield from _check_name_for_execution( + inference_state, + random_context, + compare_node, + name, + trailer + ) diff --git a/lib/python3.10/site-packages/jedi/inference/filters.py b/lib/python3.10/site-packages/jedi/inference/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..62782334b68d20ba1730d0b69c1e7f35a5689373 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/filters.py @@ -0,0 +1,371 @@ +""" +Filters are objects that you can use to filter names in different scopes. They +are needed for name resolution. +""" +from abc import abstractmethod +from typing import List, MutableMapping, Type +import weakref + +from parso.tree import search_ancestor +from parso.python.tree import Name, UsedNamesMapping + +from jedi.inference import flow_analysis +from jedi.inference.base_value import ValueSet, ValueWrapper, \ + LazyValueWrapper +from jedi.parser_utils import get_cached_parent_scope, get_parso_cache_node +from jedi.inference.utils import to_list +from jedi.inference.names import TreeNameDefinition, ParamName, \ + AnonymousParamName, AbstractNameDefinition, NameWrapper + +_definition_name_cache: MutableMapping[UsedNamesMapping, List[Name]] +_definition_name_cache = weakref.WeakKeyDictionary() + + +class AbstractFilter: + _until_position = None + + def _filter(self, names): + if self._until_position is not None: + return [n for n in names if n.start_pos < self._until_position] + return names + + @abstractmethod + def get(self, name): + raise NotImplementedError + + @abstractmethod + def values(self): + raise NotImplementedError + + +class FilterWrapper: + name_wrapper_class: Type[NameWrapper] + + def __init__(self, wrapped_filter): + self._wrapped_filter = wrapped_filter + + def wrap_names(self, names): + return [self.name_wrapper_class(name) for name in names] + + def get(self, name): + return self.wrap_names(self._wrapped_filter.get(name)) + + def values(self): + return self.wrap_names(self._wrapped_filter.values()) + + +def _get_definition_names(parso_cache_node, used_names, name_key): + if parso_cache_node is None: + names = used_names.get(name_key, ()) + return tuple(name for name in names if name.is_definition(include_setitem=True)) + + try: + for_module = _definition_name_cache[parso_cache_node] + except KeyError: + for_module = _definition_name_cache[parso_cache_node] = {} + + try: + return for_module[name_key] + except KeyError: + names = used_names.get(name_key, ()) + result = for_module[name_key] = tuple( + name for name in names if name.is_definition(include_setitem=True) + ) + return result + + +class _AbstractUsedNamesFilter(AbstractFilter): + name_class = TreeNameDefinition + + def __init__(self, parent_context, node_context=None): + if node_context is None: + node_context = parent_context + self._node_context = node_context + self._parser_scope = node_context.tree_node + module_context = node_context.get_root_context() + # It is quite hacky that we have to use that. This is for caching + # certain things with a WeakKeyDictionary. 
However, parso intentionally + # uses slots (to save memory) and therefore we end up with having to + # have a weak reference to the object that caches the tree. + # + # Previously we have tried to solve this by using a weak reference onto + # used_names. However that also does not work, because it has a + # reference from the module, which itself is referenced by any node + # through parents. + path = module_context.py__file__() + if path is None: + # If the path is None, there is no guarantee that parso caches it. + self._parso_cache_node = None + else: + self._parso_cache_node = get_parso_cache_node( + module_context.inference_state.latest_grammar + if module_context.is_stub() else module_context.inference_state.grammar, + path + ) + self._used_names = module_context.tree_node.get_used_names() + self.parent_context = parent_context + + def get(self, name): + return self._convert_names(self._filter( + _get_definition_names(self._parso_cache_node, self._used_names, name), + )) + + def _convert_names(self, names): + return [self.name_class(self.parent_context, name) for name in names] + + def values(self): + return self._convert_names( + name + for name_key in self._used_names + for name in self._filter( + _get_definition_names(self._parso_cache_node, self._used_names, name_key), + ) + ) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.parent_context) + + +class ParserTreeFilter(_AbstractUsedNamesFilter): + def __init__(self, parent_context, node_context=None, until_position=None, + origin_scope=None): + """ + node_context is an option to specify a second value for use cases + like the class mro where the parent class of a new name would be the + value, but for some type inference it's important to have a local + value of the other classes. + """ + super().__init__(parent_context, node_context) + self._origin_scope = origin_scope + self._until_position = until_position + + def _filter(self, names): + names = super()._filter(names) + names = [n for n in names if self._is_name_reachable(n)] + return list(self._check_flows(names)) + + def _is_name_reachable(self, name): + parent = name.parent + if parent.type == 'trailer': + return False + base_node = parent if parent.type in ('classdef', 'funcdef') else name + return get_cached_parent_scope(self._parso_cache_node, base_node) == self._parser_scope + + def _check_flows(self, names): + for name in sorted(names, key=lambda name: name.start_pos, reverse=True): + check = flow_analysis.reachability_check( + context=self._node_context, + value_scope=self._parser_scope, + node=name, + origin_scope=self._origin_scope + ) + if check is not flow_analysis.UNREACHABLE: + yield name + + if check is flow_analysis.REACHABLE: + break + + +class _FunctionExecutionFilter(ParserTreeFilter): + def __init__(self, parent_context, function_value, until_position, origin_scope): + super().__init__( + parent_context, + until_position=until_position, + origin_scope=origin_scope, + ) + self._function_value = function_value + + def _convert_param(self, param, name): + raise NotImplementedError + + @to_list + def _convert_names(self, names): + for name in names: + param = search_ancestor(name, 'param') + # Here we don't need to check if the param is a default/annotation, + # because those are not definitions and never make it to this + # point. 
+ if param: + yield self._convert_param(param, name) + else: + yield TreeNameDefinition(self.parent_context, name) + + +class FunctionExecutionFilter(_FunctionExecutionFilter): + def __init__(self, *args, arguments, **kwargs): + super().__init__(*args, **kwargs) + self._arguments = arguments + + def _convert_param(self, param, name): + return ParamName(self._function_value, name, self._arguments) + + +class AnonymousFunctionExecutionFilter(_FunctionExecutionFilter): + def _convert_param(self, param, name): + return AnonymousParamName(self._function_value, name) + + +class GlobalNameFilter(_AbstractUsedNamesFilter): + def get(self, name): + try: + names = self._used_names[name] + except KeyError: + return [] + return self._convert_names(self._filter(names)) + + @to_list + def _filter(self, names): + for name in names: + if name.parent.type == 'global_stmt': + yield name + + def values(self): + return self._convert_names( + name for name_list in self._used_names.values() + for name in self._filter(name_list) + ) + + +class DictFilter(AbstractFilter): + def __init__(self, dct): + self._dct = dct + + def get(self, name): + try: + value = self._convert(name, self._dct[name]) + except KeyError: + return [] + else: + return list(self._filter([value])) + + def values(self): + def yielder(): + for item in self._dct.items(): + try: + yield self._convert(*item) + except KeyError: + pass + return self._filter(yielder()) + + def _convert(self, name, value): + return value + + def __repr__(self): + keys = ', '.join(self._dct.keys()) + return '<%s: for {%s}>' % (self.__class__.__name__, keys) + + +class MergedFilter: + def __init__(self, *filters): + self._filters = filters + + def get(self, name): + return [n for filter in self._filters for n in filter.get(name)] + + def values(self): + return [n for filter in self._filters for n in filter.values()] + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, ', '.join(str(f) for f in self._filters)) + + +class _BuiltinMappedMethod(ValueWrapper): + """``Generator.__next__`` ``dict.values`` methods and so on.""" + api_type = 'function' + + def __init__(self, value, method, builtin_func): + super().__init__(builtin_func) + self._value = value + self._method = method + + def py__call__(self, arguments): + # TODO add TypeError if params are given/or not correct. + return self._method(self._value, arguments) + + +class SpecialMethodFilter(DictFilter): + """ + A filter for methods that are defined in this module on the corresponding + classes like Generator (for __next__, etc). + """ + class SpecialMethodName(AbstractNameDefinition): + api_type = 'function' + + def __init__(self, parent_context, string_name, callable_, builtin_value): + self.parent_context = parent_context + self.string_name = string_name + self._callable = callable_ + self._builtin_value = builtin_value + + def infer(self): + for filter in self._builtin_value.get_filters(): + # We can take the first index, because on builtin methods there's + # always only going to be one name. The same is true for the + # inferred values. 
+                for name in filter.get(self.string_name):
+                    builtin_func = next(iter(name.infer()))
+                    break
+                else:
+                    continue
+                break
+            return ValueSet([
+                _BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
+            ])
+
+    def __init__(self, value, dct, builtin_value):
+        super().__init__(dct)
+        self.value = value
+        self._builtin_value = builtin_value
+        """
+        This value is what will be used to introspect the name, whereas the
+        other value will be used to execute the function.
+
+        We distinguish, because we have to.
+        """
+
+    def _convert(self, name, value):
+        return self.SpecialMethodName(self.value, name, value, self._builtin_value)
+
+
+class _OverwriteMeta(type):
+    def __init__(cls, name, bases, dct):
+        super().__init__(name, bases, dct)
+
+        base_dct = {}
+        for base_cls in reversed(cls.__bases__):
+            try:
+                base_dct.update(base_cls.overwritten_methods)
+            except AttributeError:
+                pass
+
+        for func in cls.__dict__.values():
+            try:
+                base_dct.update(func.registered_overwritten_methods)
+            except AttributeError:
+                pass
+        cls.overwritten_methods = base_dct
+
+
+class _AttributeOverwriteMixin:
+    def get_filters(self, *args, **kwargs):
+        yield SpecialMethodFilter(self, self.overwritten_methods, self._wrapped_value)
+        yield from self._wrapped_value.get_filters(*args, **kwargs)
+
+
+class LazyAttributeOverwrite(_AttributeOverwriteMixin, LazyValueWrapper,
+                             metaclass=_OverwriteMeta):
+    def __init__(self, inference_state):
+        self.inference_state = inference_state
+
+
+class AttributeOverwrite(_AttributeOverwriteMixin, ValueWrapper,
+                         metaclass=_OverwriteMeta):
+    pass
+
+
+def publish_method(method_name):
+    def decorator(func):
+        dct = func.__dict__.setdefault('registered_overwritten_methods', {})
+        dct[method_name] = func
+        return func
+    return decorator
diff --git a/lib/python3.10/site-packages/jedi/inference/finder.py b/lib/python3.10/site-packages/jedi/inference/finder.py
new file mode 100644
index 0000000000000000000000000000000000000000..aac587324f8187159421aca3bf2bbf1ea64cf5f9
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/inference/finder.py
@@ -0,0 +1,146 @@
+"""
+Searching for names with given scope and name. This is very central in Jedi and
+Python. The name resolution is quite complicated with descriptors,
+``__getattribute__``, ``__getattr__``, ``global``, etc.
+
+If you want to understand name resolution, please read the first few chapters
+in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
+
+Flow checks
++++++++++++
+
+Flow checks are not really mature. There's only a check for ``isinstance``. It
+would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
+Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
+check for -> a is a string). There's big potential in these checks.
+"""
+
+from parso.tree import search_ancestor
+from parso.python.tree import Name
+
+from jedi import settings
+from jedi.inference.arguments import TreeArguments
+from jedi.inference.value import iterable
+from jedi.inference.base_value import NO_VALUES
+from jedi.parser_utils import is_scope
+
+
+def filter_name(filters, name_or_str):
+    """
+    Searches names that are defined in a scope (the different
+    ``filters``), until a name fits.
+ """ + string_name = name_or_str.value if isinstance(name_or_str, Name) else name_or_str + names = [] + for filter in filters: + names = filter.get(string_name) + if names: + break + + return list(_remove_del_stmt(names)) + + +def _remove_del_stmt(names): + # Catch del statements and remove them from results. + for name in names: + if name.tree_name is not None: + definition = name.tree_name.get_definition() + if definition is not None and definition.type == 'del_stmt': + continue + yield name + + +def check_flow_information(value, flow, search_name, pos): + """ Try to find out the type of a variable just with the information that + is given by the flows: e.g. It is also responsible for assert checks.:: + + if isinstance(k, str): + k. # <- completion here + + ensures that `k` is a string. + """ + if not settings.dynamic_flow_information: + return None + + result = None + if is_scope(flow): + # Check for asserts. + module_node = flow.get_root_node() + try: + names = module_node.get_used_names()[search_name.value] + except KeyError: + return None + names = reversed([ + n for n in names + if flow.start_pos <= n.start_pos < (pos or flow.end_pos) + ]) + + for name in names: + ass = search_ancestor(name, 'assert_stmt') + if ass is not None: + result = _check_isinstance_type(value, ass.assertion, search_name) + if result is not None: + return result + + if flow.type in ('if_stmt', 'while_stmt'): + potential_ifs = [c for c in flow.children[1::4] if c != ':'] + for if_test in reversed(potential_ifs): + if search_name.start_pos > if_test.end_pos: + return _check_isinstance_type(value, if_test, search_name) + return result + + +def _get_isinstance_trailer_arglist(node): + if node.type in ('power', 'atom_expr') and len(node.children) == 2: + # This might be removed if we analyze and, etc + first, trailer = node.children + if first.type == 'name' and first.value == 'isinstance' \ + and trailer.type == 'trailer' and trailer.children[0] == '(': + return trailer + return None + + +def _check_isinstance_type(value, node, search_name): + lazy_cls = None + trailer = _get_isinstance_trailer_arglist(node) + if trailer is not None and len(trailer.children) == 3: + arglist = trailer.children[1] + args = TreeArguments(value.inference_state, value, arglist, trailer) + param_list = list(args.unpack()) + # Disallow keyword arguments + if len(param_list) == 2 and len(arglist.children) == 3: + (key1, _), (key2, lazy_value_cls) = param_list + if key1 is None and key2 is None: + call = _get_call_string(search_name) + is_instance_call = _get_call_string(arglist.children[0]) + # Do a simple get_code comparison of the strings . They should + # just have the same code, and everything will be all right. + # There are ways that this is not correct, if some stuff is + # redefined in between. However here we don't care, because + # it's a heuristic that works pretty well. 
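+                # Illustration: for `isinstance(foo.bar, str)` both the
+                # searched name `foo.bar` and the first argument of the
+                # isinstance call serialize (via _get_call_string) to the
+                # same string 'foo.bar', so the comparison matches.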
+            if call == is_instance_call:
+                lazy_cls = lazy_value_cls
+    if lazy_cls is None:
+        return None
+
+    value_set = NO_VALUES
+    for cls_or_tup in lazy_cls.infer():
+        if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
+            for lazy_value in cls_or_tup.py__iter__():
+                value_set |= lazy_value.infer().execute_with_values()
+        else:
+            value_set |= cls_or_tup.execute_with_values()
+    return value_set
+
+
+def _get_call_string(node):
+    if node.parent.type == 'atom_expr':
+        return _get_call_string(node.parent)
+
+    code = ''
+    leaf = node.get_first_leaf()
+    end = node.get_last_leaf().end_pos
+    while leaf.start_pos < end:
+        code += leaf.value
+        leaf = leaf.get_next_leaf()
+    return code
diff --git a/lib/python3.10/site-packages/jedi/inference/flow_analysis.py b/lib/python3.10/site-packages/jedi/inference/flow_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..89bfe578dfcb4dd2770c6218ed32d374639910fb
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/inference/flow_analysis.py
@@ -0,0 +1,125 @@
+from typing import Dict, Optional
+
+from jedi.parser_utils import get_flow_branch_keyword, is_scope, get_parent_scope
+from jedi.inference.recursion import execution_allowed
+from jedi.inference.helpers import is_big_annoying_library
+
+
+class Status:
+    lookup_table: Dict[Optional[bool], 'Status'] = {}
+
+    def __init__(self, value: Optional[bool], name: str) -> None:
+        self._value = value
+        self._name = name
+        Status.lookup_table[value] = self
+
+    def invert(self):
+        if self is REACHABLE:
+            return UNREACHABLE
+        elif self is UNREACHABLE:
+            return REACHABLE
+        else:
+            return UNSURE
+
+    def __and__(self, other):
+        if UNSURE in (self, other):
+            return UNSURE
+        else:
+            return REACHABLE if self._value and other._value else UNREACHABLE
+
+    def __repr__(self):
+        return '<%s: %s>' % (type(self).__name__, self._name)
+
+
+REACHABLE = Status(True, 'reachable')
+UNREACHABLE = Status(False, 'unreachable')
+UNSURE = Status(None, 'unsure')
+
+
+def _get_flow_scopes(node):
+    while True:
+        node = get_parent_scope(node, include_flows=True)
+        if node is None or is_scope(node):
+            return
+        yield node
+
+
+def reachability_check(context, value_scope, node, origin_scope=None):
+    if is_big_annoying_library(context) \
+            or not context.inference_state.flow_analysis_enabled:
+        return UNSURE
+
+    first_flow_scope = get_parent_scope(node, include_flows=True)
+    if origin_scope is not None:
+        origin_flow_scopes = list(_get_flow_scopes(origin_scope))
+        node_flow_scopes = list(_get_flow_scopes(node))
+
+        branch_matches = True
+        for flow_scope in origin_flow_scopes:
+            if flow_scope in node_flow_scopes:
+                node_keyword = get_flow_branch_keyword(flow_scope, node)
+                origin_keyword = get_flow_branch_keyword(flow_scope, origin_scope)
+                branch_matches = node_keyword == origin_keyword
+                if flow_scope.type == 'if_stmt':
+                    if not branch_matches:
+                        return UNREACHABLE
+                elif flow_scope.type == 'try_stmt':
+                    if not branch_matches and origin_keyword == 'else' \
+                            and node_keyword == 'except':
+                        return UNREACHABLE
+                if branch_matches:
+                    break
+
+        # Direct parents get resolved, we filter scopes that are separate
+        # branches. This makes sense for autocompletion and static analysis.
+        # For actual Python it doesn't matter, because we're talking about
+        # potentially unreachable code.
+        # e.g. `if 0:` would make all name lookups within the flow
+        # inaccessible. This is not a "problem" in Python, because the code is
+        # never called. In Jedi though, we still want to infer types.
+ while origin_scope is not None: + if first_flow_scope == origin_scope and branch_matches: + return REACHABLE + origin_scope = origin_scope.parent + + return _break_check(context, value_scope, first_flow_scope, node) + + +def _break_check(context, value_scope, flow_scope, node): + reachable = REACHABLE + if flow_scope.type == 'if_stmt': + if flow_scope.is_node_after_else(node): + for check_node in flow_scope.get_test_nodes(): + reachable = _check_if(context, check_node) + if reachable in (REACHABLE, UNSURE): + break + reachable = reachable.invert() + else: + flow_node = flow_scope.get_corresponding_test_node(node) + if flow_node is not None: + reachable = _check_if(context, flow_node) + elif flow_scope.type in ('try_stmt', 'while_stmt'): + return UNSURE + + # Only reachable branches need to be examined further. + if reachable in (UNREACHABLE, UNSURE): + return reachable + + if value_scope != flow_scope and value_scope != flow_scope.parent: + flow_scope = get_parent_scope(flow_scope, include_flows=True) + return reachable & _break_check(context, value_scope, flow_scope, node) + else: + return reachable + + +def _check_if(context, node): + with execution_allowed(context.inference_state, node) as allowed: + if not allowed: + return UNSURE + + types = context.infer_node(node) + values = set(x.py__bool__() for x in types) + if len(values) == 1: + return Status.lookup_table[values.pop()] + else: + return UNSURE diff --git a/lib/python3.10/site-packages/jedi/inference/helpers.py b/lib/python3.10/site-packages/jedi/inference/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..0e344c2404f8f213cff1914b859f1c300d1f3f12 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/helpers.py @@ -0,0 +1,202 @@ +import copy +import sys +import re +import os +from itertools import chain +from contextlib import contextmanager + +from parso.python import tree + + +def is_stdlib_path(path): + # Python standard library paths look like this: + # /usr/lib/python3.9/... + # TODO The implementation below is probably incorrect and not complete. + parts = path.parts + if 'dist-packages' in parts or 'site-packages' in parts: + return False + + base_path = os.path.join(sys.prefix, 'lib', 'python') + return bool(re.match(re.escape(base_path) + r'\d.\d', str(path))) + + +def deep_ast_copy(obj): + """ + Much, much faster than copy.deepcopy, but just for parser tree nodes. + """ + # If it's already in the cache, just return it. + new_obj = copy.copy(obj) + + # Copy children + new_children = [] + for child in obj.children: + if isinstance(child, tree.Leaf): + new_child = copy.copy(child) + new_child.parent = new_obj + else: + new_child = deep_ast_copy(child) + new_child.parent = new_obj + new_children.append(new_child) + new_obj.children = new_children + + return new_obj + + +def infer_call_of_leaf(context, leaf, cut_own_trailer=False): + """ + Creates a "call" node that consist of all ``trailer`` and ``power`` + objects. E.g. if you call it with ``append``:: + + list([]).append(3) or None + + You would get a node with the content ``list([]).append`` back. + + This generates a copy of the original ast node. + + If you're using the leaf, e.g. the bracket `)` it will return ``list([])``. + + We use this function for two purposes. Given an expression ``bar.foo``, + we may want to + - infer the type of ``foo`` to offer completions after foo + - infer the type of ``bar`` to be able to jump to the definition of foo + The option ``cut_own_trailer`` must be set to true for the second purpose. 
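+
+    For ``list([]).append`` that means: with ``cut_own_trailer=False`` the
+    result describes the ``append`` method itself, while with
+    ``cut_own_trailer=True`` the own trailer is cut off and the result
+    describes ``list([])``.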
+    """
+    trailer = leaf.parent
+    if trailer.type == 'fstring':
+        from jedi.inference import compiled
+        return compiled.get_string_value_set(context.inference_state)
+
+    # The leaf may not be the last or first child, because there exist three
+    # different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
+    # we should not match anything more than x.
+    if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
+        if leaf == ':':
+            # Basically happens with foo[:] when the cursor is on the colon
+            from jedi.inference.base_value import NO_VALUES
+            return NO_VALUES
+        if trailer.type == 'atom':
+            return context.infer_node(trailer)
+        return context.infer_node(leaf)
+
+    power = trailer.parent
+    index = power.children.index(trailer)
+    if cut_own_trailer:
+        cut = index
+    else:
+        cut = index + 1
+
+    if power.type == 'error_node':
+        start = index
+        while True:
+            start -= 1
+            base = power.children[start]
+            if base.type != 'trailer':
+                break
+        trailers = power.children[start + 1:cut]
+    else:
+        base = power.children[0]
+        trailers = power.children[1:cut]
+
+    if base == 'await':
+        base = trailers[0]
+        trailers = trailers[1:]
+
+    values = context.infer_node(base)
+    from jedi.inference.syntax_tree import infer_trailer
+    for trailer in trailers:
+        values = infer_trailer(context, values, trailer)
+    return values
+
+
+def get_names_of_node(node):
+    try:
+        children = node.children
+    except AttributeError:
+        if node.type == 'name':
+            return [node]
+        else:
+            return []
+    else:
+        return list(chain.from_iterable(get_names_of_node(c) for c in children))
+
+
+def is_string(value):
+    return value.is_compiled() and isinstance(value.get_safe_value(default=None), str)
+
+
+def is_literal(value):
+    return is_number(value) or is_string(value)
+
+
+def _get_safe_value_or_none(value, accept):
+    value = value.get_safe_value(default=None)
+    if isinstance(value, accept):
+        return value
+
+
+def get_int_or_none(value):
+    return _get_safe_value_or_none(value, int)
+
+
+def get_str_or_none(value):
+    return _get_safe_value_or_none(value, str)
+
+
+def is_number(value):
+    return _get_safe_value_or_none(value, (int, float)) is not None
+
+
+class SimpleGetItemNotFound(Exception):
+    pass
+
+
+@contextmanager
+def reraise_getitem_errors(*exception_classes):
+    try:
+        yield
+    except exception_classes as e:
+        raise SimpleGetItemNotFound(e)
+
+
+def parse_dotted_names(nodes, is_import_from, until_node=None):
+    level = 0
+    names = []
+    for node in nodes[1:]:
+        if node in ('.', '...'):
+            if not names:
+                level += len(node.value)
+        elif node.type == 'dotted_name':
+            for n in node.children[::2]:
+                names.append(n)
+                if n is until_node:
+                    break
+            else:
+                continue
+            break
+        elif node.type == 'name':
+            names.append(node)
+            if node is until_node:
+                break
+        elif node == ',':
+            if not is_import_from:
+                names = []
+        else:
+            # Here if the keyword `import` comes along it stops checking
+            # for names.
+            break
+    return level, names
+
+
+def values_from_qualified_names(inference_state, *names):
+    return inference_state.import_module(names[:-1]).py__getattribute__(names[-1])
+
+
+def is_big_annoying_library(context):
+    string_names = context.get_root_context().string_names
+    if string_names is None:
+        return False
+
+    # Especially pandas and tensorflow are huge complicated Python libraries
+    # that get even slower than they already are when Jedi tries to understand
+    # dynamic features like decorators, ifs and other stuff.
+ return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib') diff --git a/lib/python3.10/site-packages/jedi/inference/imports.py b/lib/python3.10/site-packages/jedi/inference/imports.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a4953fbbaf8b3cb16b279956300560c8a591e2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/imports.py @@ -0,0 +1,592 @@ +""" +:mod:`jedi.inference.imports` is here to resolve import statements and return +the modules/classes/functions/whatever, which they stand for. However there's +not any actual importing done. This module is about finding modules in the +filesystem. This can be quite tricky sometimes, because Python imports are not +always that simple. + +This module also supports import autocompletion, which means to complete +statements like ``from datetim`` (cursor at the end would return ``datetime``). +""" +import os +from pathlib import Path + +from parso.python import tree +from parso.tree import search_ancestor + +from jedi import debug +from jedi import settings +from jedi.file_io import FolderIO +from jedi.parser_utils import get_cached_code_lines +from jedi.inference import sys_path +from jedi.inference import helpers +from jedi.inference import compiled +from jedi.inference import analysis +from jedi.inference.utils import unite +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.names import ImportName, SubModuleName +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.gradual.typeshed import import_module_decorator, \ + create_stub_module, parse_stub_module +from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo +from jedi.plugins import plugin_manager + + +class ModuleCache: + def __init__(self): + self._name_cache = {} + + def add(self, string_names, value_set): + if string_names is not None: + self._name_cache[string_names] = value_set + + def get(self, string_names): + return self._name_cache.get(string_names) + + +# This memoization is needed, because otherwise we will infinitely loop on +# certain imports. +@inference_state_method_cache(default=NO_VALUES) +def infer_import(context, tree_name): + module_context = context.get_root_context() + from_import_name, import_path, level, values = \ + _prepare_infer_import(module_context, tree_name) + if values: + + if from_import_name is not None: + values = values.py__getattribute__( + from_import_name, + name_context=context, + analysis_errors=False + ) + + if not values: + path = import_path + (from_import_name,) + importer = Importer(context.inference_state, path, module_context, level) + values = importer.follow() + debug.dbg('after import: %s', values) + return values + + +@inference_state_method_cache(default=[]) +def goto_import(context, tree_name): + module_context = context.get_root_context() + from_import_name, import_path, level, values = \ + _prepare_infer_import(module_context, tree_name) + if not values: + return [] + + if from_import_name is not None: + names = unite([ + c.goto( + from_import_name, + name_context=context, + analysis_errors=False + ) for c in values + ]) + # Avoid recursion on the same names. 
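+ # (goto on an import name can land on the very tree name we started + # from; in that case we fall through below and import the name as a + # module instead)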
+ if names and not any(n.tree_name is tree_name for n in names): + return names + + path = import_path + (from_import_name,) + importer = Importer(context.inference_state, path, module_context, level) + values = importer.follow() + return set(s.name for s in values) + + + def _prepare_infer_import(module_context, tree_name): + import_node = search_ancestor(tree_name, 'import_name', 'import_from') + import_path = import_node.get_path_for_name(tree_name) + from_import_name = None + try: + from_names = import_node.get_from_names() + except AttributeError: + # Is an import_name + pass + else: + if len(from_names) + 1 == len(import_path): + # We have to fetch the from_names part first and then check + # if from_names exists in the modules. + from_import_name = import_path[-1] + import_path = from_names + + importer = Importer(module_context.inference_state, tuple(import_path), + module_context, import_node.level) + + return from_import_name, tuple(import_path), import_node.level, importer.follow() + + + def _add_error(value, name, message): + if hasattr(name, 'parent') and value is not None: + analysis.add(value, 'import-error', name, message) + else: + debug.warning('ImportError without origin: ' + message) + + + def _level_to_base_import_path(project_path, directory, level): + """ + In case the level is outside of the currently known package (something like + import .....foo), we can still try our best to help the user with + completions. + """ + for i in range(level - 1): + old = directory + directory = os.path.dirname(directory) + if old == directory: + return None, None + + d = directory + level_import_paths = [] + # Now that we are on the level that the user wants to be, calculate the + # import path for it. + while True: + if d == project_path: + return level_import_paths, d + dir_name = os.path.basename(d) + if dir_name: + level_import_paths.insert(0, dir_name) + d = os.path.dirname(d) + else: + return None, directory + + + class Importer: + def __init__(self, inference_state, import_path, module_context, level=0): + """ + An implementation similar to ``__import__``. Use `follow` + to actually follow the imports. + + *level* specifies whether to use absolute or relative imports. 0 (the + default) means only perform absolute imports. Positive values for level + indicate the number of parent directories to search relative to the + directory of the module calling ``__import__()`` (see PEP 328 for the + details). + + :param import_path: List of namespaces (strings or Names). + """ + debug.speed('import %s %s' % (import_path, module_context)) + self._inference_state = inference_state + self.level = level + self._module_context = module_context + + self._fixed_sys_path = None + self._infer_possible = True + if level: + base = module_context.get_value().py__package__() + # We need to care for two cases, the first one is if it's a valid + # Python import. This import has a properly defined module name + # chain like `foo.bar.baz` and an import in baz is made for + # `..lala.` It can then resolve to `foo.bar.lala`. + # The else here is a heuristic for all other cases, if for example + # in `foo` you search for `...bar`, it's obviously out of scope. + # However since Jedi tries to just do its best, we help the user + # here, because he might have specified something wrong in his + # project. + if level <= len(base): + # Here we basically rewrite the level to 0.
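+ # For example, with __package__ == ('foo', 'bar', 'baz') and + # level == 2, `base` becomes ('foo', 'bar') and an import of + # `..lala` resolves to `foo.bar.lala`.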
+ base = tuple(base) + if level > 1: + base = base[:-level + 1] + import_path = base + tuple(import_path) + else: + path = module_context.py__file__() + project_path = self._inference_state.project.path + import_path = list(import_path) + if path is None: + # If no path is defined, our best guess is that the current + # file is edited by a user on the current working + # directory. We need to add an initial path, because it + # will get removed as the name of the current file. + directory = project_path + else: + directory = os.path.dirname(path) + + base_import_path, base_directory = _level_to_base_import_path( + project_path, directory, level, + ) + if base_directory is None: + # Everything is lost, the relative import does point + # somewhere out of the filesystem. + self._infer_possible = False + else: + self._fixed_sys_path = [base_directory] + + if base_import_path is None: + if import_path: + _add_error( + module_context, import_path[0], + message='Attempted relative import beyond top-level package.' + ) + else: + import_path = base_import_path + import_path + self.import_path = import_path + + @property + def _str_import_path(self): + """Returns the import path as pure strings instead of `Name`.""" + return tuple( + name.value if isinstance(name, tree.Name) else name + for name in self.import_path + ) + + def _sys_path_with_modifications(self, is_completion): + if self._fixed_sys_path is not None: + return self._fixed_sys_path + + return ( + # For import completions we don't want to see init paths, but for + # inference we want to show the user as much as possible. + # See GH #1446. + self._inference_state.get_sys_path(add_init_paths=not is_completion) + + [ + str(p) for p + in sys_path.check_sys_path_modifications(self._module_context) + ] + ) + + def follow(self): + if not self.import_path: + if self._fixed_sys_path: + # This is a bit of a special case, that maybe should be + # revisited. If the project path is wrong or the user uses + # relative imports the wrong way, we might end up here, where + # the `fixed_sys_path == project.path` in that case we kind of + # use the project.path.parent directory as our path. This is + # usually not a problem, except if imports in other places are + # using the same names. Example: + # + # foo/ < #1 + # - setup.py + # - foo/ < #2 + # - __init__.py + # - foo.py < #3 + # + # If the top foo is our project folder and somebody uses + # `from . import foo` in `setup.py`, it will resolve to foo #2, + # which means that the import for foo.foo is cached as + # `__init__.py` (#2) and not as `foo.py` (#3). This is usually + # not an issue, because this case is probably pretty rare, but + # might be an issue for some people. + # + # However for most normal cases where we work with different + # file names, this code path hits where we basically change the + # project path to an ancestor of project path. 
+ from jedi.inference.value.namespace import ImplicitNamespaceValue + import_path = (os.path.basename(self._fixed_sys_path[0]),) + ns = ImplicitNamespaceValue( + self._inference_state, + string_names=import_path, + paths=self._fixed_sys_path, + ) + return ValueSet({ns}) + return NO_VALUES + if not self._infer_possible: + return NO_VALUES + + # Check caches first + from_cache = self._inference_state.stub_module_cache.get(self._str_import_path) + if from_cache is not None: + return ValueSet({from_cache}) + from_cache = self._inference_state.module_cache.get(self._str_import_path) + if from_cache is not None: + return from_cache + + sys_path = self._sys_path_with_modifications(is_completion=False) + + return import_module_by_names( + self._inference_state, self.import_path, sys_path, self._module_context + ) + + def _get_module_names(self, search_path=None, in_module=None): + """ + Get the names of all modules in the search_path. This means file names + and not names defined in the files. + """ + if search_path is None: + sys_path = self._sys_path_with_modifications(is_completion=True) + else: + sys_path = search_path + return list(iter_module_names( + self._inference_state, self._module_context, sys_path, + module_cls=ImportName if in_module is None else SubModuleName, + add_builtin_modules=search_path is None and in_module is None, + )) + + def completion_names(self, inference_state, only_modules=False): + """ + :param only_modules: Indicates whether it's possible to import a + definition that is not defined in a module. + """ + if not self._infer_possible: + return [] + + names = [] + if self.import_path: + # flask + if self._str_import_path == ('flask', 'ext'): + # List Flask extensions like ``flask_foo`` + for mod in self._get_module_names(): + modname = mod.string_name + if modname.startswith('flask_'): + extname = modname[len('flask_'):] + names.append(ImportName(self._module_context, extname)) + # Now the old style: ``flaskext.foo`` + for dir in self._sys_path_with_modifications(is_completion=True): + flaskext = os.path.join(dir, 'flaskext') + if os.path.isdir(flaskext): + names += self._get_module_names([flaskext]) + + values = self.follow() + for value in values: + # Non-modules are not completable. + if value.api_type not in ('module', 'namespace'): # not a module + continue + if not value.is_compiled(): + # sub_modules_dict is not implemented for compiled modules. + names += value.sub_modules_dict().values() + + if not only_modules: + from jedi.inference.gradual.conversion import convert_values + + both_values = values | convert_values(values) + for c in both_values: + for filter in c.get_filters(): + names += filter.values() + else: + if self.level: + # We only get here if the level cannot be properly calculated. + names += self._get_module_names(self._fixed_sys_path) + else: + # This is just the list of global imports.
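+ # i.e. all modules importable from sys.path (with project + # modifications applied) plus the builtin module names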
+ names += self._get_module_names() + return names + + + def import_module_by_names(inference_state, import_names, sys_path=None, + module_context=None, prefer_stubs=True): + if sys_path is None: + sys_path = inference_state.get_sys_path() + + str_import_names = tuple( + i.value if isinstance(i, tree.Name) else i + for i in import_names + ) + value_set = [None] + for i, name in enumerate(import_names): + value_set = ValueSet.from_sets([ + import_module( + inference_state, + str_import_names[:i+1], + parent_module_value, + sys_path, + prefer_stubs=prefer_stubs, + ) for parent_module_value in value_set + ]) + if not value_set: + message = 'No module named ' + '.'.join(str_import_names) + if module_context is not None: + _add_error(module_context, name, message) + else: + debug.warning(message) + return NO_VALUES + return value_set + + + @plugin_manager.decorate() + @import_module_decorator + def import_module(inference_state, import_names, parent_module_value, sys_path): + """ + This method is very similar to importlib's `_gcd_import`. + """ + if import_names[0] in settings.auto_import_modules: + module = _load_builtin_module(inference_state, import_names, sys_path) + if module is None: + return NO_VALUES + return ValueSet([module]) + + module_name = '.'.join(import_names) + if parent_module_value is None: + # Override the sys.path. It only works well that way. + # Injecting the path directly into `find_module` did not work. + file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info( + string=import_names[-1], + full_name=module_name, + sys_path=sys_path, + is_global_search=True, + ) + if is_pkg is None: + return NO_VALUES + else: + paths = parent_module_value.py__path__() + if paths is None: + # The module might not be a package. + return NO_VALUES + + file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info( + string=import_names[-1], + path=paths, + full_name=module_name, + is_global_search=False, + ) + if is_pkg is None: + return NO_VALUES + + if isinstance(file_io_or_ns, ImplicitNSInfo): + from jedi.inference.value.namespace import ImplicitNamespaceValue + module = ImplicitNamespaceValue( + inference_state, + string_names=tuple(file_io_or_ns.name.split('.')), + paths=file_io_or_ns.paths, + ) + elif file_io_or_ns is None: + module = _load_builtin_module(inference_state, import_names, sys_path) + if module is None: + return NO_VALUES + else: + module = _load_python_module( + inference_state, file_io_or_ns, + import_names=import_names, + is_package=is_pkg, + ) + + if parent_module_value is None: + debug.dbg('global search_module %s: %s', import_names[-1], module) + else: + debug.dbg('search_module %s in paths %s: %s', module_name, paths, module) + return ValueSet([module]) + + + def _load_python_module(inference_state, file_io, + import_names=None, is_package=False): + module_node = inference_state.parse( + file_io=file_io, + cache=True, + diff_cache=settings.fast_parser, + cache_path=settings.cache_directory, + ) + + from jedi.inference.value import ModuleValue + return ModuleValue( + inference_state, module_node, + file_io=file_io, + string_names=import_names, + code_lines=get_cached_code_lines(inference_state.grammar, file_io.path), + is_package=is_package, + ) + + + def _load_builtin_module(inference_state, import_names=None, sys_path=None): + project = inference_state.project + if sys_path is None: + sys_path = inference_state.get_sys_path() + if not project._load_unsafe_extensions: + safe_paths = project._get_base_sys_path(inference_state) + sys_path =
[p for p in sys_path if p in safe_paths] + + dotted_name = '.'.join(import_names) + assert dotted_name is not None + module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path) + if module is None: + # The file might raise an ImportError, for example, and therefore + # not be importable. + return None + return module + + + def load_module_from_path(inference_state, file_io, import_names=None, is_package=None): + """ + This should pretty much only be used for get_modules_containing_name. It's + here to ensure that a random path is still properly loaded into the Jedi + module structure. + """ + path = Path(file_io.path) + if import_names is None: + e_sys_path = inference_state.get_sys_path() + import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path) + else: + assert isinstance(is_package, bool) + + is_stub = path.suffix == '.pyi' + if is_stub: + folder_io = file_io.get_parent_folder() + if folder_io.path.endswith('-stubs'): + folder_io = FolderIO(folder_io.path[:-6]) + if path.name == '__init__.pyi': + python_file_io = folder_io.get_file_io('__init__.py') + else: + python_file_io = folder_io.get_file_io(import_names[-1] + '.py') + + try: + v = load_module_from_path( + inference_state, python_file_io, + import_names, is_package=is_package + ) + values = ValueSet([v]) + except FileNotFoundError: + values = NO_VALUES + + return create_stub_module( + inference_state, inference_state.latest_grammar, values, + parse_stub_module(inference_state, file_io), file_io, import_names + ) + else: + module = _load_python_module( + inference_state, file_io, + import_names=import_names, + is_package=is_package, + ) + inference_state.module_cache.add(import_names, ValueSet([module])) + return module + + + def load_namespace_from_path(inference_state, folder_io): + import_names, is_package = sys_path.transform_path_to_dotted( + inference_state.get_sys_path(), + Path(folder_io.path) + ) + from jedi.inference.value.namespace import ImplicitNamespaceValue + return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path]) + + + def follow_error_node_imports_if_possible(context, name): + error_node = tree.search_ancestor(name, 'error_node') + if error_node is not None: + # Get the first command start of a started simple_stmt. The error + # node is sometimes a small_stmt and sometimes a simple_stmt. Check + # for ; leaves that start new statements. + start_index = 0 + for index, n in enumerate(error_node.children): + if n.start_pos > name.start_pos: + break + if n == ';': + start_index = index + 1 + nodes = error_node.children[start_index:] + first_name = nodes[0].get_first_leaf().value + + # Make it possible to infer stuff like `import foo.` or + # `from foo.bar`. + if first_name in ('from', 'import'): + is_import_from = first_name == 'from' + level, names = helpers.parse_dotted_names( + nodes, + is_import_from=is_import_from, + until_node=name, + ) + return Importer( + context.inference_state, names, context.get_root_context(), level).follow() + return None + + + def iter_module_names(inference_state, module_context, search_path, + module_cls=ImportName, add_builtin_modules=True): + """ + Get the names of all modules in the search_path. This means file names + and not names defined in the files.
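+ E.g. a search path directory that contains ``foo.py`` and a package + ``bar/`` contributes the names ``foo`` and ``bar``.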
+ """ + # add builtin module names + if add_builtin_modules: + for name in inference_state.compiled_subprocess.get_builtin_module_names(): + yield module_cls(module_context, name) + + for name in inference_state.compiled_subprocess.iter_module_names(search_path): + yield module_cls(module_context, name) diff --git a/lib/python3.10/site-packages/jedi/inference/lazy_value.py b/lib/python3.10/site-packages/jedi/inference/lazy_value.py new file mode 100644 index 0000000000000000000000000000000000000000..b149f21e69570c2538860009dd742c18cd3b7754 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/lazy_value.py @@ -0,0 +1,61 @@ +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.common import monkeypatch + + +class AbstractLazyValue: + def __init__(self, data, min=1, max=1): + self.data = data + self.min = min + self.max = max + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.data) + + def infer(self): + raise NotImplementedError + + +class LazyKnownValue(AbstractLazyValue): + """data is a Value.""" + def infer(self): + return ValueSet([self.data]) + + +class LazyKnownValues(AbstractLazyValue): + """data is a ValueSet.""" + def infer(self): + return self.data + + +class LazyUnknownValue(AbstractLazyValue): + def __init__(self, min=1, max=1): + super().__init__(None, min, max) + + def infer(self): + return NO_VALUES + + +class LazyTreeValue(AbstractLazyValue): + def __init__(self, context, node, min=1, max=1): + super().__init__(node, min, max) + self.context = context + # We need to save the predefined names. It's an unfortunate side effect + # that needs to be tracked otherwise results will be wrong. + self._predefined_names = dict(context.predefined_names) + + def infer(self): + with monkeypatch(self.context, 'predefined_names', self._predefined_names): + return self.context.infer_node(self.data) + + +def get_merged_lazy_value(lazy_values): + if len(lazy_values) > 1: + return MergedLazyValues(lazy_values) + else: + return lazy_values[0] + + +class MergedLazyValues(AbstractLazyValue): + """data is a list of lazy values.""" + def infer(self): + return ValueSet.from_sets(l.infer() for l in self.data) diff --git a/lib/python3.10/site-packages/jedi/inference/names.py b/lib/python3.10/site-packages/jedi/inference/names.py new file mode 100644 index 0000000000000000000000000000000000000000..c3896923833a29214456dce0cbad5b9a84d339d3 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/names.py @@ -0,0 +1,677 @@ +from abc import abstractmethod +from inspect import Parameter +from typing import Optional, Tuple + +from parso.tree import search_ancestor + +from jedi.parser_utils import find_statement_documentation, clean_scope_docstring +from jedi.inference.utils import unite +from jedi.inference.base_value import ValueSet, NO_VALUES +from jedi.inference.cache import inference_state_method_cache +from jedi.inference import docstrings +from jedi.cache import memoize_method +from jedi.inference.helpers import deep_ast_copy, infer_call_of_leaf +from jedi.plugins import plugin_manager + + +def _merge_name_docs(names): + doc = '' + for name in names: + if doc: + # In case we have multiple values, just return all of them + # separated by a few dashes. 
+ doc += '\n' + '-' * 30 + '\n' + doc += name.py__doc__() + return doc + + + class AbstractNameDefinition: + start_pos: Optional[Tuple[int, int]] = None + string_name: str + parent_context = None + tree_name = None + is_value_name = True + """ + Used for the Jedi API to know if it's a keyword or an actual name. + """ + + @abstractmethod + def infer(self): + raise NotImplementedError + + @abstractmethod + def goto(self): + # Typically names are already definitions and therefore a goto on that + # name will always result in itself. + return {self} + + def get_qualified_names(self, include_module_names=False): + qualified_names = self._get_qualified_names() + if qualified_names is None or not include_module_names: + return qualified_names + + module_names = self.get_root_context().string_names + if module_names is None: + return None + return module_names + qualified_names + + def _get_qualified_names(self): + # By default, a name has no qualified names. + return None + + def get_root_context(self): + return self.parent_context.get_root_context() + + def get_public_name(self): + return self.string_name + + def __repr__(self): + if self.start_pos is None: + return '<%s: string_name=%s>' % (self.__class__.__name__, self.string_name) + return '<%s: string_name=%s start_pos=%s>' % (self.__class__.__name__, + self.string_name, self.start_pos) + + def is_import(self): + return False + + def py__doc__(self): + return '' + + @property + def api_type(self): + return self.parent_context.api_type + + def get_defining_qualified_value(self): + """ + Returns either None or the value that is public and qualified. Won't + return a function, because a name in a function is never public. + """ + return None + + + class AbstractArbitraryName(AbstractNameDefinition): + """ + When you e.g. want to complete dict keys, you probably want to complete + string literals, which is not really a name, but for Jedi we use this + concept of Name for completions as well. + """ + is_value_name = False + + def __init__(self, inference_state, string): + self.inference_state = inference_state + self.string_name = string + self.parent_context = inference_state.builtins_module + + def infer(self): + return NO_VALUES + + + class AbstractTreeName(AbstractNameDefinition): + def __init__(self, parent_context, tree_name): + self.parent_context = parent_context + self.tree_name = tree_name + + def get_qualified_names(self, include_module_names=False): + import_node = search_ancestor(self.tree_name, 'import_name', 'import_from') + # For import nodes we cannot just have names, because it's very unclear + # what they would look like. For now we just ignore them in most cases. + # In case of level == 1, it always works, because it's like a submodule + # lookup. + if import_node is not None and not (import_node.level == 1 + and self.get_root_context().get_value().is_package()): + # TODO improve the situation for when level is present.
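+ # e.g. for `import os.path`, the name `path` gets the qualified + # names ('os', 'path')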
+ if include_module_names and not import_node.level: + return tuple(n.value for n in import_node.get_path_for_name(self.tree_name)) + else: + return None + + return super().get_qualified_names(include_module_names) + + def _get_qualified_names(self): + parent_names = self.parent_context.get_qualified_names() + if parent_names is None: + return None + return parent_names + (self.tree_name.value,) + + def get_defining_qualified_value(self): + if self.is_import(): + raise NotImplementedError("Shouldn't really happen, please report") + elif self.parent_context: + return self.parent_context.get_value() # Might be None + return None + + def goto(self): + context = self.parent_context + name = self.tree_name + definition = name.get_definition(import_name_always=True) + if definition is not None: + type_ = definition.type + if type_ == 'expr_stmt': + # Only take the parent, because if it's more complicated than just + # a name it's something you can "goto" again. + is_simple_name = name.parent.type not in ('power', 'trailer') + if is_simple_name: + return [self] + elif type_ in ('import_from', 'import_name'): + from jedi.inference.imports import goto_import + module_names = goto_import(context, name) + return module_names + else: + return [self] + else: + from jedi.inference.imports import follow_error_node_imports_if_possible + values = follow_error_node_imports_if_possible(context, name) + if values is not None: + return [value.name for value in values] + + par = name.parent + node_type = par.type + if node_type == 'argument' and par.children[1] == '=' and par.children[0] == name: + # Named param goto. + trailer = par.parent + if trailer.type == 'arglist': + trailer = trailer.parent + if trailer.type != 'classdef': + if trailer.type == 'decorator': + value_set = context.infer_node(trailer.children[1]) + else: + i = trailer.parent.children.index(trailer) + to_infer = trailer.parent.children[:i] + if to_infer[0] == 'await': + to_infer.pop(0) + value_set = context.infer_node(to_infer[0]) + from jedi.inference.syntax_tree import infer_trailer + for trailer in to_infer[1:]: + value_set = infer_trailer(context, value_set, trailer) + param_names = [] + for value in value_set: + for signature in value.get_signatures(): + for param_name in signature.get_param_names(): + if param_name.string_name == name.value: + param_names.append(param_name) + return param_names + elif node_type == 'dotted_name': # Is a decorator. 
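+ # e.g. goto on `b` in `@a.b.c`: copy the dotted name, cut it down + # to the leading `a`, infer that and do the goto on the result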
+ index = par.children.index(name) + if index > 0: + new_dotted = deep_ast_copy(par) + new_dotted.children[index - 1:] = [] + values = context.infer_node(new_dotted) + return unite( + value.goto(name, name_context=context) + for value in values + ) + + if node_type == 'trailer' and par.children[0] == '.': + values = infer_call_of_leaf(context, name, cut_own_trailer=True) + return values.goto(name, name_context=context) + else: + stmt = search_ancestor( + name, 'expr_stmt', 'lambdef' + ) or name + if stmt.type == 'lambdef': + stmt = name + return context.goto(name, position=stmt.start_pos) + + def is_import(self): + imp = search_ancestor(self.tree_name, 'import_from', 'import_name') + return imp is not None + + @property + def string_name(self): + return self.tree_name.value + + @property + def start_pos(self): + return self.tree_name.start_pos + + +class ValueNameMixin: + def infer(self): + return ValueSet([self._value]) + + def py__doc__(self): + doc = self._value.py__doc__() + if not doc and self._value.is_stub(): + from jedi.inference.gradual.conversion import convert_names + names = convert_names([self], prefer_stub_to_compiled=False) + if self not in names: + return _merge_name_docs(names) + return doc + + def _get_qualified_names(self): + return self._value.get_qualified_names() + + def get_root_context(self): + if self.parent_context is None: # A module + return self._value.as_context() + return super().get_root_context() + + def get_defining_qualified_value(self): + context = self.parent_context + if context is not None and (context.is_module() or context.is_class()): + return self.parent_context.get_value() # Might be None + return None + + @property + def api_type(self): + return self._value.api_type + + +class ValueName(ValueNameMixin, AbstractTreeName): + def __init__(self, value, tree_name): + super().__init__(value.parent_context, tree_name) + self._value = value + + def goto(self): + return ValueSet([self._value.name]) + + +class TreeNameDefinition(AbstractTreeName): + _API_TYPES = dict( + import_name='module', + import_from='module', + funcdef='function', + param='param', + classdef='class', + ) + + def infer(self): + # Refactor this, should probably be here. + from jedi.inference.syntax_tree import tree_name_to_values + return tree_name_to_values( + self.parent_context.inference_state, + self.parent_context, + self.tree_name + ) + + @property + def api_type(self): + definition = self.tree_name.get_definition(import_name_always=True) + if definition is None: + return 'statement' + return self._API_TYPES.get(definition.type, 'statement') + + def assignment_indexes(self): + """ + Returns an array of tuple(int, node) of the indexes that are used in + tuple assignments. + + For example if the name is ``y`` in the following code:: + + x, (y, z) = 2, '' + + would result in ``[(1, xyz_node), (0, yz_node)]``. 
+ + When searching for b in the case ``a, *b, c = [...]`` it will return:: + + [(slice(1, -1), abc_node)] + """ + indexes = [] + is_star_expr = False + node = self.tree_name.parent + compare = self.tree_name + while node is not None: + if node.type in ('testlist', 'testlist_comp', 'testlist_star_expr', 'exprlist'): + for i, child in enumerate(node.children): + if child == compare: + index = int(i / 2) + if is_star_expr: + from_end = int((len(node.children) - i) / 2) + index = slice(index, -from_end) + indexes.insert(0, (index, node)) + break + else: + raise LookupError("Couldn't find the assignment.") + is_star_expr = False + elif node.type == 'star_expr': + is_star_expr = True + elif node.type in ('expr_stmt', 'sync_comp_for'): + break + + compare = node + node = node.parent + return indexes + + @property + def inference_state(self): + # Used by the cache function below + return self.parent_context.inference_state + + @inference_state_method_cache(default='') + def py__doc__(self): + api_type = self.api_type + if api_type in ('function', 'class', 'property'): + if self.parent_context.get_root_context().is_stub(): + from jedi.inference.gradual.conversion import convert_names + names = convert_names([self], prefer_stub_to_compiled=False) + if self not in names: + return _merge_name_docs(names) + + # Make sure the names are not TreeNameDefinitions anymore. + return clean_scope_docstring(self.tree_name.get_definition()) + + if api_type == 'module': + names = self.goto() + if self not in names: + return _merge_name_docs(names) + + if api_type == 'statement' and self.tree_name.is_definition(): + return find_statement_documentation(self.tree_name.get_definition()) + return '' + + + class _ParamMixin: + def maybe_positional_argument(self, include_star=True): + options = [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD] + if include_star: + options.append(Parameter.VAR_POSITIONAL) + return self.get_kind() in options + + def maybe_keyword_argument(self, include_stars=True): + options = [Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD] + if include_stars: + options.append(Parameter.VAR_KEYWORD) + return self.get_kind() in options + + def _kind_string(self): + kind = self.get_kind() + if kind == Parameter.VAR_POSITIONAL: # *args + return '*' + if kind == Parameter.VAR_KEYWORD: # **kwargs + return '**' + return '' + + def get_qualified_names(self, include_module_names=False): + return None + + + class ParamNameInterface(_ParamMixin): + api_type = 'param' + + def get_kind(self): + raise NotImplementedError + + def to_string(self): + raise NotImplementedError + + def get_executed_param_name(self): + """ + For dealing with type inference and working around the graph, we + sometimes want to have the param name of the execution. This feels a + bit strange and we might have to refactor at some point. + + For now however it exists to avoid inferring params when we don't really + need them (e.g. when we can just use annotations instead).
+ """ + return None + + @property + def star_count(self): + kind = self.get_kind() + if kind == Parameter.VAR_POSITIONAL: + return 1 + if kind == Parameter.VAR_KEYWORD: + return 2 + return 0 + + def infer_default(self): + return NO_VALUES + + +class BaseTreeParamName(ParamNameInterface, AbstractTreeName): + annotation_node = None + default_node = None + + def to_string(self): + output = self._kind_string() + self.get_public_name() + annotation = self.annotation_node + default = self.default_node + if annotation is not None: + output += ': ' + annotation.get_code(include_prefix=False) + if default is not None: + output += '=' + default.get_code(include_prefix=False) + return output + + def get_public_name(self): + name = self.string_name + if name.startswith('__'): + # Params starting with __ are an equivalent to positional only + # variables in typeshed. + name = name[2:] + return name + + def goto(self, **kwargs): + return [self] + + +class _ActualTreeParamName(BaseTreeParamName): + def __init__(self, function_value, tree_name): + super().__init__( + function_value.get_default_param_context(), tree_name) + self.function_value = function_value + + def _get_param_node(self): + return search_ancestor(self.tree_name, 'param') + + @property + def annotation_node(self): + return self._get_param_node().annotation + + def infer_annotation(self, execute_annotation=True, ignore_stars=False): + from jedi.inference.gradual.annotation import infer_param + values = infer_param( + self.function_value, self._get_param_node(), + ignore_stars=ignore_stars) + if execute_annotation: + values = values.execute_annotation() + return values + + def infer_default(self): + node = self.default_node + if node is None: + return NO_VALUES + return self.parent_context.infer_node(node) + + @property + def default_node(self): + return self._get_param_node().default + + def get_kind(self): + tree_param = self._get_param_node() + if tree_param.star_count == 1: # *args + return Parameter.VAR_POSITIONAL + if tree_param.star_count == 2: # **kwargs + return Parameter.VAR_KEYWORD + + # Params starting with __ are an equivalent to positional only + # variables in typeshed. 
+ if tree_param.name.value.startswith('__'): + return Parameter.POSITIONAL_ONLY + + parent = tree_param.parent + param_appeared = False + for p in parent.children: + if param_appeared: + if p == '/': + return Parameter.POSITIONAL_ONLY + else: + if p == '*': + return Parameter.KEYWORD_ONLY + if p.type == 'param': + if p.star_count: + return Parameter.KEYWORD_ONLY + if p == tree_param: + param_appeared = True + return Parameter.POSITIONAL_OR_KEYWORD + + def infer(self): + values = self.infer_annotation() + if values: + return values + + doc_params = docstrings.infer_param(self.function_value, self._get_param_node()) + return doc_params + + +class AnonymousParamName(_ActualTreeParamName): + @plugin_manager.decorate(name='goto_anonymous_param') + def goto(self): + return super().goto() + + @plugin_manager.decorate(name='infer_anonymous_param') + def infer(self): + values = super().infer() + if values: + return values + from jedi.inference.dynamic_params import dynamic_param_lookup + param = self._get_param_node() + values = dynamic_param_lookup(self.function_value, param.position_index) + if values: + return values + + if param.star_count == 1: + from jedi.inference.value.iterable import FakeTuple + value = FakeTuple(self.function_value.inference_state, []) + elif param.star_count == 2: + from jedi.inference.value.iterable import FakeDict + value = FakeDict(self.function_value.inference_state, {}) + elif param.default is None: + return NO_VALUES + else: + return self.function_value.parent_context.infer_node(param.default) + return ValueSet({value}) + + +class ParamName(_ActualTreeParamName): + def __init__(self, function_value, tree_name, arguments): + super().__init__(function_value, tree_name) + self.arguments = arguments + + def infer(self): + values = super().infer() + if values: + return values + + return self.get_executed_param_name().infer() + + def get_executed_param_name(self): + from jedi.inference.param import get_executed_param_names + params_names = get_executed_param_names(self.function_value, self.arguments) + return params_names[self._get_param_node().position_index] + + +class ParamNameWrapper(_ParamMixin): + def __init__(self, param_name): + self._wrapped_param_name = param_name + + def __getattr__(self, name): + return getattr(self._wrapped_param_name, name) + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self._wrapped_param_name) + + +class ImportName(AbstractNameDefinition): + start_pos = (1, 0) + _level = 0 + + def __init__(self, parent_context, string_name): + self._from_module_context = parent_context + self.string_name = string_name + + def get_qualified_names(self, include_module_names=False): + if include_module_names: + if self._level: + assert self._level == 1, "Everything else is not supported for now" + module_names = self._from_module_context.string_names + if module_names is None: + return module_names + return module_names + (self.string_name,) + return (self.string_name,) + return () + + @property + def parent_context(self): + m = self._from_module_context + import_values = self.infer() + if not import_values: + return m + # It's almost always possible to find the import or to not find it. The + # importing returns only one value, pretty much always. 
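+ # so taking an arbitrary element of the set here is acceptable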
+ return next(iter(import_values)).as_context() + + @memoize_method + def infer(self): + from jedi.inference.imports import Importer + m = self._from_module_context + return Importer(m.inference_state, [self.string_name], m, level=self._level).follow() + + def goto(self): + return [m.name for m in self.infer()] + + @property + def api_type(self): + return 'module' + + def py__doc__(self): + return _merge_name_docs(self.goto()) + + +class SubModuleName(ImportName): + _level = 1 + + +class NameWrapper: + def __init__(self, wrapped_name): + self._wrapped_name = wrapped_name + + def __getattr__(self, name): + return getattr(self._wrapped_name, name) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self._wrapped_name) + + +class StubNameMixin: + def py__doc__(self): + from jedi.inference.gradual.conversion import convert_names + # Stubs are not complicated and we can just follow simple statements + # that have an equals in them, because they typically make something + # else public. See e.g. stubs for `requests`. + names = [self] + if self.api_type == 'statement' and '=' in self.tree_name.get_definition().children: + names = [v.name for v in self.infer()] + + names = convert_names(names, prefer_stub_to_compiled=False) + if self in names: + return super().py__doc__() + else: + # We have signatures ourselves in stubs, so don't use signatures + # from the implementation. + return _merge_name_docs(names) + + +# From here on down we make looking up the sys.version_info fast. +class StubName(StubNameMixin, TreeNameDefinition): + def infer(self): + inferred = super().infer() + if self.string_name == 'version_info' and self.get_root_context().py__name__() == 'sys': + from jedi.inference.gradual.stub_value import VersionInfo + return ValueSet(VersionInfo(c) for c in inferred) + return inferred + + +class ModuleName(ValueNameMixin, AbstractNameDefinition): + start_pos = 1, 0 + + def __init__(self, value, name): + self._value = value + self._name = name + + @property + def string_name(self): + return self._name + + +class StubModuleName(StubNameMixin, ModuleName): + pass diff --git a/lib/python3.10/site-packages/jedi/inference/param.py b/lib/python3.10/site-packages/jedi/inference/param.py new file mode 100644 index 0000000000000000000000000000000000000000..1f296215d6685a1804dc61b8d79e828eab3f3aae --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/param.py @@ -0,0 +1,257 @@ +from collections import defaultdict +from inspect import Parameter + +from jedi import debug +from jedi.inference.utils import PushBackIterator +from jedi.inference import analysis +from jedi.inference.lazy_value import LazyKnownValue, \ + LazyTreeValue, LazyUnknownValue +from jedi.inference.value import iterable +from jedi.inference.names import ParamName + + +def _add_argument_issue(error_name, lazy_value, message): + if isinstance(lazy_value, LazyTreeValue): + node = lazy_value.data + if node.parent.type == 'argument': + node = node.parent + return analysis.add(lazy_value.context, error_name, node, message) + + +class ExecutedParamName(ParamName): + def __init__(self, function_value, arguments, param_node, lazy_value, is_default=False): + super().__init__(function_value, param_node.name, arguments=arguments) + self._lazy_value = lazy_value + self._is_default = is_default + + def infer(self): + return self._lazy_value.infer() + + def matches_signature(self): + if self._is_default: + return True + argument_values = self.infer().py__class__() + if self.get_kind() in (Parameter.VAR_POSITIONAL, 
Parameter.VAR_KEYWORD): + return True + annotations = self.infer_annotation(execute_annotation=False) + if not annotations: + # If we cannot infer annotations - or there aren't any - pretend + # that the signature matches. + return True + matches = any(c1.is_sub_class_of(c2) + for c1 in argument_values + for c2 in annotations.gather_annotation_classes()) + debug.dbg("param compare %s: %s <=> %s", + matches, argument_values, annotations, color='BLUE') + return matches + + def __repr__(self): + return '<%s: %s>' % (self.__class__.__name__, self.string_name) + + +def get_executed_param_names_and_issues(function_value, arguments): + """ + Return a tuple of: + - a list of `ExecutedParamName`s corresponding to the arguments of the + function execution `function_value`, containing the inferred value of + those arguments (whether explicit or default) + - a list of the issues encountered while building that list + + For example, given: + ``` + def foo(a, b, c=None, d='d'): ... + + foo(42, c='c') + ``` + + Then for the execution of `foo`, this will return a tuple containing: + - a list with entries for each parameter a, b, c & d; the entries for a, + c, & d will have their values (42, 'c' and 'd' respectively) included. + - a list with a single entry about the lack of a value for `b` + """ + def too_many_args(argument): + m = _error_argument_count(funcdef, len(unpacked_va)) + # Just report an error for the first param that is not needed (like + # cPython). + if arguments.get_calling_nodes(): + # There might not be a valid calling node so check for that first. + issues.append( + _add_argument_issue( + 'type-error-too-many-arguments', + argument, + message=m + ) + ) + else: + issues.append(None) + debug.warning('non-public warning: %s', m) + + issues = [] # List[Optional[analysis issue]] + result_params = [] + param_dict = {} + funcdef = function_value.tree_node + # Default params are part of the value where the function was defined. + # This means that they might have access on class variables that the + # function itself doesn't have. + default_param_context = function_value.get_default_param_context() + + for param in funcdef.get_params(): + param_dict[param.name.value] = param + unpacked_va = list(arguments.unpack(funcdef)) + var_arg_iterator = PushBackIterator(iter(unpacked_va)) + + non_matching_keys = defaultdict(lambda: []) + keys_used = {} + keys_only = False + had_multiple_value_error = False + for param in funcdef.get_params(): + # The value and key can both be null. There, the defaults apply. + # args / kwargs will just be empty arrays / dicts, respectively. + # Wrong value count is just ignored. If you try to test cases that are + # not allowed in Python, Jedi will maybe not show any completions. + is_default = False + key, argument = next(var_arg_iterator, (None, None)) + while key is not None: + keys_only = True + try: + key_param = param_dict[key] + except KeyError: + non_matching_keys[key] = argument + else: + if key in keys_used: + had_multiple_value_error = True + m = ("TypeError: %s() got multiple values for keyword argument '%s'." 
+ % (funcdef.name, key)) + for contextualized_node in arguments.get_calling_nodes(): + issues.append( + analysis.add(contextualized_node.context, + 'type-error-multiple-values', + contextualized_node.node, message=m) + ) + else: + keys_used[key] = ExecutedParamName( + function_value, arguments, key_param, argument) + key, argument = next(var_arg_iterator, (None, None)) + + try: + result_params.append(keys_used[param.name.value]) + continue + except KeyError: + pass + + if param.star_count == 1: + # *args param + lazy_value_list = [] + if argument is not None: + lazy_value_list.append(argument) + for key, argument in var_arg_iterator: + # Iterate until a key argument is found. + if key: + var_arg_iterator.push_back((key, argument)) + break + lazy_value_list.append(argument) + seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list) + result_arg = LazyKnownValue(seq) + elif param.star_count == 2: + if argument is not None: + too_many_args(argument) + # **kwargs param + dct = iterable.FakeDict(function_value.inference_state, dict(non_matching_keys)) + result_arg = LazyKnownValue(dct) + non_matching_keys = {} + else: + # normal param + if argument is None: + # No value: Return an empty container + if param.default is None: + result_arg = LazyUnknownValue() + if not keys_only: + for contextualized_node in arguments.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + issues.append( + analysis.add( + contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, + message=m, + ) + ) + else: + result_arg = LazyTreeValue(default_param_context, param.default) + is_default = True + else: + result_arg = argument + + result_params.append(ExecutedParamName( + function_value, arguments, param, result_arg, is_default=is_default + )) + if not isinstance(result_arg, LazyUnknownValue): + keys_used[param.name.value] = result_params[-1] + + if keys_only: + # All arguments should be handed over to the next function. It's not + # about the values inside, it's about the names. Jedi needs to know that + # there's nothing to find for certain names. + for k in set(param_dict) - set(keys_used): + param = param_dict[k] + + if not (non_matching_keys or had_multiple_value_error + or param.star_count or param.default): + # add a warning only if there's not another one. + for contextualized_node in arguments.get_calling_nodes(): + m = _error_argument_count(funcdef, len(unpacked_va)) + issues.append( + analysis.add(contextualized_node.context, + 'type-error-too-few-arguments', + contextualized_node.node, message=m) + ) + + for key, lazy_value in non_matching_keys.items(): + m = "TypeError: %s() got an unexpected keyword argument '%s'." \ + % (funcdef.name, key) + issues.append( + _add_argument_issue( + 'type-error-keyword-argument', + lazy_value, + message=m + ) + ) + + remaining_arguments = list(var_arg_iterator) + if remaining_arguments: + first_key, lazy_value = remaining_arguments[0] + too_many_args(lazy_value) + return result_params, issues + + + def get_executed_param_names(function_value, arguments): + """ + Return a list of `ExecutedParamName`s corresponding to the arguments of the + function execution `function_value`, containing the inferred value of those + arguments (whether explicit or default). Any issues building this list (for + example required arguments which are missing in the invocation) are ignored. + + For example, given: + ``` + def foo(a, b, c=None, d='d'): ...
+ + foo(42, c='c') + ``` + + Then for the execution of `foo`, this will return a list containing entries + for each parameter a, b, c & d; the entries for a, c, & d will have their + values (42, 'c' and 'd' respectively) included. + """ + return get_executed_param_names_and_issues(function_value, arguments)[0] + + + def _error_argument_count(funcdef, actual_count): + params = funcdef.get_params() + default_arguments = sum(1 for p in params if p.default or p.star_count) + + if default_arguments == 0: + before = 'exactly ' + else: + before = 'from %s to ' % (len(params) - default_arguments) + return ('TypeError: %s() takes %s%s arguments (%s given).' + % (funcdef.name, before, len(params), actual_count)) diff --git a/lib/python3.10/site-packages/jedi/inference/parser_cache.py b/lib/python3.10/site-packages/jedi/inference/parser_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..c9b9b2bd6d10aa9a1620ae3cb8e9c876e48304ad --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/parser_cache.py @@ -0,0 +1,6 @@ +from jedi.inference.cache import inference_state_function_cache + + +@inference_state_function_cache() +def get_yield_exprs(inference_state, funcdef): + return list(funcdef.iter_yield_exprs()) diff --git a/lib/python3.10/site-packages/jedi/inference/recursion.py b/lib/python3.10/site-packages/jedi/inference/recursion.py new file mode 100644 index 0000000000000000000000000000000000000000..cc2418738b64adb4a99801add1ffb60486554f8a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/recursion.py @@ -0,0 +1,153 @@ +""" +Recursions are the recipe of |jedi| to conquer Python code. However, someone +must stop recursions going mad. Some settings are here to make |jedi| stop at +the right time. You can read more about them :ref:`here <settings-recursion>`. + +In addition to the internal ``jedi.inference.cache``, this module also makes +|jedi| not thread-safe, because ``execution_recursion_decorator`` uses class +variables to count the function calls. + +.. _settings-recursion: + +Settings +~~~~~~~~~~ + +Recursion settings are important if you don't want extremely +recursive python code to go absolutely crazy. + +The default values are based on experiments while completing the |jedi| library +itself (inception!). But I don't think there's any other Python library that +uses recursion in a similarly extreme way. Completion should also be fast and +therefore the quality might not always be maximal. + +.. autodata:: recursion_limit +.. autodata:: total_function_execution_limit +.. autodata:: per_function_execution_limit +.. autodata:: per_function_recursion_limit +""" + +from contextlib import contextmanager + +from jedi import debug +from jedi.inference.base_value import NO_VALUES + + +recursion_limit = 15 +""" +Like :func:`sys.getrecursionlimit()`, just for |jedi|. +""" +total_function_execution_limit = 200 +""" +This is a hard limit of how many non-builtin functions can be executed. +""" +per_function_execution_limit = 6 +""" +The maximal amount of times a specific function may be executed. +""" +per_function_recursion_limit = 2 +""" +A function may not be executed more than this number of times recursively. +""" + + +class RecursionDetector: + def __init__(self): + self.pushed_nodes = [] + + +@contextmanager +def execution_allowed(inference_state, node): + """ + A context manager to detect recursions in statements. In a recursion, a + statement at the same place in the same module may not be executed twice.
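+ Yields True if execution may proceed, and False if this node is already + being executed further up the stack.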
+ """ + pushed_nodes = inference_state.recursion_detector.pushed_nodes + + if node in pushed_nodes: + debug.warning('catched stmt recursion: %s @%s', node, + getattr(node, 'start_pos', None)) + yield False + else: + try: + pushed_nodes.append(node) + yield True + finally: + pushed_nodes.pop() + + +def execution_recursion_decorator(default=NO_VALUES): + def decorator(func): + def wrapper(self, **kwargs): + detector = self.inference_state.execution_recursion_detector + limit_reached = detector.push_execution(self) + try: + if limit_reached: + result = default + else: + result = func(self, **kwargs) + finally: + detector.pop_execution() + return result + return wrapper + return decorator + + +class ExecutionRecursionDetector: + """ + Catches recursions of executions. + """ + def __init__(self, inference_state): + self._inference_state = inference_state + + self._recursion_level = 0 + self._parent_execution_funcs = [] + self._funcdef_execution_counts = {} + self._execution_count = 0 + + def pop_execution(self): + self._parent_execution_funcs.pop() + self._recursion_level -= 1 + + def push_execution(self, execution): + funcdef = execution.tree_node + + # These two will be undone in pop_execution. + self._recursion_level += 1 + self._parent_execution_funcs.append(funcdef) + + module_context = execution.get_root_context() + + if module_context.is_builtins_module(): + # We have control over builtins so we know they are not recursing + # like crazy. Therefore we just let them execute always, because + # they usually just help a lot with getting good results. + return False + + if self._recursion_level > recursion_limit: + debug.warning('Recursion limit (%s) reached', recursion_limit) + return True + + if self._execution_count >= total_function_execution_limit: + debug.warning('Function execution limit (%s) reached', total_function_execution_limit) + return True + self._execution_count += 1 + + if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit: + if module_context.py__name__() == 'typing': + return False + debug.warning( + 'Per function execution limit (%s) reached: %s', + per_function_execution_limit, + funcdef + ) + return True + self._funcdef_execution_counts[funcdef] += 1 + + if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit: + debug.warning( + 'Per function recursion limit (%s) reached: %s', + per_function_recursion_limit, + funcdef + ) + return True + return False diff --git a/lib/python3.10/site-packages/jedi/inference/references.py b/lib/python3.10/site-packages/jedi/inference/references.py new file mode 100644 index 0000000000000000000000000000000000000000..d01890bd97819ee1e780ecb8dd688c2cfaff30ec --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/references.py @@ -0,0 +1,319 @@ +import os +import re + +from parso import python_bytes_to_unicode + +from jedi.debug import dbg +from jedi.file_io import KnownContentFileIO, FolderIO +from jedi.inference.names import SubModuleName +from jedi.inference.imports import load_module_from_path +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.gradual.conversion import convert_names + +_IGNORE_FOLDERS = ('.tox', '.venv', '.mypy_cache', 'venv', '__pycache__') + +_OPENED_FILE_LIMIT = 2000 +""" +Stats from a 2016 Lenovo Notebook running Linux: +With os.walk, it takes about 10s to scan 11'000 files (without filesystem +caching). Once cached it only takes 5s. So it is expected that reading all +those files might take a few seconds, but not a lot more. 
+""" +_PARSED_FILE_LIMIT = 30 +""" +For now we keep the amount of parsed files really low, since parsing might take +easily 100ms for bigger files. +""" + + +def _resolve_names(definition_names, avoid_names=()): + for name in definition_names: + if name in avoid_names: + # Avoiding recursions here, because goto on a module name lands + # on the same module. + continue + + if not isinstance(name, SubModuleName): + # SubModuleNames are not actually existing names but created + # names when importing something like `import foo.bar.baz`. + yield name + + if name.api_type == 'module': + yield from _resolve_names(name.goto(), definition_names) + + +def _dictionarize(names): + return dict( + (n if n.tree_name is None else n.tree_name, n) + for n in names + ) + + +def _find_defining_names(module_context, tree_name): + found_names = _find_names(module_context, tree_name) + + for name in list(found_names): + # Convert from/to stubs, because those might also be usages. + found_names |= set(convert_names( + [name], + only_stubs=not name.get_root_context().is_stub(), + prefer_stub_to_compiled=False + )) + + found_names |= set(_find_global_variables(found_names, tree_name.value)) + for name in list(found_names): + if name.api_type == 'param' or name.tree_name is None \ + or name.tree_name.parent.type == 'trailer': + continue + found_names |= set(_add_names_in_same_context(name.parent_context, name.string_name)) + return set(_resolve_names(found_names)) + + +def _find_names(module_context, tree_name): + name = module_context.create_name(tree_name) + found_names = set(name.goto()) + found_names.add(name) + + return set(_resolve_names(found_names)) + + +def _add_names_in_same_context(context, string_name): + if context.tree_node is None: + return + + until_position = None + while True: + filter_ = ParserTreeFilter( + parent_context=context, + until_position=until_position, + ) + names = set(filter_.get(string_name)) + if not names: + break + yield from names + ordered = sorted(names, key=lambda x: x.start_pos) + until_position = ordered[0].start_pos + + +def _find_global_variables(names, search_name): + for name in names: + if name.tree_name is None: + continue + module_context = name.get_root_context() + try: + method = module_context.get_global_filter + except AttributeError: + continue + else: + for global_name in method().get(search_name): + yield global_name + c = module_context.create_context(global_name.tree_name) + yield from _add_names_in_same_context(c, global_name.string_name) + + +def find_references(module_context, tree_name, only_in_module=False): + inf = module_context.inference_state + search_name = tree_name.value + + # We disable flow analysis, because if we have ifs that are only true in + # certain cases, we want both sides. + try: + inf.flow_analysis_enabled = False + found_names = _find_defining_names(module_context, tree_name) + finally: + inf.flow_analysis_enabled = True + + found_names_dct = _dictionarize(found_names) + + module_contexts = [module_context] + if not only_in_module: + for m in set(d.get_root_context() for d in found_names): + if m != module_context and m.tree_node is not None \ + and inf.project.path in m.py__file__().parents: + module_contexts.append(m) + # For param no search for other modules is necessary. 
+ if only_in_module or any(n.api_type == 'param' for n in found_names): + potential_modules = module_contexts + else: + potential_modules = get_module_contexts_containing_name( + inf, + module_contexts, + search_name, + ) + + non_matching_reference_maps = {} + for module_context in potential_modules: + for name_leaf in module_context.tree_node.get_used_names().get(search_name, []): + new = _dictionarize(_find_names(module_context, name_leaf)) + if any(tree_name in found_names_dct for tree_name in new): + found_names_dct.update(new) + for tree_name in new: + for dct in non_matching_reference_maps.get(tree_name, []): + # A reference that was previously searched for matches + # with a now found name. Merge. + found_names_dct.update(dct) + try: + del non_matching_reference_maps[tree_name] + except KeyError: + pass + else: + for name in new: + non_matching_reference_maps.setdefault(name, []).append(new) + result = found_names_dct.values() + if only_in_module: + return [n for n in result if n.get_root_context() == module_context] + return result + + +def _check_fs(inference_state, file_io, regex): + try: + code = file_io.read() + except FileNotFoundError: + return None + code = python_bytes_to_unicode(code, errors='replace') + if not regex.search(code): + return None + new_file_io = KnownContentFileIO(file_io.path, code) + m = load_module_from_path(inference_state, new_file_io) + if m.is_compiled(): + return None + return m.as_context() + + +def gitignored_paths(folder_io, file_io): + ignored_paths_abs = set() + ignored_paths_rel = set() + + for l in file_io.read().splitlines(): + if not l or l.startswith(b'#') or l.startswith(b'!') or b'*' in l: + continue + + p = l.decode('utf-8', 'ignore').rstrip('/') + if '/' in p: + name = p.lstrip('/') + ignored_paths_abs.add(os.path.join(folder_io.path, name)) + else: + name = p + ignored_paths_rel.add((folder_io.path, name)) + + return ignored_paths_abs, ignored_paths_rel + + +def expand_relative_ignore_paths(folder_io, relative_paths): + curr_path = folder_io.path + return {os.path.join(curr_path, p[1]) for p in relative_paths if curr_path.startswith(p[0])} + + +def recurse_find_python_folders_and_files(folder_io, except_paths=()): + except_paths = set(except_paths) + except_paths_relative = set() + + for root_folder_io, folder_ios, file_ios in folder_io.walk(): + # Delete folders that we don't want to iterate over. 
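+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# `_check_fs` above only pays for a full parse once a cheap word-boundary
+# regex has confirmed the searched name occurs in the raw file text. The
+# `parse` callable below is a stand-in for that expensive step:
+import re
+
+def prefiltered_parse(code, name, parse=lambda c: c):
+    if re.search(r'\b' + re.escape(name) + r'\b', code) is None:
+        return None  # name absent as a whole word: skip parsing entirely
+    return parse(code)
+
+# prefiltered_parse("x = foobar + 1", "foo") -> None (substring, not a word)
+# prefiltered_parse("x = foo + 1", "foo")    -> "x = foo + 1"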
+ for file_io in file_ios: + path = file_io.path + if path.suffix in ('.py', '.pyi'): + if path not in except_paths: + yield None, file_io + + if path.name == '.gitignore': + ignored_paths_abs, ignored_paths_rel = gitignored_paths( + root_folder_io, file_io + ) + except_paths |= ignored_paths_abs + except_paths_relative |= ignored_paths_rel + + except_paths_relative_expanded = expand_relative_ignore_paths( + root_folder_io, except_paths_relative + ) + + folder_ios[:] = [ + folder_io + for folder_io in folder_ios + if folder_io.path not in except_paths + and folder_io.path not in except_paths_relative_expanded + and folder_io.get_base_name() not in _IGNORE_FOLDERS + ] + for folder_io in folder_ios: + yield folder_io, None + + +def recurse_find_python_files(folder_io, except_paths=()): + for folder_io, file_io in recurse_find_python_folders_and_files(folder_io, except_paths): + if file_io is not None: + yield file_io + + +def _find_python_files_in_sys_path(inference_state, module_contexts): + sys_path = inference_state.get_sys_path() + except_paths = set() + yielded_paths = [m.py__file__() for m in module_contexts] + for module_context in module_contexts: + file_io = module_context.get_value().file_io + if file_io is None: + continue + + folder_io = file_io.get_parent_folder() + while True: + path = folder_io.path + if not any(path.startswith(p) for p in sys_path) or path in except_paths: + break + for file_io in recurse_find_python_files(folder_io, except_paths): + if file_io.path not in yielded_paths: + yield file_io + except_paths.add(path) + folder_io = folder_io.get_parent_folder() + + +def _find_project_modules(inference_state, module_contexts): + except_ = [m.py__file__() for m in module_contexts] + yield from recurse_find_python_files(FolderIO(inference_state.project.path), except_) + + +def get_module_contexts_containing_name(inference_state, module_contexts, name, + limit_reduction=1): + """ + Search a name in the directories of modules. + + :param limit_reduction: Divides the limits on opening/parsing files by this + factor. + """ + # Skip non python modules + for module_context in module_contexts: + if module_context.is_compiled(): + continue + yield module_context + + # Very short names are not searched in other modules for now to avoid lots + # of file lookups. + if len(name) <= 2: + return + + # Currently not used, because there's only `scope=project` and `scope=file` + # At the moment there is no such thing as `scope=sys.path`. 
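+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# The `folder_ios[:] = [...]` slice assignment above is the classic os.walk
+# pruning idiom: mutating the directory list in place keeps the walk from
+# ever descending into the removed folders.
+import os
+
+IGNORED = {'.tox', '.venv', '.mypy_cache', 'venv', '__pycache__'}
+
+def python_files(root):
+    for dirpath, dirnames, filenames in os.walk(root):
+        # Prune in place; os.walk honours the mutated list.
+        dirnames[:] = [d for d in dirnames if d not in IGNORED]
+        for f in filenames:
+            if f.endswith(('.py', '.pyi')):
+                yield os.path.join(dirpath, f)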
+ # file_io_iterator = _find_python_files_in_sys_path(inference_state, module_contexts) + file_io_iterator = _find_project_modules(inference_state, module_contexts) + yield from search_in_file_ios(inference_state, file_io_iterator, name, + limit_reduction=limit_reduction) + + +def search_in_file_ios(inference_state, file_io_iterator, name, + limit_reduction=1, complete=False): + parse_limit = _PARSED_FILE_LIMIT / limit_reduction + open_limit = _OPENED_FILE_LIMIT / limit_reduction + file_io_count = 0 + parsed_file_count = 0 + regex = re.compile(r'\b' + re.escape(name) + (r'' if complete else r'\b')) + for file_io in file_io_iterator: + file_io_count += 1 + m = _check_fs(inference_state, file_io, regex) + if m is not None: + parsed_file_count += 1 + yield m + if parsed_file_count >= parse_limit: + dbg('Hit limit of parsed files: %s', parse_limit) + break + + if file_io_count >= open_limit: + dbg('Hit limit of opened files: %s', open_limit) + break diff --git a/lib/python3.10/site-packages/jedi/inference/signature.py b/lib/python3.10/site-packages/jedi/inference/signature.py new file mode 100644 index 0000000000000000000000000000000000000000..565a269b8caa8c9423f1acf5c6b1220c9d5b2755 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/signature.py @@ -0,0 +1,152 @@ +from inspect import Parameter + +from jedi.cache import memoize_method +from jedi import debug +from jedi import parser_utils + + +class _SignatureMixin: + def to_string(self): + def param_strings(): + is_positional = False + is_kw_only = False + for n in self.get_param_names(resolve_stars=True): + kind = n.get_kind() + is_positional |= kind == Parameter.POSITIONAL_ONLY + if is_positional and kind != Parameter.POSITIONAL_ONLY: + yield '/' + is_positional = False + + if kind == Parameter.VAR_POSITIONAL: + is_kw_only = True + elif kind == Parameter.KEYWORD_ONLY and not is_kw_only: + yield '*' + is_kw_only = True + + yield n.to_string() + + if is_positional: + yield '/' + + s = self.name.string_name + '(' + ', '.join(param_strings()) + ')' + annotation = self.annotation_string + if annotation: + s += ' -> ' + annotation + return s + + +class AbstractSignature(_SignatureMixin): + def __init__(self, value, is_bound=False): + self.value = value + self.is_bound = is_bound + + @property + def name(self): + return self.value.name + + @property + def annotation_string(self): + return '' + + def get_param_names(self, resolve_stars=False): + param_names = self._function_value.get_param_names() + if self.is_bound: + return param_names[1:] + return param_names + + def bind(self, value): + raise NotImplementedError + + def matches_signature(self, arguments): + return True + + def __repr__(self): + if self.value is self._function_value: + return '<%s: %s>' % (self.__class__.__name__, self.value) + return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self._function_value) + + +class TreeSignature(AbstractSignature): + def __init__(self, value, function_value=None, is_bound=False): + super().__init__(value, is_bound) + self._function_value = function_value or value + + def bind(self, value): + return TreeSignature(value, self._function_value, is_bound=True) + + @property + def _annotation(self): + # Classes don't need annotations, even if __init__ has one. They always + # return themselves. 
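+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# `_SignatureMixin.to_string` above re-creates the '/' and '*' markers that
+# CPython itself prints for positional-only and keyword-only parameters:
+import inspect
+
+def f(a, b, /, c, *, d):
+    pass
+
+print(inspect.signature(f))  # -> (a, b, /, c, *, d)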
+ if self.value.is_class(): + return None + return self._function_value.tree_node.annotation + + @property + def annotation_string(self): + a = self._annotation + if a is None: + return '' + return a.get_code(include_prefix=False) + + @memoize_method + def get_param_names(self, resolve_stars=False): + params = self._function_value.get_param_names() + if resolve_stars: + from jedi.inference.star_args import process_params + params = process_params(params) + if self.is_bound: + return params[1:] + return params + + def matches_signature(self, arguments): + from jedi.inference.param import get_executed_param_names_and_issues + executed_param_names, issues = \ + get_executed_param_names_and_issues(self._function_value, arguments) + if issues: + return False + + matches = all(executed_param_name.matches_signature() + for executed_param_name in executed_param_names) + if debug.enable_notice: + tree_node = self._function_value.tree_node + signature = parser_utils.get_signature(tree_node) + if matches: + debug.dbg("Overloading match: %s@%s (%s)", + signature, tree_node.start_pos[0], arguments, color='BLUE') + else: + debug.dbg("Overloading no match: %s@%s (%s)", + signature, tree_node.start_pos[0], arguments, color='BLUE') + return matches + + +class BuiltinSignature(AbstractSignature): + def __init__(self, value, return_string, function_value=None, is_bound=False): + super().__init__(value, is_bound) + self._return_string = return_string + self.__function_value = function_value + + @property + def annotation_string(self): + return self._return_string + + @property + def _function_value(self): + if self.__function_value is None: + return self.value + return self.__function_value + + def bind(self, value): + return BuiltinSignature( + value, self._return_string, + function_value=self.value, + is_bound=True + ) + + +class SignatureWrapper(_SignatureMixin): + def __init__(self, wrapped_signature): + self._wrapped_signature = wrapped_signature + + def __getattr__(self, name): + return getattr(self._wrapped_signature, name) diff --git a/lib/python3.10/site-packages/jedi/inference/star_args.py b/lib/python3.10/site-packages/jedi/inference/star_args.py new file mode 100644 index 0000000000000000000000000000000000000000..71ea7093808a97fbacd895badba27aed575faba6 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/star_args.py @@ -0,0 +1,220 @@ +""" +This module is responsible for inferring *args and **kwargs for signatures. + +This means for example in this case:: + + def foo(a, b, c): ... + + def bar(*args): + return foo(1, *args) + +The signature here for bar should be `bar(b, c)` instead of bar(*args). +""" +from inspect import Parameter + +from parso import tree + +from jedi.inference.utils import to_list +from jedi.inference.names import ParamNameWrapper +from jedi.inference.helpers import is_big_annoying_library + + +def _iter_nodes_for_param(param_name): + from parso.python.tree import search_ancestor + from jedi.inference.arguments import TreeArguments + + execution_context = param_name.parent_context + # Walk up the parso tree to get the FunctionNode we want. We use the parso + # tree rather than going via the execution context so that we're agnostic of + # the specific scope we're evaluating within (i.e: module or function, + # etc.). 
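+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# The module docstring's foo/bar example, reproduced with inspect: once one
+# positional argument is given to `foo`, the *args of `bar` can only stand
+# for foo's remaining parameters, so bar's effective signature is (b, c).
+import inspect
+
+def foo(a, b, c): ...
+
+def bar(*args):
+    return foo(1, *args)
+
+params = list(inspect.signature(foo).parameters.values())
+effective = inspect.Signature(params[1:])  # drop the one given positional
+print('bar' + str(effective))              # -> bar(b, c)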
+ function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef') + module_node = function_node.get_root_node() + start = function_node.children[-1].start_pos + end = function_node.children[-1].end_pos + for name in module_node.get_used_names().get(param_name.string_name): + if start <= name.start_pos < end: + # Is used in the function + argument = name.parent + if argument.type == 'argument' \ + and argument.children[0] == '*' * param_name.star_count: + trailer = search_ancestor(argument, 'trailer') + if trailer is not None: # Make sure we're in a function + context = execution_context.create_context(trailer) + if _goes_to_param_name(param_name, context, name): + values = _to_callables(context, trailer) + + args = TreeArguments.create_cached( + execution_context.inference_state, + context=context, + argument_node=trailer.children[1], + trailer=trailer, + ) + for c in values: + yield c, args + + +def _goes_to_param_name(param_name, context, potential_name): + if potential_name.type != 'name': + return False + from jedi.inference.names import TreeNameDefinition + found = TreeNameDefinition(context, potential_name).goto() + return any(param_name.parent_context == p.parent_context + and param_name.start_pos == p.start_pos + for p in found) + + +def _to_callables(context, trailer): + from jedi.inference.syntax_tree import infer_trailer + + atom_expr = trailer.parent + index = atom_expr.children[0] == 'await' + # Infer atom first + values = context.infer_node(atom_expr.children[index]) + for trailer2 in atom_expr.children[index + 1:]: + if trailer == trailer2: + break + values = infer_trailer(context, values, trailer2) + return values + + +def _remove_given_params(arguments, param_names): + count = 0 + used_keys = set() + for key, _ in arguments.unpack(): + if key is None: + count += 1 + else: + used_keys.add(key) + + for p in param_names: + if count and p.maybe_positional_argument(): + count -= 1 + continue + if p.string_name in used_keys and p.maybe_keyword_argument(): + continue + yield p + + +@to_list +def process_params(param_names, star_count=3): # default means both * and ** + if param_names: + if is_big_annoying_library(param_names[0].parent_context): + # At first this feature can look innocent, but it does a lot of + # type inference in some cases, so we just ditch it. 
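+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# `_remove_given_params` above drops one candidate parameter per positional
+# call argument and removes parameters already bound by keyword. A minimal
+# standalone analogue over plain name lists (ignoring parameter kinds):
+def remove_given(param_names, positional_count, keyword_names):
+    for name in param_names:
+        if positional_count:
+            positional_count -= 1  # consumed by a positional argument
+            continue
+        if name in keyword_names:
+            continue               # already bound by keyword
+        yield name
+
+# list(remove_given(['a', 'b', 'c'], 1, {'c'})) -> ['b']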
+ yield from param_names + return + + used_names = set() + arg_callables = [] + kwarg_callables = [] + + kw_only_names = [] + kwarg_names = [] + arg_names = [] + original_arg_name = None + original_kwarg_name = None + for p in param_names: + kind = p.get_kind() + if kind == Parameter.VAR_POSITIONAL: + if star_count & 1: + arg_callables = _iter_nodes_for_param(p) + original_arg_name = p + elif p.get_kind() == Parameter.VAR_KEYWORD: + if star_count & 2: + kwarg_callables = list(_iter_nodes_for_param(p)) + original_kwarg_name = p + elif kind == Parameter.KEYWORD_ONLY: + if star_count & 2: + kw_only_names.append(p) + elif kind == Parameter.POSITIONAL_ONLY: + if star_count & 1: + yield p + else: + if star_count == 1: + yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) + elif star_count == 2: + kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY)) + else: + used_names.add(p.string_name) + yield p + + # First process *args + longest_param_names = () + found_arg_signature = False + found_kwarg_signature = False + for func_and_argument in arg_callables: + func, arguments = func_and_argument + new_star_count = star_count + if func_and_argument in kwarg_callables: + kwarg_callables.remove(func_and_argument) + else: + new_star_count = 1 + + for signature in func.get_signatures(): + found_arg_signature = True + if new_star_count == 3: + found_kwarg_signature = True + args_for_this_func = [] + for p in process_params( + list(_remove_given_params( + arguments, + signature.get_param_names(resolve_stars=False) + )), new_star_count): + if p.get_kind() == Parameter.VAR_KEYWORD: + kwarg_names.append(p) + elif p.get_kind() == Parameter.VAR_POSITIONAL: + arg_names.append(p) + elif p.get_kind() == Parameter.KEYWORD_ONLY: + kw_only_names.append(p) + else: + args_for_this_func.append(p) + if len(args_for_this_func) > len(longest_param_names): + longest_param_names = args_for_this_func + + for p in longest_param_names: + if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL: + yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY) + else: + if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD: + used_names.add(p.string_name) + yield p + + if not found_arg_signature and original_arg_name is not None: + yield original_arg_name + elif arg_names: + yield arg_names[0] + + # Then process **kwargs + for func, arguments in kwarg_callables: + for signature in func.get_signatures(): + found_kwarg_signature = True + for p in process_params( + list(_remove_given_params( + arguments, + signature.get_param_names(resolve_stars=False) + )), star_count=2): + if p.get_kind() == Parameter.VAR_KEYWORD: + kwarg_names.append(p) + elif p.get_kind() == Parameter.KEYWORD_ONLY: + kw_only_names.append(p) + + for p in kw_only_names: + if p.string_name in used_names: + continue + yield p + used_names.add(p.string_name) + + if not found_kwarg_signature and original_kwarg_name is not None: + yield original_kwarg_name + elif kwarg_names: + yield kwarg_names[0] + + +class ParamNameFixedKind(ParamNameWrapper): + def __init__(self, param_name, new_kind): + super().__init__(param_name) + self._new_kind = new_kind + + def get_kind(self): + return self._new_kind diff --git a/lib/python3.10/site-packages/jedi/inference/syntax_tree.py b/lib/python3.10/site-packages/jedi/inference/syntax_tree.py new file mode 100644 index 0000000000000000000000000000000000000000..294fb7e4c9787514dfd0637c6df719b80c8ce9c5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/syntax_tree.py @@ -0,0 +1,903 @@ +""" +Functions inferring the 
syntax tree. +""" +import copy +import itertools + +from parso.python import tree + +from jedi import debug +from jedi import parser_utils +from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \ + iterator_to_value_set, iterate_values +from jedi.inference.lazy_value import LazyTreeValue +from jedi.inference import compiled +from jedi.inference import recursion +from jedi.inference import analysis +from jedi.inference import imports +from jedi.inference import arguments +from jedi.inference.value import ClassValue, FunctionValue +from jedi.inference.value import iterable +from jedi.inference.value.dynamic_arrays import ListModification, DictModification +from jedi.inference.value import TreeInstance +from jedi.inference.helpers import is_string, is_literal, is_number, \ + get_names_of_node, is_big_annoying_library +from jedi.inference.compiled.access import COMPARISON_OPERATORS +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.gradual.stub_value import VersionInfo +from jedi.inference.gradual import annotation +from jedi.inference.names import TreeNameDefinition +from jedi.inference.context import CompForContext +from jedi.inference.value.decorator import Decoratee +from jedi.plugins import plugin_manager + +operator_to_magic_method = { + '+': '__add__', + '-': '__sub__', + '*': '__mul__', + '@': '__matmul__', + '/': '__truediv__', + '//': '__floordiv__', + '%': '__mod__', + '**': '__pow__', + '<<': '__lshift__', + '>>': '__rshift__', + '&': '__and__', + '|': '__or__', + '^': '__xor__', +} + +reverse_operator_to_magic_method = { + k: '__r' + v[2:] for k, v in operator_to_magic_method.items() +} + + +def _limit_value_infers(func): + """ + This is for now the way how we limit type inference going wild. There are + other ways to ensure recursion limits as well. This is mostly necessary + because of instance (self) access that can be quite tricky to limit. + + I'm still not sure this is the way to go, but it looks okay for now and we + can still go anther way in the future. Tests are there. ~ dave + """ + def wrapper(context, *args, **kwargs): + n = context.tree_node + inference_state = context.inference_state + try: + inference_state.inferred_element_counts[n] += 1 + maximum = 300 + if context.parent_context is None \ + and context.get_value() is inference_state.builtins_module: + # Builtins should have a more generous inference limit. + # It is important that builtins can be executed, otherwise some + # functions that depend on certain builtins features would be + # broken, see e.g. GH #1432 + maximum *= 100 + + if inference_state.inferred_element_counts[n] > maximum: + debug.warning('In value %s there were too many inferences.', n) + return NO_VALUES + except KeyError: + inference_state.inferred_element_counts[n] = 1 + return func(context, *args, **kwargs) + + return wrapper + + +def infer_node(context, element): + if isinstance(context, CompForContext): + return _infer_node(context, element) + + if_stmt = element + while if_stmt is not None: + if_stmt = if_stmt.parent + if if_stmt.type in ('if_stmt', 'for_stmt'): + break + if parser_utils.is_scope(if_stmt): + if_stmt = None + break + predefined_if_name_dict = context.predefined_names.get(if_stmt) + # TODO there's a lot of issues with this one. We actually should do + # this in a different way. Caching should only be active in certain + # cases and this all sucks. 
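+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# The reverse-operator table near the top of this file is derived
+# mechanically: '__add__' becomes '__radd__' by inserting 'r' after the
+# leading dunder underscores.
+forward = {'+': '__add__', '-': '__sub__', '*': '__mul__'}
+reverse = {k: '__r' + v[2:] for k, v in forward.items()}
+assert reverse['+'] == '__radd__' and reverse['*'] == '__rmul__'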
+ if predefined_if_name_dict is None and if_stmt \ + and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis: + if_stmt_test = if_stmt.children[1] + name_dicts = [{}] + # If we already did a check, we don't want to do it again -> If + # value.predefined_names is filled, we stop. + # We don't want to check the if stmt itself, it's just about + # the content. + if element.start_pos > if_stmt_test.end_pos: + # Now we need to check if the names in the if_stmt match the + # names in the suite. + if_names = get_names_of_node(if_stmt_test) + element_names = get_names_of_node(element) + str_element_names = [e.value for e in element_names] + if any(i.value in str_element_names for i in if_names): + for if_name in if_names: + definitions = context.inference_state.infer(context, if_name) + # Every name that has multiple different definitions + # causes the complexity to rise. The complexity should + # never fall below 1. + if len(definitions) > 1: + if len(name_dicts) * len(definitions) > 16: + debug.dbg('Too many options for if branch inference %s.', if_stmt) + # There's only a certain amount of branches + # Jedi can infer, otherwise it will take to + # long. + name_dicts = [{}] + break + + original_name_dicts = list(name_dicts) + name_dicts = [] + for definition in definitions: + new_name_dicts = list(original_name_dicts) + for i, name_dict in enumerate(new_name_dicts): + new_name_dicts[i] = name_dict.copy() + new_name_dicts[i][if_name.value] = ValueSet([definition]) + + name_dicts += new_name_dicts + else: + for name_dict in name_dicts: + name_dict[if_name.value] = definitions + if len(name_dicts) > 1: + result = NO_VALUES + for name_dict in name_dicts: + with context.predefine_names(if_stmt, name_dict): + result |= _infer_node(context, element) + return result + else: + return _infer_node_if_inferred(context, element) + else: + if predefined_if_name_dict: + return _infer_node(context, element) + else: + return _infer_node_if_inferred(context, element) + + +def _infer_node_if_inferred(context, element): + """ + TODO This function is temporary: Merge with infer_node. + """ + parent = element + while parent is not None: + parent = parent.parent + predefined_if_name_dict = context.predefined_names.get(parent) + if predefined_if_name_dict is not None: + return _infer_node(context, element) + return _infer_node_cached(context, element) + + +@inference_state_method_cache(default=NO_VALUES) +def _infer_node_cached(context, element): + return _infer_node(context, element) + + +@debug.increase_indent +@_limit_value_infers +def _infer_node(context, element): + debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context) + inference_state = context.inference_state + typ = element.type + if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'): + return infer_atom(context, element) + elif typ == 'lambdef': + return ValueSet([FunctionValue.from_context(context, element)]) + elif typ == 'expr_stmt': + return infer_expr_stmt(context, element) + elif typ in ('power', 'atom_expr'): + first_child = element.children[0] + children = element.children[1:] + had_await = False + if first_child.type == 'keyword' and first_child.value == 'await': + had_await = True + first_child = children.pop(0) + + value_set = context.infer_node(first_child) + for (i, trailer) in enumerate(children): + if trailer == '**': # has a power operation. 
+ right = context.infer_node(children[i + 1]) + value_set = _infer_comparison( + context, + value_set, + trailer, + right + ) + break + value_set = infer_trailer(context, value_set, trailer) + + if had_await: + return value_set.py__await__().py__stop_iteration_returns() + return value_set + elif typ in ('testlist_star_expr', 'testlist',): + # The implicit tuple in statements. + return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)]) + elif typ in ('not_test', 'factor'): + value_set = context.infer_node(element.children[-1]) + for operator in element.children[:-1]: + value_set = infer_factor(value_set, operator) + return value_set + elif typ == 'test': + # `x if foo else y` case. + return (context.infer_node(element.children[0]) + | context.infer_node(element.children[-1])) + elif typ == 'operator': + # Must be an ellipsis, other operators are not inferred. + if element.value != '...': + origin = element.parent + raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin)) + return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')]) + elif typ == 'dotted_name': + value_set = infer_atom(context, element.children[0]) + for next_name in element.children[2::2]: + value_set = value_set.py__getattribute__(next_name, name_context=context) + return value_set + elif typ == 'eval_input': + return context.infer_node(element.children[0]) + elif typ == 'annassign': + return annotation.infer_annotation(context, element.children[1]) \ + .execute_annotation() + elif typ == 'yield_expr': + if len(element.children) and element.children[1].type == 'yield_arg': + # Implies that it's a yield from. + element = element.children[1].children[1] + generators = context.infer_node(element) \ + .py__getattribute__('__iter__').execute_with_values() + return generators.py__stop_iteration_returns() + + # Generator.send() is not implemented. + return NO_VALUES + elif typ == 'namedexpr_test': + return context.infer_node(element.children[2]) + else: + return infer_or_test(context, element) + + +def infer_trailer(context, atom_values, trailer): + trailer_op, node = trailer.children[:2] + if node == ')': # `arglist` is optional. + node = None + + if trailer_op == '[': + trailer_op, node, _ = trailer.children + return atom_values.get_item( + _infer_subscript_list(context, node), + ContextualizedNode(context, trailer) + ) + else: + debug.dbg('infer_trailer: %s in %s', trailer, atom_values) + if trailer_op == '.': + return atom_values.py__getattribute__( + name_context=context, + name_or_str=node + ) + else: + assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op + args = arguments.TreeArguments(context.inference_state, context, node, trailer) + return atom_values.execute(args) + + +def infer_atom(context, atom): + """ + Basically to process ``atom`` nodes. The parser sometimes doesn't + generate the node (because it has just one child). In that case an atom + might be a name or a literal as well. + """ + state = context.inference_state + if atom.type == 'name': + # This is the first global lookup. 
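+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# `infer_trailer` above folds one trailer at a time, so an expression such
+# as `obj.attr(arg)[0]` is inferred strictly left to right. A toy analogue
+# over concrete Python values instead of value sets:
+def fold_trailers(value, trailers):
+    for kind, payload in trailers:
+        if kind == '.':
+            value = getattr(value, payload)   # attribute trailer
+        elif kind == '(':
+            value = value(*payload)           # call trailer
+        elif kind == '[':
+            value = value[payload]            # subscript trailer
+    return value
+
+# fold_trailers("a,b", [('.', 'split'), ('(', (',',)), ('[', 0)]) -> 'a'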
+ stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom + if stmt.type == 'if_stmt': + if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()): + stmt = atom + elif stmt.type == 'lambdef': + stmt = atom + position = stmt.start_pos + if _is_annotation_name(atom): + # Since Python 3.7 (with from __future__ import annotations), + # annotations are essentially strings and can reference objects + # that are defined further down in code. Therefore just set the + # position to None, so the finder will not try to stop at a certain + # position in the module. + position = None + return context.py__getattribute__(atom, position=position) + elif atom.type == 'keyword': + # For False/True/None + if atom.value in ('False', 'True', 'None'): + return ValueSet([compiled.builtin_from_name(state, atom.value)]) + elif atom.value == 'yield': + # Contrary to yield from, yield can just appear alone to return a + # value when used with `.send()`. + return NO_VALUES + assert False, 'Cannot infer the keyword %s' % atom + + elif isinstance(atom, tree.Literal): + string = state.compiled_subprocess.safe_literal_eval(atom.value) + return ValueSet([compiled.create_simple_object(state, string)]) + elif atom.type == 'strings': + # Will be multiple string. + value_set = infer_atom(context, atom.children[0]) + for string in atom.children[1:]: + right = infer_atom(context, string) + value_set = _infer_comparison(context, value_set, '+', right) + return value_set + elif atom.type == 'fstring': + return compiled.get_string_value_set(state) + else: + c = atom.children + # Parentheses without commas are not tuples. + if c[0] == '(' and not len(c) == 2 \ + and not (c[1].type == 'testlist_comp' + and len(c[1].children) > 1): + return context.infer_node(c[1]) + + try: + comp_for = c[1].children[1] + except (IndexError, AttributeError): + pass + else: + if comp_for == ':': + # Dict comprehensions have a colon at the 3rd index. + try: + comp_for = c[1].children[3] + except IndexError: + pass + + if comp_for.type in ('comp_for', 'sync_comp_for'): + return ValueSet([iterable.comprehension_from_atom( + state, context, atom + )]) + + # It's a dict/list/tuple literal. + array_node = c[1] + try: + array_node_c = array_node.children + except AttributeError: + array_node_c = [] + if c[0] == '{' and (array_node == '}' or ':' in array_node_c + or '**' in array_node_c): + new_value = iterable.DictLiteralValue(state, context, atom) + else: + new_value = iterable.SequenceLiteralValue(state, context, atom) + return ValueSet([new_value]) + + +@_limit_value_infers +def infer_expr_stmt(context, stmt, seek_name=None): + with recursion.execution_allowed(context.inference_state, stmt) as allowed: + if allowed: + if seek_name is not None: + pep0484_values = \ + annotation.find_type_from_comment_hint_assign(context, stmt, seek_name) + if pep0484_values: + return pep0484_values + + return _infer_expr_stmt(context, stmt, seek_name) + return NO_VALUES + + +@debug.increase_indent +def _infer_expr_stmt(context, stmt, seek_name=None): + """ + The starting point of the completion. A statement always owns a call + list, which are the calls, that a statement does. In case multiple + names are defined in the statement, `seek_name` returns the result for + this name. 
+ + expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist_star_expr))*) + annassign: ':' test ['=' test] + augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | '>>=' | '**=' | '//=') + + :param stmt: A `tree.ExprStmt`. + """ + def check_setitem(stmt): + atom_expr = stmt.children[0] + if atom_expr.type not in ('atom_expr', 'power'): + return False, None + name = atom_expr.children[0] + if name.type != 'name' or len(atom_expr.children) != 2: + return False, None + trailer = atom_expr.children[-1] + return trailer.children[0] == '[', trailer.children[1] + + debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name) + rhs = stmt.get_rhs() + + value_set = context.infer_node(rhs) + + if seek_name: + n = TreeNameDefinition(context, seek_name) + value_set = check_tuple_assignments(n, value_set) + + first_operator = next(stmt.yield_operators(), None) + is_setitem, subscriptlist = check_setitem(stmt) + is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator' + if is_annassign or is_setitem: + # `=` is always the last character in aug assignments -> -1 + name = stmt.get_defined_names(include_setitem=True)[0].value + left_values = context.py__getattribute__(name, position=stmt.start_pos) + + if is_setitem: + def to_mod(v): + c = ContextualizedSubscriptListNode(context, subscriptlist) + if v.array_type == 'dict': + return DictModification(v, value_set, c) + elif v.array_type == 'list': + return ListModification(v, value_set, c) + return v + + value_set = ValueSet(to_mod(v) for v in left_values) + else: + operator = copy.copy(first_operator) + operator.value = operator.value[:-1] + for_stmt = tree.search_ancestor(stmt, 'for_stmt') + if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \ + and parser_utils.for_stmt_defines_one_name(for_stmt): + # Iterate through result and add the values, that's possible + # only in for loops without clutter, because they are + # predictable. Also only do it, if the variable is not a tuple. + node = for_stmt.get_testlist() + cn = ContextualizedNode(context, node) + ordered = list(cn.infer().iterate(cn)) + + for lazy_value in ordered: + dct = {for_stmt.children[1].value: lazy_value.infer()} + with context.predefine_names(for_stmt, dct): + t = context.infer_node(rhs) + left_values = _infer_comparison(context, left_values, operator, t) + value_set = left_values + else: + value_set = _infer_comparison(context, left_values, operator, value_set) + debug.dbg('infer_expr_stmt result %s', value_set) + return value_set + + +def infer_or_test(context, or_test): + iterator = iter(or_test.children) + types = context.infer_node(next(iterator)) + for operator in iterator: + right = next(iterator) + if operator.type == 'comp_op': # not in / is not + operator = ' '.join(c.value for c in operator.children) + + # handle type inference of and/or here. + if operator in ('and', 'or'): + left_bools = set(left.py__bool__() for left in types) + if left_bools == {True}: + if operator == 'and': + types = context.infer_node(right) + elif left_bools == {False}: + if operator != 'and': + types = context.infer_node(right) + # Otherwise continue, because of uncertainty. + else: + types = _infer_comparison(context, types, operator, + context.infer_node(right)) + debug.dbg('infer_or_test types %s', types) + return types + + +@iterator_to_value_set +def infer_factor(value_set, operator): + """ + Calculates `+`, `-`, `~` and `not` prefixes. 
+ """ + for value in value_set: + if operator == '-': + if is_number(value): + yield value.negate() + elif operator == 'not': + b = value.py__bool__() + if b is None: # Uncertainty. + yield list(value.inference_state.builtins_module.py__getattribute__('bool') + .execute_annotation()).pop() + else: + yield compiled.create_simple_object(value.inference_state, not b) + else: + yield value + + +def _literals_to_types(inference_state, result): + # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), + # int(), float(), etc). + new_result = NO_VALUES + for typ in result: + if is_literal(typ): + # Literals are only valid as long as the operations are + # correct. Otherwise add a value-free instance. + cls = compiled.builtin_from_name(inference_state, typ.name.string_name) + new_result |= cls.execute_with_values() + else: + new_result |= ValueSet([typ]) + return new_result + + +def _infer_comparison(context, left_values, operator, right_values): + state = context.inference_state + if isinstance(operator, str): + operator_str = operator + else: + operator_str = str(operator.value) + if not left_values or not right_values: + # illegal slices e.g. cause left/right_result to be None + result = (left_values or NO_VALUES) | (right_values or NO_VALUES) + return _literals_to_types(state, result) + elif operator_str == "|" and all( + value.is_class() or value.is_compiled() + for value in itertools.chain(left_values, right_values) + ): + # ^^^ A naive hack for PEP 604 + return ValueSet.from_sets((left_values, right_values)) + else: + # I don't think there's a reasonable chance that a string + # operation is still correct, once we pass something like six + # objects. + if len(left_values) * len(right_values) > 6: + return _literals_to_types(state, left_values | right_values) + else: + return ValueSet.from_sets( + _infer_comparison_part(state, context, left, operator, right) + for left in left_values + for right in right_values + ) + + +def _is_annotation_name(name): + ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt') + if ancestor is None: + return False + + if ancestor.type in ('param', 'funcdef'): + ann = ancestor.annotation + if ann is not None: + return ann.start_pos <= name.start_pos < ann.end_pos + elif ancestor.type == 'expr_stmt': + c = ancestor.children + if len(c) > 1 and c[1].type == 'annassign': + return c[1].start_pos <= name.start_pos < c[1].end_pos + return False + + +def _is_list(value): + return value.array_type == 'list' + + +def _is_tuple(value): + return value.array_type == 'tuple' + + +def _bool_to_value(inference_state, bool_): + return compiled.builtin_from_name(inference_state, str(bool_)) + + +def _get_tuple_ints(value): + if not isinstance(value, iterable.SequenceLiteralValue): + return None + numbers = [] + for lazy_value in value.py__iter__(): + if not isinstance(lazy_value, LazyTreeValue): + return None + node = lazy_value.data + if node.type != 'number': + return None + try: + numbers.append(int(node.value)) + except ValueError: + return None + return numbers + + +def _infer_comparison_part(inference_state, context, left, operator, right): + l_is_num = is_number(left) + r_is_num = is_number(right) + if isinstance(operator, str): + str_operator = operator + else: + str_operator = str(operator.value) + + if str_operator == '*': + # for iterables, ignore * operations + if isinstance(left, iterable.Sequence) or is_string(left): + return ValueSet([left]) + elif isinstance(right, iterable.Sequence) or is_string(right): + return ValueSet([right]) + 
elif str_operator == '+': + if l_is_num and r_is_num or is_string(left) and is_string(right): + return left.execute_operation(right, str_operator) + elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right): + return ValueSet([iterable.MergedArray(inference_state, (left, right))]) + elif str_operator == '-': + if l_is_num and r_is_num: + return left.execute_operation(right, str_operator) + elif str_operator == '%': + # With strings and numbers the left type typically remains. Except for + # `int() % float()`. + return ValueSet([left]) + elif str_operator in COMPARISON_OPERATORS: + if left.is_compiled() and right.is_compiled(): + # Possible, because the return is not an option. Just compare. + result = left.execute_operation(right, str_operator) + if result: + return result + else: + if str_operator in ('is', '!=', '==', 'is not'): + operation = COMPARISON_OPERATORS[str_operator] + bool_ = operation(left, right) + # Only if == returns True or != returns False, we can continue. + # There's no guarantee that they are not equal. This can help + # in some cases, but does not cover everything. + if (str_operator in ('is', '==')) == bool_: + return ValueSet([_bool_to_value(inference_state, bool_)]) + + if isinstance(left, VersionInfo): + version_info = _get_tuple_ints(right) + if version_info is not None: + bool_result = compiled.access.COMPARISON_OPERATORS[operator]( + inference_state.environment.version_info, + tuple(version_info) + ) + return ValueSet([_bool_to_value(inference_state, bool_result)]) + + return ValueSet([ + _bool_to_value(inference_state, True), + _bool_to_value(inference_state, False) + ]) + elif str_operator in ('in', 'not in'): + return inference_state.builtins_module.py__getattribute__('bool').execute_annotation() + + def check(obj): + """Checks if a Jedi object is either a float or an int.""" + return isinstance(obj, TreeInstance) and \ + obj.name.string_name in ('int', 'float') + + # Static analysis, one is a number, the other one is not. 
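+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# The VersionInfo branch above folds checks such as
+# `sys.version_info >= (3, 8)` into a concrete True/False, which is what
+# lets `if` version guards select the correct branch statically. The
+# runtime behaviour it mirrors:
+import operator, sys
+
+COMPARISONS = {'>=': operator.ge, '<': operator.lt, '==': operator.eq}
+
+def fold_version_check(op, literal):
+    return COMPARISONS[op](tuple(sys.version_info), literal)
+
+# fold_version_check('>=', (3, 8)) -> True on any Python 3.8+ interpreter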
+ if str_operator in ('+', '-') and l_is_num != r_is_num \ + and not (check(left) or check(right)): + message = "TypeError: unsupported operand type(s) for +: %s and %s" + analysis.add(context, 'type-error-operation', operator, + message % (left, right)) + + if left.is_class() or right.is_class(): + return NO_VALUES + + method_name = operator_to_magic_method[str_operator] + magic_methods = left.py__getattribute__(method_name) + if magic_methods: + result = magic_methods.execute_with_values(right) + if result: + return result + + if not magic_methods: + reverse_method_name = reverse_operator_to_magic_method[str_operator] + magic_methods = right.py__getattribute__(reverse_method_name) + + result = magic_methods.execute_with_values(left) + if result: + return result + + result = ValueSet([left, right]) + debug.dbg('Used operator %s resulting in %s', operator, result) + return result + + +@plugin_manager.decorate() +def tree_name_to_values(inference_state, context, tree_name): + value_set = NO_VALUES + module_node = context.get_root_context().tree_node + # First check for annotations, like: `foo: int = 3` + if module_node is not None: + names = module_node.get_used_names().get(tree_name.value, []) + found_annotation = False + for name in names: + expr_stmt = name.parent + + if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign": + correct_scope = parser_utils.get_parent_scope(name) == context.tree_node + ann_assign = expr_stmt.children[1] + if correct_scope: + found_annotation = True + if ( + (ann_assign.children[1].type == 'name') + and (ann_assign.children[1].value == tree_name.value) + and context.parent_context + ): + context = context.parent_context + value_set |= annotation.infer_annotation( + context, expr_stmt.children[1].children[1] + ).execute_annotation() + if found_annotation: + return value_set + + types = [] + node = tree_name.get_definition(import_name_always=True, include_setitem=True) + if node is None: + node = tree_name.parent + if node.type == 'global_stmt': + c = context.create_context(tree_name) + if c.is_module(): + # In case we are already part of the module, there is no point + # in looking up the global statement anymore, because it's not + # valid at that point anyway. + return NO_VALUES + # For global_stmt lookups, we only need the first possible scope, + # which means the function itself. 
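+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# The magic-method fallback above mirrors Python's own binary-operator
+# protocol (simplified: the subclass-priority special case is ignored): try
+# the left operand's dunder first, then the right operand's reflected one.
+def binary_op(left, right, method='__add__'):
+    reflected = '__r' + method[2:]
+    result = NotImplemented
+    if hasattr(type(left), method):
+        result = getattr(type(left), method)(left, right)
+    if result is NotImplemented and hasattr(type(right), reflected):
+        result = getattr(type(right), reflected)(right, left)
+    if result is NotImplemented:
+        raise TypeError('unsupported operand type(s)')
+    return result
+
+# binary_op(1, 2.5) -> 3.5: int.__add__ returns NotImplemented,
+# so float.__radd__ supplies the result.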
+ filter = next(c.get_filters()) + names = filter.get(tree_name.value) + return ValueSet.from_sets(name.infer() for name in names) + elif node.type not in ('import_from', 'import_name'): + c = context.create_context(tree_name) + return infer_atom(c, tree_name) + + typ = node.type + if typ == 'for_stmt': + types = annotation.find_type_from_comment_hint_for(context, node, tree_name) + if types: + return types + if typ == 'with_stmt': + types = annotation.find_type_from_comment_hint_with(context, node, tree_name) + if types: + return types + + if typ in ('for_stmt', 'comp_for', 'sync_comp_for'): + try: + types = context.predefined_names[node][tree_name.value] + except KeyError: + cn = ContextualizedNode(context, node.children[3]) + for_types = iterate_values( + cn.infer(), + contextualized_node=cn, + is_async=node.parent.type == 'async_stmt', + ) + n = TreeNameDefinition(context, tree_name) + types = check_tuple_assignments(n, for_types) + elif typ == 'expr_stmt': + types = infer_expr_stmt(context, node, tree_name) + elif typ == 'with_stmt': + value_managers = context.infer_node(node.get_test_node_from_name(tree_name)) + if node.parent.type == 'async_stmt': + # In the case of `async with` statements, we need to + # first get the coroutine from the `__aenter__` method, + # then "unwrap" via the `__await__` method + enter_methods = value_managers.py__getattribute__('__aenter__') + coro = enter_methods.execute_with_values() + return coro.py__await__().py__stop_iteration_returns() + enter_methods = value_managers.py__getattribute__('__enter__') + return enter_methods.execute_with_values() + elif typ in ('import_from', 'import_name'): + types = imports.infer_import(context, tree_name) + elif typ in ('funcdef', 'classdef'): + types = _apply_decorators(context, node) + elif typ == 'try_stmt': + # TODO an exception can also be a tuple. Check for those. + # TODO check for types that are not classes and add it to + # the static analysis report. + exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling()) + types = exceptions.execute_with_values() + elif typ == 'param': + types = NO_VALUES + elif typ == 'del_stmt': + types = NO_VALUES + elif typ == 'namedexpr_test': + types = infer_node(context, node) + else: + raise ValueError("Should not happen. type: %s" % typ) + return types + + +# We don't want to have functions/classes that are created by the same +# tree_node. +@inference_state_method_cache() +def _apply_decorators(context, node): + """ + Returns the function, that should to be executed in the end. + This is also the places where the decorators are processed. + """ + if node.type == 'classdef': + decoratee_value = ClassValue( + context.inference_state, + parent_context=context, + tree_node=node + ) + else: + decoratee_value = FunctionValue.from_context(context, node) + initial = values = ValueSet([decoratee_value]) + + if is_big_annoying_library(context): + return values + + for dec in reversed(node.get_decorators()): + debug.dbg('decorator: %s %s', dec, values, color="MAGENTA") + with debug.increase_indent_cm(): + dec_values = context.infer_node(dec.children[1]) + trailer_nodes = dec.children[2:-1] + if trailer_nodes: + # Create a trailer and infer it. 
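+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# `reversed(node.get_decorators())` above matches runtime semantics:
+# decorators apply bottom-up, so the one written closest to `def` wraps
+# first and the topmost decorator wraps last.
+def apply_decorators(func, decorators):
+    for dec in reversed(decorators):
+        func = dec(func)
+    return func
+
+# With decorators listed top-to-bottom as in source,
+#     @a
+#     @b
+#     def f(): ...
+# is equivalent to f = apply_decorators(f, [a, b]), i.e. a(b(f)).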
+ trailer = tree.PythonNode('trailer', trailer_nodes) + trailer.parent = dec + dec_values = infer_trailer(context, dec_values, trailer) + + if not len(dec_values): + code = dec.get_code(include_prefix=False) + # For the short future, we don't want to hear about the runtime + # decorator in typing that was intentionally omitted. This is not + # "correct", but helps with debugging. + if code != '@runtime\n': + debug.warning('decorator not found: %s on %s', dec, node) + return initial + + values = dec_values.execute(arguments.ValuesArguments([values])) + if not len(values): + debug.warning('not possible to resolve wrappers found %s', node) + return initial + + debug.dbg('decorator end %s', values, color="MAGENTA") + if values != initial: + return ValueSet([Decoratee(c, decoratee_value) for c in values]) + return values + + +def check_tuple_assignments(name, value_set): + """ + Checks if tuples are assigned. + """ + lazy_value = None + for index, node in name.assignment_indexes(): + cn = ContextualizedNode(name.parent_context, node) + iterated = value_set.iterate(cn) + if isinstance(index, slice): + # For no star unpacking is not possible. + return NO_VALUES + i = 0 + while i <= index: + try: + lazy_value = next(iterated) + except StopIteration: + # We could do this with the default param in next. But this + # would allow this loop to run for a very long time if the + # index number is high. Therefore break if the loop is + # finished. + return NO_VALUES + else: + i += lazy_value.max + value_set = lazy_value.infer() + return value_set + + +class ContextualizedSubscriptListNode(ContextualizedNode): + def infer(self): + return _infer_subscript_list(self.context, self.node) + + +def _infer_subscript_list(context, index): + """ + Handles slices in subscript nodes. + """ + if index == ':': + # Like array[:] + return ValueSet([iterable.Slice(context, None, None, None)]) + + elif index.type == 'subscript' and not index.children[0] == '.': + # subscript basically implies a slice operation + # e.g. 
array[:3] + result = [] + for el in index.children: + if el == ':': + if not result: + result.append(None) + elif el.type == 'sliceop': + if len(el.children) == 2: + result.append(el.children[1]) + else: + result.append(el) + result += [None] * (3 - len(result)) + + return ValueSet([iterable.Slice(context, *result)]) + elif index.type == 'subscriptlist': + return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)]) + + # No slices + return context.infer_node(index) diff --git a/lib/python3.10/site-packages/jedi/inference/sys_path.py b/lib/python3.10/site-packages/jedi/inference/sys_path.py new file mode 100644 index 0000000000000000000000000000000000000000..062a0aa3afc9edaf02e7bb49b1877aec84c540c2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/sys_path.py @@ -0,0 +1,272 @@ +import os +import re +from pathlib import Path +from importlib.machinery import all_suffixes + +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.base_value import ContextualizedNode +from jedi.inference.helpers import is_string, get_str_or_none +from jedi.parser_utils import get_cached_code_lines +from jedi.file_io import FileIO +from jedi import settings +from jedi import debug + +_BUILDOUT_PATH_INSERTION_LIMIT = 10 + + +def _abs_path(module_context, str_path: str): + path = Path(str_path) + if path.is_absolute(): + return path + + module_path = module_context.py__file__() + if module_path is None: + # In this case we have no idea where we actually are in the file + # system. + return None + + base_dir = module_path.parent + return base_dir.joinpath(path).absolute() + + +def _paths_from_assignment(module_context, expr_stmt): + """ + Extracts the assigned strings from an assignment that looks as follows:: + + sys.path[0:0] = ['module/path', 'another/module/path'] + + This function is in general pretty tolerant (and therefore 'buggy'). + However, it's not a big issue usually to add more paths to Jedi's sys_path, + because it will only affect Jedi in very random situations and by adding + more paths than necessary, it usually benefits the general user. + """ + for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): + try: + assert operator in ['=', '+='] + assert assignee.type in ('power', 'atom_expr') and \ + len(assignee.children) > 1 + c = assignee.children + assert c[0].type == 'name' and c[0].value == 'sys' + trailer = c[1] + assert trailer.children[0] == '.' and trailer.children[1].value == 'path' + # TODO Essentially we're not checking details on sys.path + # manipulation. Both assigment of the sys.path and changing/adding + # parts of the sys.path are the same: They get added to the end of + # the current sys.path. + """ + execution = c[2] + assert execution.children[0] == '[' + subscript = execution.children[1] + assert subscript.type == 'subscript' + assert ':' in subscript.children + """ + except AssertionError: + continue + + cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt) + for lazy_value in cn.infer().iterate(cn): + for value in lazy_value.infer(): + if is_string(value): + abs_path = _abs_path(module_context, value.get_safe_value()) + if abs_path is not None: + yield abs_path + + +def _paths_from_list_modifications(module_context, trailer1, trailer2): + """ extract the path from either "sys.path.append" or "sys.path.insert" """ + # Guarantee that both are trailers, the first one a name and the second one + # a function execution with at least one param. 
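+# NOTE (editor): illustrative sketch, not part of the upstream jedi module.
+# Between them, `_paths_from_assignment` above and the append/insert helper
+# around this point cover the usual source-level idioms for extending
+# sys.path; each of these statements contributes search paths:
+import sys
+
+sys.path.append('plugins')        # matched by _paths_from_list_modifications
+sys.path.insert(0, 'vendored')    # matched by _paths_from_list_modifications
+sys.path[0:0] = ['src', 'lib']    # matched by _paths_from_assignment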
+ if not (trailer1.type == 'trailer' and trailer1.children[0] == '.' + and trailer2.type == 'trailer' and trailer2.children[0] == '(' + and len(trailer2.children) == 3): + return + + name = trailer1.children[1].value + if name not in ['insert', 'append']: + return + arg = trailer2.children[1] + if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. + arg = arg.children[2] + + for value in module_context.create_context(arg).infer_node(arg): + p = get_str_or_none(value) + if p is None: + continue + abs_path = _abs_path(module_context, p) + if abs_path is not None: + yield abs_path + + +@inference_state_method_cache(default=[]) +def check_sys_path_modifications(module_context): + """ + Detect sys.path modifications within module. + """ + def get_sys_path_powers(names): + for name in names: + power = name.parent.parent + if power is not None and power.type in ('power', 'atom_expr'): + c = power.children + if c[0].type == 'name' and c[0].value == 'sys' \ + and c[1].type == 'trailer': + n = c[1].children[1] + if n.type == 'name' and n.value == 'path': + yield name, power + + if module_context.tree_node is None: + return [] + + added = [] + try: + possible_names = module_context.tree_node.get_used_names()['path'] + except KeyError: + pass + else: + for name, power in get_sys_path_powers(possible_names): + expr_stmt = power.parent + if len(power.children) >= 4: + added.extend( + _paths_from_list_modifications( + module_context, *power.children[2:4] + ) + ) + elif expr_stmt is not None and expr_stmt.type == 'expr_stmt': + added.extend(_paths_from_assignment(module_context, expr_stmt)) + return added + + +def discover_buildout_paths(inference_state, script_path): + buildout_script_paths = set() + + for buildout_script_path in _get_buildout_script_paths(script_path): + for path in _get_paths_from_buildout_script(inference_state, buildout_script_path): + buildout_script_paths.add(path) + if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT: + break + + return buildout_script_paths + + +def _get_paths_from_buildout_script(inference_state, buildout_script_path): + file_io = FileIO(str(buildout_script_path)) + try: + module_node = inference_state.parse( + file_io=file_io, + cache=True, + cache_path=settings.cache_directory + ) + except IOError: + debug.warning('Error trying to read buildout_script: %s', buildout_script_path) + return + + from jedi.inference.value import ModuleValue + module_context = ModuleValue( + inference_state, module_node, + file_io=file_io, + string_names=None, + code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path), + ).as_context() + yield from check_sys_path_modifications(module_context) + + +def _get_parent_dir_with_file(path: Path, filename): + for parent in path.parents: + try: + if parent.joinpath(filename).is_file(): + return parent + except OSError: + continue + return None + + +def _get_buildout_script_paths(search_path: Path): + """ + if there is a 'buildout.cfg' file in one of the parent directories of the + given module it will return a list of all files in the buildout bin + directory that look like python files. + + :param search_path: absolute path to the module. 
+ """ + project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg') + if not project_root: + return + bin_path = project_root.joinpath('bin') + if not bin_path.exists(): + return + + for filename in os.listdir(bin_path): + try: + filepath = bin_path.joinpath(filename) + with open(filepath, 'r') as f: + firstline = f.readline() + if firstline.startswith('#!') and 'python' in firstline: + yield filepath + except (UnicodeDecodeError, IOError) as e: + # Probably a binary file; permission error or race cond. because + # file got deleted. Ignore it. + debug.warning(str(e)) + continue + + +def remove_python_path_suffix(path): + for suffix in all_suffixes() + ['.pyi']: + if path.suffix == suffix: + path = path.with_name(path.stem) + break + return path + + +def transform_path_to_dotted(sys_path, module_path): + """ + Returns the dotted path inside a sys.path as a list of names. e.g. + + >>> transform_path_to_dotted([str(Path("/foo").absolute())], Path('/foo/bar/baz.py').absolute()) + (('bar', 'baz'), False) + + Returns (None, False) if the path doesn't really resolve to anything. + The second return part is if it is a package. + """ + # First remove the suffix. + module_path = remove_python_path_suffix(module_path) + if module_path.name.startswith('.'): + return None, False + + # Once the suffix was removed we are using the files as we know them. This + # means that if someone uses an ending like .vim for a Python file, .vim + # will be part of the returned dotted part. + + is_package = module_path.name == '__init__' + if is_package: + module_path = module_path.parent + + def iter_potential_solutions(): + for p in sys_path: + if str(module_path).startswith(p): + # Strip the trailing slash/backslash + rest = str(module_path)[len(p):] + # On Windows a path can also use a slash. + if rest.startswith(os.path.sep) or rest.startswith('/'): + # Remove a slash in cases it's still there. + rest = rest[1:] + + if rest: + split = rest.split(os.path.sep) + if not all(split): + # This means that part of the file path was empty, this + # is very strange and is probably a file that is called + # `.py`. + return + # Stub folders for foo can end with foo-stubs. Just remove + # it. + yield tuple(re.sub(r'-stubs$', '', s) for s in split) + + potential_solutions = tuple(iter_potential_solutions()) + if not potential_solutions: + return None, False + # Try to find the shortest path, this makes more sense usually, because the + # user usually has venvs somewhere. This means that a path like + # .tox/py37/lib/python3.7/os.py can be normal for a file. However in that + # case we definitely want to return ['os'] as a path and not a crazy + # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a + # heuristic and there's now ay to "always" do it right. + return sorted(potential_solutions, key=lambda p: len(p))[0], is_package diff --git a/lib/python3.10/site-packages/jedi/inference/utils.py b/lib/python3.10/site-packages/jedi/inference/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ab10bcd948bfcc0cf88307cddb4b52b510c453ca --- /dev/null +++ b/lib/python3.10/site-packages/jedi/inference/utils.py @@ -0,0 +1,90 @@ +""" A universal module with functions / classes without dependencies. 
""" +import functools +import re +import os + + +_sep = os.path.sep +if os.path.altsep is not None: + _sep += os.path.altsep +_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep))) +del _sep + + +def to_list(func): + def wrapper(*args, **kwargs): + return list(func(*args, **kwargs)) + return wrapper + + +def to_tuple(func): + def wrapper(*args, **kwargs): + return tuple(func(*args, **kwargs)) + return wrapper + + +def unite(iterable): + """Turns a two dimensional array into a one dimensional.""" + return set(typ for types in iterable for typ in types) + + +class UncaughtAttributeError(Exception): + """ + Important, because `__getattr__` and `hasattr` catch AttributeErrors + implicitly. This is really evil (mainly because of `__getattr__`). + Therefore this class originally had to be derived from `BaseException` + instead of `Exception`. But because I removed relevant `hasattr` from + the code base, we can now switch back to `Exception`. + + :param base: return values of sys.exc_info(). + """ + + +def safe_property(func): + return property(reraise_uncaught(func)) + + +def reraise_uncaught(func): + """ + Re-throw uncaught `AttributeError`. + + Usage: Put ``@rethrow_uncaught`` in front of the function + which does **not** suppose to raise `AttributeError`. + + AttributeError is easily get caught by `hasattr` and another + ``except AttributeError`` clause. This becomes problem when you use + a lot of "dynamic" attributes (e.g., using ``@property``) because you + can't distinguish if the property does not exist for real or some code + inside of the "dynamic" attribute through that error. In a well + written code, such error should not exist but getting there is very + difficult. This decorator is to help us getting there by changing + `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. + This helps us noticing bugs earlier and facilitates debugging. + """ + @functools.wraps(func) + def wrapper(*args, **kwds): + try: + return func(*args, **kwds) + except AttributeError as e: + raise UncaughtAttributeError(e) from e + return wrapper + + +class PushBackIterator: + def __init__(self, iterator): + self.pushes = [] + self.iterator = iterator + self.current = None + + def push_back(self, value): + self.pushes.append(value) + + def __iter__(self): + return self + + def __next__(self): + if self.pushes: + self.current = self.pushes.pop() + else: + self.current = next(self.iterator) + return self.current diff --git a/lib/python3.10/site-packages/jedi/plugins/__init__.py b/lib/python3.10/site-packages/jedi/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8067676d03ee07725b5aa44e99303701381dc224 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/__init__.py @@ -0,0 +1,47 @@ +from functools import wraps + + +class _PluginManager: + def __init__(self): + self._registered_plugins = [] + self._cached_base_callbacks = {} + self._built_functions = {} + + def register(self, *plugins): + """ + Makes it possible to register your plugin. 
+ """ + self._registered_plugins.extend(plugins) + self._build_functions() + + def decorate(self, name=None): + def decorator(callback): + @wraps(callback) + def wrapper(*args, **kwargs): + return built_functions[public_name](*args, **kwargs) + + public_name = name or callback.__name__ + + assert public_name not in self._built_functions + built_functions = self._built_functions + built_functions[public_name] = callback + self._cached_base_callbacks[public_name] = callback + + return wrapper + + return decorator + + def _build_functions(self): + for name, callback in self._cached_base_callbacks.items(): + for plugin in reversed(self._registered_plugins): + # Need to reverse so the first plugin is run first. + try: + func = getattr(plugin, name) + except AttributeError: + pass + else: + callback = func(callback) + self._built_functions[name] = callback + + +plugin_manager = _PluginManager() diff --git a/lib/python3.10/site-packages/jedi/plugins/django.py b/lib/python3.10/site-packages/jedi/plugins/django.py new file mode 100644 index 0000000000000000000000000000000000000000..cd443bbda59c466c693f89a33fae8c8b53594c24 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/django.py @@ -0,0 +1,296 @@ +""" +Module is used to infer Django model fields. +""" +from inspect import Parameter + +from jedi import debug +from jedi.inference.cache import inference_state_function_cache +from jedi.inference.base_value import ValueSet, iterator_to_value_set, ValueWrapper +from jedi.inference.filters import DictFilter, AttributeOverwrite +from jedi.inference.names import NameWrapper, BaseTreeParamName +from jedi.inference.compiled.value import EmptyCompiledName +from jedi.inference.value.instance import TreeInstance +from jedi.inference.value.klass import ClassMixin +from jedi.inference.gradual.base import GenericClass +from jedi.inference.gradual.generics import TupleGenericManager +from jedi.inference.signature import AbstractSignature + + +mapping = { + 'IntegerField': (None, 'int'), + 'BigIntegerField': (None, 'int'), + 'PositiveIntegerField': (None, 'int'), + 'SmallIntegerField': (None, 'int'), + 'CharField': (None, 'str'), + 'TextField': (None, 'str'), + 'EmailField': (None, 'str'), + 'GenericIPAddressField': (None, 'str'), + 'URLField': (None, 'str'), + 'FloatField': (None, 'float'), + 'BinaryField': (None, 'bytes'), + 'BooleanField': (None, 'bool'), + 'DecimalField': ('decimal', 'Decimal'), + 'TimeField': ('datetime', 'time'), + 'DurationField': ('datetime', 'timedelta'), + 'DateField': ('datetime', 'date'), + 'DateTimeField': ('datetime', 'datetime'), + 'UUIDField': ('uuid', 'UUID'), +} + +_FILTER_LIKE_METHODS = ('create', 'filter', 'exclude', 'update', 'get', + 'get_or_create', 'update_or_create') + + +@inference_state_function_cache() +def _get_deferred_attributes(inference_state): + return inference_state.import_module( + ('django', 'db', 'models', 'query_utils') + ).py__getattribute__('DeferredAttribute').execute_annotation() + + +def _infer_scalar_field(inference_state, field_name, field_tree_instance, is_instance): + try: + module_name, attribute_name = mapping[field_tree_instance.py__name__()] + except KeyError: + return None + + if not is_instance: + return _get_deferred_attributes(inference_state) + + if module_name is None: + module = inference_state.builtins_module + else: + module = inference_state.import_module((module_name,)) + + for attribute in module.py__getattribute__(attribute_name): + return attribute.execute_with_values() + + +@iterator_to_value_set +def 
_get_foreign_key_values(cls, field_tree_instance): + if isinstance(field_tree_instance, TreeInstance): + # TODO private access. + argument_iterator = field_tree_instance._arguments.unpack() + key, lazy_values = next(argument_iterator, (None, None)) + if key is None and lazy_values is not None: + for value in lazy_values.infer(): + if value.py__name__() == 'str': + foreign_key_class_name = value.get_safe_value() + module = cls.get_root_context() + for v in module.py__getattribute__(foreign_key_class_name): + if v.is_class(): + yield v + elif value.is_class(): + yield value + + +def _infer_field(cls, field_name, is_instance): + inference_state = cls.inference_state + result = field_name.infer() + for field_tree_instance in result: + scalar_field = _infer_scalar_field( + inference_state, field_name, field_tree_instance, is_instance) + if scalar_field is not None: + return scalar_field + + name = field_tree_instance.py__name__() + is_many_to_many = name == 'ManyToManyField' + if name in ('ForeignKey', 'OneToOneField') or is_many_to_many: + if not is_instance: + return _get_deferred_attributes(inference_state) + + values = _get_foreign_key_values(cls, field_tree_instance) + if is_many_to_many: + return ValueSet(filter(None, [ + _create_manager_for(v, 'RelatedManager') for v in values + ])) + else: + return values.execute_with_values() + + debug.dbg('django plugin: failed to infer `%s` from class `%s`', + field_name.string_name, cls.py__name__()) + return result + + +class DjangoModelName(NameWrapper): + def __init__(self, cls, name, is_instance): + super().__init__(name) + self._cls = cls + self._is_instance = is_instance + + def infer(self): + return _infer_field(self._cls, self._wrapped_name, self._is_instance) + + +def _create_manager_for(cls, manager_cls='BaseManager'): + managers = cls.inference_state.import_module( + ('django', 'db', 'models', 'manager') + ).py__getattribute__(manager_cls) + for m in managers: + if m.is_class_mixin(): + generics_manager = TupleGenericManager((ValueSet([cls]),)) + for c in GenericClass(m, generics_manager).execute_annotation(): + return c + return None + + +def _new_dict_filter(cls, is_instance): + filters = list(cls.get_filters( + is_instance=is_instance, + include_metaclasses=False, + include_type_when_class=False) + ) + dct = { + name.string_name: DjangoModelName(cls, name, is_instance) + for filter_ in reversed(filters) + for name in filter_.values() + } + if is_instance: + # Replace the objects with a name that amounts to nothing when accessed + # in an instance. This is not perfect and still completes "objects" in + # that case, but at least it does not infer stuff like `.objects.filter`. + # It would be nicer to do that in a better way, so that it also doesn't + # show up in completions, but it's probably just not worth doing that + # for the extra amount of work.
+ dct['objects'] = EmptyCompiledName(cls.inference_state, 'objects') + + return DictFilter(dct) + + +def is_django_model_base(value): + return value.py__name__() == 'ModelBase' \ + and value.get_root_context().py__name__() == 'django.db.models.base' + + +def get_metaclass_filters(func): + def wrapper(cls, metaclasses, is_instance): + for metaclass in metaclasses: + if is_django_model_base(metaclass): + return [_new_dict_filter(cls, is_instance)] + + return func(cls, metaclasses, is_instance) + return wrapper + + +def tree_name_to_values(func): + def wrapper(inference_state, context, tree_name): + result = func(inference_state, context, tree_name) + if tree_name.value in _FILTER_LIKE_METHODS: + # Here we try to overwrite stuff like User.objects.filter. We need + # this to make sure that keyword param completion works on these + # kinds of methods. + for v in result: + if v.get_qualified_names() == ('_BaseQuerySet', tree_name.value) \ + and v.parent_context.is_module() \ + and v.parent_context.py__name__() == 'django.db.models.query': + qs = context.get_value() + generics = qs.get_generics() + if len(generics) >= 1: + return ValueSet(QuerySetMethodWrapper(v, model) + for model in generics[0]) + + elif tree_name.value == 'BaseManager' and context.is_module() \ + and context.py__name__() == 'django.db.models.manager': + return ValueSet(ManagerWrapper(r) for r in result) + + elif tree_name.value == 'Field' and context.is_module() \ + and context.py__name__() == 'django.db.models.fields': + return ValueSet(FieldWrapper(r) for r in result) + return result + return wrapper + + +def _find_fields(cls): + for name in _new_dict_filter(cls, is_instance=False).values(): + for value in name.infer(): + if value.name.get_qualified_names(include_module_names=True) \ + == ('django', 'db', 'models', 'query_utils', 'DeferredAttribute'): + yield name + + +def _get_signatures(cls): + return [DjangoModelSignature(cls, field_names=list(_find_fields(cls)))] + + +def get_metaclass_signatures(func): + def wrapper(cls, metaclasses): + for metaclass in metaclasses: + if is_django_model_base(metaclass): + return _get_signatures(cls) + return func(cls, metaclasses) + return wrapper + + +class ManagerWrapper(ValueWrapper): + def py__getitem__(self, index_value_set, contextualized_node): + return ValueSet( + GenericManagerWrapper(generic) + for generic in self._wrapped_value.py__getitem__( + index_value_set, contextualized_node) + ) + + +class GenericManagerWrapper(AttributeOverwrite, ClassMixin): + def py__get__on_class(self, calling_instance, instance, class_value): + return calling_instance.class_value.with_generics( + (ValueSet({class_value}),) + ).py__call__(calling_instance._arguments) + + def with_generics(self, generics_tuple): + return self._wrapped_value.with_generics(generics_tuple) + + +class FieldWrapper(ValueWrapper): + def py__getitem__(self, index_value_set, contextualized_node): + return ValueSet( + GenericFieldWrapper(generic) + for generic in self._wrapped_value.py__getitem__( + index_value_set, contextualized_node) + ) + + +class GenericFieldWrapper(AttributeOverwrite, ClassMixin): + def py__get__on_class(self, calling_instance, instance, class_value): + # This is mostly an optimization to avoid Jedi aborting inference, + # because of too many function executions of Field.__get__.
+ return ValueSet({calling_instance}) + + +class DjangoModelSignature(AbstractSignature): + def __init__(self, value, field_names): + super().__init__(value) + self._field_names = field_names + + def get_param_names(self, resolve_stars=False): + return [DjangoParamName(name) for name in self._field_names] + + +class DjangoParamName(BaseTreeParamName): + def __init__(self, field_name): + super().__init__(field_name.parent_context, field_name.tree_name) + self._field_name = field_name + + def get_kind(self): + return Parameter.KEYWORD_ONLY + + def infer(self): + return self._field_name.infer() + + +class QuerySetMethodWrapper(ValueWrapper): + def __init__(self, method, model_cls): + super().__init__(method) + self._model_cls = model_cls + + def py__get__(self, instance, class_value): + return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls) + for v in self._wrapped_value.py__get__(instance, class_value)}) + + +class QuerySetBoundMethodWrapper(ValueWrapper): + def __init__(self, method, model_cls): + super().__init__(method) + self._model_cls = model_cls + + def get_signatures(self): + return _get_signatures(self._model_cls) diff --git a/lib/python3.10/site-packages/jedi/plugins/flask.py b/lib/python3.10/site-packages/jedi/plugins/flask.py new file mode 100644 index 0000000000000000000000000000000000000000..8d67b83959cfd6e760f2b6617849d5b08d3e156b --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/flask.py @@ -0,0 +1,21 @@ +def import_module(callback): + """ + Handle "magic" Flask extension imports: + ``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``. + """ + def wrapper(inference_state, import_names, module_context, *args, **kwargs): + if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'): + # New style. + ipath = ('flask_' + import_names[2]), + value_set = callback(inference_state, ipath, None, *args, **kwargs) + if value_set: + return value_set + value_set = callback(inference_state, ('flaskext',), None, *args, **kwargs) + return callback( + inference_state, + ('flaskext', import_names[2]), + next(iter(value_set)), + *args, **kwargs + ) + return callback(inference_state, import_names, module_context, *args, **kwargs) + return wrapper diff --git a/lib/python3.10/site-packages/jedi/plugins/pytest.py b/lib/python3.10/site-packages/jedi/plugins/pytest.py new file mode 100644 index 0000000000000000000000000000000000000000..acf6ad38b3cd2d21b147722cd21745efd2029574 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/pytest.py @@ -0,0 +1,269 @@ +import sys +from typing import List +from pathlib import Path + +from parso.tree import search_ancestor +from jedi.inference.cache import inference_state_method_cache +from jedi.inference.imports import goto_import, load_module_from_path +from jedi.inference.filters import ParserTreeFilter +from jedi.inference.base_value import NO_VALUES, ValueSet +from jedi.inference.helpers import infer_call_of_leaf + +_PYTEST_FIXTURE_MODULES = [ + ('_pytest', 'monkeypatch'), + ('_pytest', 'capture'), + ('_pytest', 'logging'), + ('_pytest', 'tmpdir'), + ('_pytest', 'pytester'), +] + + +def execute(callback): + def wrapper(value, arguments): + # This might not be necessary anymore in pytest 4/5, definitely needed + # for pytest 3. 
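+        # In other words, a call to ``pytest.fixture(...)`` itself is
+        # deliberately inferred to nothing here; decorated fixtures are
+        # resolved separately by the hooks below (a reading of the check
+        # that follows, not behaviour documented by pytest).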
+ if value.py__name__() == 'fixture' \ + and value.parent_context.py__name__() == '_pytest.fixtures': + return NO_VALUES + + return callback(value, arguments) + return wrapper + + +def infer_anonymous_param(func): + def get_returns(value): + if value.tree_node.annotation is not None: + result = value.execute_with_values() + if any(v.name.get_qualified_names(include_module_names=True) + == ('typing', 'Generator') + for v in result): + return ValueSet.from_sets( + v.py__getattribute__('__next__').execute_annotation() + for v in result + ) + return result + + # In pytest we need to differentiate between generators and normal + # returns. + # Parameters still need to be anonymous, .as_context() ensures that. + function_context = value.as_context() + if function_context.is_generator(): + return function_context.merge_yield_values() + else: + return function_context.get_return_values() + + def wrapper(param_name): + # parameters with an annotation do not need special handling + if param_name.annotation_node: + return func(param_name) + is_pytest_param, param_name_is_function_name = \ + _is_a_pytest_param_and_inherited(param_name) + if is_pytest_param: + module = param_name.get_root_context() + fixtures = _goto_pytest_fixture( + module, + param_name.string_name, + # This skips the current module, because we are basically + # inheriting a fixture from somewhere else. + skip_own_module=param_name_is_function_name, + ) + if fixtures: + return ValueSet.from_sets( + get_returns(value) + for fixture in fixtures + for value in fixture.infer() + ) + return func(param_name) + return wrapper + + +def goto_anonymous_param(func): + def wrapper(param_name): + is_pytest_param, param_name_is_function_name = \ + _is_a_pytest_param_and_inherited(param_name) + if is_pytest_param: + names = _goto_pytest_fixture( + param_name.get_root_context(), + param_name.string_name, + skip_own_module=param_name_is_function_name, + ) + if names: + return names + return func(param_name) + return wrapper + + +def complete_param_names(func): + def wrapper(context, func_name, decorator_nodes): + module_context = context.get_root_context() + if _is_pytest_func(func_name, decorator_nodes): + names = [] + for module_context in _iter_pytest_modules(module_context): + names += FixtureFilter(module_context).values() + if names: + return names + return func(context, func_name, decorator_nodes) + return wrapper + + +def _goto_pytest_fixture(module_context, name, skip_own_module): + for module_context in _iter_pytest_modules(module_context, skip_own_module=skip_own_module): + names = FixtureFilter(module_context).get(name) + if names: + return names + + +def _is_a_pytest_param_and_inherited(param_name): + """ + Pytest params are either in a `test_*` function or have a pytest fixture + with the decorator @pytest.fixture. + + This is a heuristic and will work in most cases. 
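+
+    For example, in a hypothetical ``def test_foo(tmpdir): ...`` the first
+    part of the return value is True because the function name starts with
+    ``test``; the second part is only True when the parameter name equals
+    the enclosing function's name (i.e. a fixture overriding a fixture of
+    the same name).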
+ """ + funcdef = search_ancestor(param_name.tree_name, 'funcdef') + if funcdef is None: # A lambda + return False, False + decorators = funcdef.get_decorators() + return _is_pytest_func(funcdef.name.value, decorators), \ + funcdef.name.value == param_name.string_name + + +def _is_pytest_func(func_name, decorator_nodes): + return func_name.startswith('test') \ + or any('fixture' in n.get_code() for n in decorator_nodes) + + +def _find_pytest_plugin_modules() -> List[List[str]]: + """ + Finds pytest plugin modules hooked by setuptools entry points + + See https://docs.pytest.org/en/stable/how-to/writing_plugins.html#setuptools-entry-points + """ + if sys.version_info >= (3, 8): + from importlib.metadata import entry_points + + if sys.version_info >= (3, 10): + pytest_entry_points = entry_points(group="pytest11") + else: + pytest_entry_points = entry_points().get("pytest11", ()) + + if sys.version_info >= (3, 9): + return [ep.module.split(".") for ep in pytest_entry_points] + else: + # Python 3.8 doesn't have `EntryPoint.module`. Implement equivalent + # to what Python 3.9 does (with additional None check to placate `mypy`) + matches = [ + ep.pattern.match(ep.value) + for ep in pytest_entry_points + ] + return [x.group('module').split(".") for x in matches if x] + + else: + from pkg_resources import iter_entry_points + return [ep.module_name.split(".") for ep in iter_entry_points(group="pytest11")] + + +@inference_state_method_cache() +def _iter_pytest_modules(module_context, skip_own_module=False): + if not skip_own_module: + yield module_context + + file_io = module_context.get_value().file_io + if file_io is not None: + folder = file_io.get_parent_folder() + sys_path = module_context.inference_state.get_sys_path() + + # prevent an infinite loop when reaching the root of the current drive + last_folder = None + + while any(folder.path.startswith(p) for p in sys_path): + file_io = folder.get_file_io('conftest.py') + if Path(file_io.path) != module_context.py__file__(): + try: + m = load_module_from_path(module_context.inference_state, file_io) + conftest_module = m.as_context() + yield conftest_module + + plugins_list = m.tree_node.get_used_names().get("pytest_plugins") + if plugins_list: + name = conftest_module.create_name(plugins_list[0]) + yield from _load_pytest_plugins(module_context, name) + except FileNotFoundError: + pass + folder = folder.get_parent_folder() + + # prevent an infinite for loop if the same parent folder is return twice + if last_folder is not None and folder.path == last_folder.path: + break + last_folder = folder # keep track of the last found parent name + + for names in _PYTEST_FIXTURE_MODULES + _find_pytest_plugin_modules(): + for module_value in module_context.inference_state.import_module(names): + yield module_value.as_context() + + +def _load_pytest_plugins(module_context, name): + from jedi.inference.helpers import get_str_or_none + + for inferred in name.infer(): + for seq_value in inferred.py__iter__(): + for value in seq_value.infer(): + fq_name = get_str_or_none(value) + if fq_name: + names = fq_name.split(".") + for module_value in module_context.inference_state.import_module(names): + yield module_value.as_context() + + +class FixtureFilter(ParserTreeFilter): + def _filter(self, names): + for name in super()._filter(names): + # look for fixture definitions of imported names + if name.parent.type == "import_from": + imported_names = goto_import(self.parent_context, name) + if any( + self._is_fixture(iname.parent_context, iname.tree_name) + for iname in 
imported_names + # discard imports of whole modules, which have no tree_name + if iname.tree_name + ): + yield name + + elif self._is_fixture(self.parent_context, name): + yield name + + def _is_fixture(self, context, name): + funcdef = name.parent + # Class fixtures are not supported + if funcdef.type != "funcdef": + return False + decorated = funcdef.parent + if decorated.type != "decorated": + return False + decorators = decorated.children[0] + if decorators.type == 'decorators': + decorators = decorators.children + else: + decorators = [decorators] + for decorator in decorators: + dotted_name = decorator.children[1] + # A heuristic; this makes it faster. + if 'fixture' in dotted_name.get_code(): + if dotted_name.type == 'atom_expr': + # Since Python 3.9 a decorator does not have dotted names + # anymore. + last_trailer = dotted_name.children[-1] + last_leaf = last_trailer.get_last_leaf() + if last_leaf == ')': + values = infer_call_of_leaf( + context, last_leaf, cut_own_trailer=True + ) + else: + values = context.infer_node(dotted_name) + else: + values = context.infer_node(dotted_name) + for value in values: + if value.name.get_qualified_names(include_module_names=True) \ + == ('_pytest', 'fixtures', 'fixture'): + return True + return False diff --git a/lib/python3.10/site-packages/jedi/plugins/registry.py b/lib/python3.10/site-packages/jedi/plugins/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..c1a0b7496bfacba69d289f8a5383c9d4a8939653 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/registry.py @@ -0,0 +1,12 @@ +""" +This is not a plugin; this is just the place where plugins are registered. +""" + +from jedi.plugins import stdlib +from jedi.plugins import flask +from jedi.plugins import pytest +from jedi.plugins import django +from jedi.plugins import plugin_manager + + +plugin_manager.register(stdlib, flask, pytest, django) diff --git a/lib/python3.10/site-packages/jedi/plugins/stdlib.py b/lib/python3.10/site-packages/jedi/plugins/stdlib.py new file mode 100644 index 0000000000000000000000000000000000000000..e1004ec8171cd8b9ca474070497d79a32a51852a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/plugins/stdlib.py @@ -0,0 +1,879 @@ +""" +Implementations of standard library functions, because it's not possible to +understand them with Jedi. + +To add a new implementation, create a function and add it to the +``_implemented`` dict at the bottom of this module. + +Note that this module exists only to implement very specific functionality in +the standard library. The usual way to understand the standard library is the +compiled module that returns the types for C-builtins.
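+
+As a sketch, an entry for ``copy.copy`` boils down to the following (see
+``_return_first_param`` and the ``_implemented`` dict below; the dict is
+abbreviated here):
+
+    @argument_clinic('first, /')
+    def _return_first_param(firsts):
+        return firsts
+
+    _implemented = {'copy': {'copy': _return_first_param}}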
+""" +import parso +import os +from inspect import Parameter + +from jedi import debug +from jedi.inference.utils import safe_property +from jedi.inference.helpers import get_str_or_none +from jedi.inference.arguments import iterate_argument_clinic, ParamIssue, \ + repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper +from jedi.inference import analysis +from jedi.inference import compiled +from jedi.inference.value.instance import \ + AnonymousMethodExecutionContext, MethodExecutionContext +from jedi.inference.base_value import ContextualizedNode, \ + NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper +from jedi.inference.value import ClassValue, ModuleValue +from jedi.inference.value.klass import ClassMixin +from jedi.inference.value.function import FunctionMixin +from jedi.inference.value import iterable +from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, \ + LazyKnownValues +from jedi.inference.names import ValueName, BaseTreeParamName +from jedi.inference.filters import AttributeOverwrite, publish_method, \ + ParserTreeFilter, DictFilter +from jedi.inference.signature import AbstractSignature, SignatureWrapper + + +# Copied from Python 3.6's stdlib. +_NAMEDTUPLE_CLASS_TEMPLATE = """\ +_property = property +_tuple = tuple +from operator import itemgetter as _itemgetter +from collections import OrderedDict + +class {typename}(tuple): + __slots__ = () + + _fields = {field_names!r} + + def __new__(_cls, {arg_list}): + 'Create new instance of {typename}({arg_list})' + return _tuple.__new__(_cls, ({arg_list})) + + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new {typename} object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != {num_fields:d}: + raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) + return result + + def _replace(_self, **kwds): + 'Return a new {typename} object replacing specified fields with new values' + result = _self._make(map(kwds.pop, {field_names!r}, _self)) + if kwds: + raise ValueError('Got unexpected field names: %r' % list(kwds)) + return result + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + '({repr_fmt})' % self + + def _asdict(self): + 'Return a new OrderedDict which maps field names to their values.' + return OrderedDict(zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return tuple(self) + + # These methods were added by Jedi. + # __new__ doesn't really work with Jedi. So adding this to nametuples seems + # like the easiest way. + def __init__(self, {arg_list}): + 'A helper function for namedtuple.' 
+ self.__iterable = ({arg_list}) + + def __iter__(self): + for i in self.__iterable: + yield i + + def __getitem__(self, y): + return self.__iterable[y] + +{field_defs} +""" + +_NAMEDTUPLE_FIELD_TEMPLATE = '''\ + {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') +''' + + +def execute(callback): + def wrapper(value, arguments): + def call(): + return callback(value, arguments=arguments) + + try: + obj_name = value.name.string_name + except AttributeError: + pass + else: + p = value.parent_context + if p is not None and p.is_builtins_module(): + module_name = 'builtins' + elif p is not None and p.is_module(): + module_name = p.py__name__() + else: + return call() + + if value.is_bound_method() or value.is_instance(): + # value can be an instance for example if it is a partial + # object. + return call() + + # for now we just support builtin functions. + try: + func = _implemented[module_name][obj_name] + except KeyError: + pass + else: + return func(value, arguments=arguments, callback=call) + return call() + + return wrapper + + +def _follow_param(inference_state, arguments, index): + try: + key, lazy_value = list(arguments.unpack())[index] + except IndexError: + return NO_VALUES + else: + return lazy_value.infer() + + +def argument_clinic(clinic_string, want_value=False, want_context=False, + want_arguments=False, want_inference_state=False, + want_callback=False): + """ + Works like Argument Clinic (PEP 436), to validate function params. + """ + + def f(func): + def wrapper(value, arguments, callback): + try: + args = tuple(iterate_argument_clinic( + value.inference_state, arguments, clinic_string)) + except ParamIssue: + return NO_VALUES + + debug.dbg('builtin start %s' % value, color='MAGENTA') + kwargs = {} + if want_context: + kwargs['context'] = arguments.context + if want_value: + kwargs['value'] = value + if want_inference_state: + kwargs['inference_state'] = value.inference_state + if want_arguments: + kwargs['arguments'] = arguments + if want_callback: + kwargs['callback'] = callback + result = func(*args, **kwargs) + debug.dbg('builtin end: %s', result, color='MAGENTA') + return result + + return wrapper + return f + + +@argument_clinic('iterator[, default], /', want_inference_state=True) +def builtins_next(iterators, defaults, inference_state): + # TODO theoretically we have to check here if something is an iterator. + # That is probably done by checking if it's not a class. + return defaults | iterators.py__getattribute__('__next__').execute_with_values() + + +@argument_clinic('iterator[, default], /') +def builtins_iter(iterators_or_callables, defaults): + # TODO implement this if it's a callable. + return iterators_or_callables.py__getattribute__('__iter__').execute_with_values() + + +@argument_clinic('object, name[, default], /') +def builtins_getattr(objects, names, defaults=None): + # follow the first param + for value in objects: + for name in names: + string = get_str_or_none(name) + if string is None: + debug.warning('getattr called without str') + continue + else: + return value.py__getattribute__(string) + return NO_VALUES + + +@argument_clinic('object[, bases, dict], /') +def builtins_type(objects, bases, dicts): + if bases or dicts: + # It's a type creation... maybe someday... 
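+        # e.g. a hypothetical ``type('X', (Base,), {'attr': 1})`` would need
+        # a brand-new class value to be synthesized, which is not supported.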
+ return NO_VALUES + else: + return objects.py__class__() + + +class SuperInstance(LazyValueWrapper): + """To be used like the object ``super`` returns.""" + def __init__(self, inference_state, instance): + self.inference_state = inference_state + self._instance = instance # Corresponds to super().__self__ + + def _get_bases(self): + return self._instance.py__class__().py__bases__() + + def _get_wrapped_value(self): + objs = self._get_bases()[0].infer().execute_with_values() + if not objs: + # This is just a fallback and will only be used if it's not + # possible to find a class + return self._instance + return next(iter(objs)) + + def get_filters(self, origin_scope=None): + for b in self._get_bases(): + for value in b.infer().execute_with_values(): + for f in value.get_filters(): + yield f + + +@argument_clinic('[type[, value]], /', want_context=True) +def builtins_super(types, objects, context): + instance = None + if isinstance(context, AnonymousMethodExecutionContext): + instance = context.instance + elif isinstance(context, MethodExecutionContext): + instance = context.instance + if instance is None: + return NO_VALUES + return ValueSet({SuperInstance(instance.inference_state, instance)}) + + +class ReversedObject(AttributeOverwrite): + def __init__(self, reversed_obj, iter_list): + super().__init__(reversed_obj) + self._iter_list = iter_list + + def py__iter__(self, contextualized_node=None): + return self._iter_list + + @publish_method('__next__') + def _next(self, arguments): + return ValueSet.from_sets( + lazy_value.infer() for lazy_value in self._iter_list + ) + + +@argument_clinic('sequence, /', want_value=True, want_arguments=True) +def builtins_reversed(sequences, value, arguments): + # While we could do without this variable (just by using sequences), we + # want static analysis to work well. Therefore we need to generate the + # values again. + key, lazy_value = next(arguments.unpack()) + cn = None + if isinstance(lazy_value, LazyTreeValue): + cn = ContextualizedNode(lazy_value.context, lazy_value.data) + ordered = list(sequences.iterate(cn)) + + # Repack iterator values and then run it the normal way. This is + # necessary, because `reversed` is a function and autocompletion + # would fail in certain cases like `reversed(x).__iter__` if we + # just returned the result directly. + seq, = value.inference_state.typing_module.py__getattribute__('Iterator').execute_with_values() + return ValueSet([ReversedObject(seq, list(reversed(ordered)))]) + + +@argument_clinic('value, type, /', want_arguments=True, want_inference_state=True) +def builtins_isinstance(objects, types, arguments, inference_state): + bool_results = set() + for o in objects: + cls = o.py__class__() + try: + cls.py__bases__ + except AttributeError: + # This is temporary. Everything should have a class attribute in + # Python?! Maybe we'll leave it here, because some numpy objects or + # whatever might not. + bool_results = set([True, False]) + break + + mro = list(cls.py__mro__()) + + for cls_or_tup in types: + if cls_or_tup.is_class(): + bool_results.add(cls_or_tup in mro) + elif cls_or_tup.name.string_name == 'tuple' \ + and cls_or_tup.get_root_context().is_builtins_module(): + # Check for tuples.
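+                # e.g. a call like ``isinstance(x, (int, str))``: iterate the
+                # tuple and compare each contained class against the mro.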
+ classes = ValueSet.from_sets( + lazy_value.infer() + for lazy_value in cls_or_tup.iterate() + ) + bool_results.add(any(cls in mro for cls in classes)) + else: + _, lazy_value = list(arguments.unpack())[1] + if isinstance(lazy_value, LazyTreeValue): + node = lazy_value.data + message = 'TypeError: isinstance() arg 2 must be a ' \ + 'class, type, or tuple of classes and types, ' \ + 'not %s.' % cls_or_tup + analysis.add(lazy_value.context, 'type-error-isinstance', node, message) + + return ValueSet( + compiled.builtin_from_name(inference_state, str(b)) + for b in bool_results + ) + + +class StaticMethodObject(ValueWrapper): + def py__get__(self, instance, class_value): + return ValueSet([self._wrapped_value]) + + +@argument_clinic('sequence, /') +def builtins_staticmethod(functions): + return ValueSet(StaticMethodObject(f) for f in functions) + + +class ClassMethodObject(ValueWrapper): + def __init__(self, class_method_obj, function): + super().__init__(class_method_obj) + self._function = function + + def py__get__(self, instance, class_value): + return ValueSet([ + ClassMethodGet(__get__, class_value, self._function) + for __get__ in self._wrapped_value.py__getattribute__('__get__') + ]) + + +class ClassMethodGet(ValueWrapper): + def __init__(self, get_method, klass, function): + super().__init__(get_method) + self._class = klass + self._function = function + + def get_signatures(self): + return [sig.bind(self._function) for sig in self._function.get_signatures()] + + def py__call__(self, arguments): + return self._function.execute(ClassMethodArguments(self._class, arguments)) + + +class ClassMethodArguments(TreeArgumentsWrapper): + def __init__(self, klass, arguments): + super().__init__(arguments) + self._class = klass + + def unpack(self, func=None): + yield None, LazyKnownValue(self._class) + for values in self._wrapped_arguments.unpack(func): + yield values + + +@argument_clinic('sequence, /', want_value=True, want_arguments=True) +def builtins_classmethod(functions, value, arguments): + return ValueSet( + ClassMethodObject(class_method_object, function) + for class_method_object in value.py__call__(arguments=arguments) + for function in functions + ) + + +class PropertyObject(AttributeOverwrite, ValueWrapper): + api_type = 'property' + + def __init__(self, property_obj, function): + super().__init__(property_obj) + self._function = function + + def py__get__(self, instance, class_value): + if instance is None: + return ValueSet([self]) + return self._function.execute_with_values(instance) + + @publish_method('deleter') + @publish_method('getter') + @publish_method('setter') + def _return_self(self, arguments): + return ValueSet({self}) + + +@argument_clinic('func, /', want_callback=True) +def builtins_property(functions, callback): + return ValueSet( + PropertyObject(property_value, function) + for property_value in callback() + for function in functions + ) + + +def collections_namedtuple(value, arguments, callback): + """ + Implementation of the namedtuple function. + + This has to be done by processing the namedtuple class template and + inferring the result. + + """ + inference_state = value.inference_state + + # Process arguments + name = 'jedi_unknown_namedtuple' + for c in _follow_param(inference_state, arguments, 0): + x = get_str_or_none(c) + if x is not None: + name = x + break + + # TODO here we only use one of the types, we should use all. 
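+    # Both spellings of the field list are handled below, e.g. the
+    # hypothetical calls namedtuple('Point', 'x, y') and
+    # namedtuple('Point', ['x', 'y']).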
+ param_values = _follow_param(inference_state, arguments, 1) + if not param_values: + return NO_VALUES + _fields = list(param_values)[0] + string = get_str_or_none(_fields) + if string is not None: + fields = string.replace(',', ' ').split() + elif isinstance(_fields, iterable.Sequence): + fields = [ + get_str_or_none(v) + for lazy_value in _fields.py__iter__() + for v in lazy_value.infer() + ] + fields = [f for f in fields if f is not None] + else: + return NO_VALUES + + # Build source code + code = _NAMEDTUPLE_CLASS_TEMPLATE.format( + typename=name, + field_names=tuple(fields), + num_fields=len(fields), + arg_list=repr(tuple(fields)).replace("'", "")[1:-1], + repr_fmt='', + field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name) + for index, name in enumerate(fields)) + ) + + # Parse source code + module = inference_state.grammar.parse(code) + generated_class = next(module.iter_classdefs()) + parent_context = ModuleValue( + inference_state, module, + code_lines=parso.split_lines(code, keepends=True), + ).as_context() + + return ValueSet([ClassValue(inference_state, parent_context, generated_class)]) + + +class PartialObject(ValueWrapper): + def __init__(self, actual_value, arguments, instance=None): + super().__init__(actual_value) + self._arguments = arguments + self._instance = instance + + def _get_functions(self, unpacked_arguments): + key, lazy_value = next(unpacked_arguments, (None, None)) + if key is not None or lazy_value is None: + debug.warning("Partial should have a proper function %s", self._arguments) + return None + return lazy_value.infer() + + def get_signatures(self): + unpacked_arguments = self._arguments.unpack() + funcs = self._get_functions(unpacked_arguments) + if funcs is None: + return [] + + arg_count = 0 + if self._instance is not None: + arg_count = 1 + keys = set() + for key, _ in unpacked_arguments: + if key is None: + arg_count += 1 + else: + keys.add(key) + return [PartialSignature(s, arg_count, keys) for s in funcs.get_signatures()] + + def py__call__(self, arguments): + funcs = self._get_functions(self._arguments.unpack()) + if funcs is None: + return NO_VALUES + + return funcs.execute( + MergedPartialArguments(self._arguments, arguments, self._instance) + ) + + def py__doc__(self): + """ + In CPython partial does not replace the docstring. However we are still + imitating it here, because we want this docstring to be worth something + for the user. 
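+
+        E.g. for a hypothetical ``p = functools.partial(f, 1)``, asking Jedi
+        for the docstring of ``p`` yields the docstring of ``f``.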
+ """ + callables = self._get_functions(self._arguments.unpack()) + if callables is None: + return '' + for callable_ in callables: + return callable_.py__doc__() + return '' + + def py__get__(self, instance, class_value): + return ValueSet([self]) + + +class PartialMethodObject(PartialObject): + def py__get__(self, instance, class_value): + if instance is None: + return ValueSet([self]) + return ValueSet([PartialObject(self._wrapped_value, self._arguments, instance)]) + + +class PartialSignature(SignatureWrapper): + def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set): + super().__init__(wrapped_signature) + self._skipped_arg_count = skipped_arg_count + self._skipped_arg_set = skipped_arg_set + + def get_param_names(self, resolve_stars=False): + names = self._wrapped_signature.get_param_names()[self._skipped_arg_count:] + return [n for n in names if n.string_name not in self._skipped_arg_set] + + +class MergedPartialArguments(AbstractArguments): + def __init__(self, partial_arguments, call_arguments, instance=None): + self._partial_arguments = partial_arguments + self._call_arguments = call_arguments + self._instance = instance + + def unpack(self, funcdef=None): + unpacked = self._partial_arguments.unpack(funcdef) + # Ignore this one, it's the function. It was checked before that it's + # there. + next(unpacked, None) + if self._instance is not None: + yield None, LazyKnownValue(self._instance) + for key_lazy_value in unpacked: + yield key_lazy_value + for key_lazy_value in self._call_arguments.unpack(funcdef): + yield key_lazy_value + + +def functools_partial(value, arguments, callback): + return ValueSet( + PartialObject(instance, arguments) + for instance in value.py__call__(arguments) + ) + + +def functools_partialmethod(value, arguments, callback): + return ValueSet( + PartialMethodObject(instance, arguments) + for instance in value.py__call__(arguments) + ) + + +@argument_clinic('first, /') +def _return_first_param(firsts): + return firsts + + +@argument_clinic('seq') +def _random_choice(sequences): + return ValueSet.from_sets( + lazy_value.infer() + for sequence in sequences + for lazy_value in sequence.py__iter__() + ) + + +def _dataclass(value, arguments, callback): + for c in _follow_param(value.inference_state, arguments, 0): + if c.is_class(): + return ValueSet([DataclassWrapper(c)]) + else: + return ValueSet([value]) + return NO_VALUES + + +class DataclassWrapper(ValueWrapper, ClassMixin): + def get_signatures(self): + param_names = [] + for cls in reversed(list(self.py__mro__())): + if isinstance(cls, DataclassWrapper): + filter_ = cls.as_context().get_global_filter() + # .values ordering is not guaranteed, at least not in + # Python < 3.6, when dicts where not ordered, which is an + # implementation detail anyway. 
+ for name in sorted(filter_.values(), key=lambda name: name.start_pos): + d = name.tree_name.get_definition() + annassign = d.children[1] + if d.type == 'expr_stmt' and annassign.type == 'annassign': + if len(annassign.children) < 4: + default = None + else: + default = annassign.children[3] + param_names.append(DataclassParamName( + parent_context=cls.parent_context, + tree_name=name.tree_name, + annotation_node=annassign.children[1], + default_node=default, + )) + return [DataclassSignature(cls, param_names)] + + +class DataclassSignature(AbstractSignature): + def __init__(self, value, param_names): + super().__init__(value) + self._param_names = param_names + + def get_param_names(self, resolve_stars=False): + return self._param_names + + +class DataclassParamName(BaseTreeParamName): + def __init__(self, parent_context, tree_name, annotation_node, default_node): + super().__init__(parent_context, tree_name) + self.annotation_node = annotation_node + self.default_node = default_node + + def get_kind(self): + return Parameter.POSITIONAL_OR_KEYWORD + + def infer(self): + if self.annotation_node is None: + return NO_VALUES + else: + return self.parent_context.infer_node(self.annotation_node) + + +class ItemGetterCallable(ValueWrapper): + def __init__(self, instance, args_value_set): + super().__init__(instance) + self._args_value_set = args_value_set + + @repack_with_argument_clinic('item, /') + def py__call__(self, item_value_set): + value_set = NO_VALUES + for args_value in self._args_value_set: + lazy_values = list(args_value.py__iter__()) + if len(lazy_values) == 1: + # TODO we need to add the contextualized value. + value_set |= item_value_set.get_item(lazy_values[0].infer(), None) + else: + value_set |= ValueSet([iterable.FakeList( + self._wrapped_value.inference_state, + [ + LazyKnownValues(item_value_set.get_item(lazy_value.infer(), None)) + for lazy_value in lazy_values + ], + )]) + return value_set + + +@argument_clinic('func, /') +def _functools_wraps(funcs): + return ValueSet(WrapsCallable(func) for func in funcs) + + +class WrapsCallable(ValueWrapper): + # XXX this is not the correct wrapped value, it should be a weird + # partials object, but it doesn't matter, because it's always used as a + # decorator anyway. 
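+    # i.e. ``functools.wraps(f)(wrapper)`` should make ``wrapper`` look like
+    # ``f``; ``Wrapped`` below therefore forwards the original function's name.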
+ @repack_with_argument_clinic('func, /') + def py__call__(self, funcs): + return ValueSet({Wrapped(func, self._wrapped_value) for func in funcs}) + + +class Wrapped(ValueWrapper, FunctionMixin): + def __init__(self, func, original_function): + super().__init__(func) + self._original_function = original_function + + @property + def name(self): + return self._original_function.name + + def get_signature_functions(self): + return [self] + + +@argument_clinic('*args, /', want_value=True, want_arguments=True) +def _operator_itemgetter(args_value_set, value, arguments): + return ValueSet([ + ItemGetterCallable(instance, args_value_set) + for instance in value.py__call__(arguments) + ]) + + +def _create_string_input_function(func): + @argument_clinic('string, /', want_value=True, want_arguments=True) + def wrapper(strings, value, arguments): + def iterate(): + for value in strings: + s = get_str_or_none(value) + if s is not None: + s = func(s) + yield compiled.create_simple_object(value.inference_state, s) + values = ValueSet(iterate()) + if values: + return values + return value.py__call__(arguments) + return wrapper + + +@argument_clinic('*args, /', want_callback=True) +def _os_path_join(args_set, callback): + if len(args_set) == 1: + string = '' + sequence, = args_set + is_first = True + for lazy_value in sequence.py__iter__(): + string_values = lazy_value.infer() + if len(string_values) != 1: + break + s = get_str_or_none(next(iter(string_values))) + if s is None: + break + if not is_first: + string += os.path.sep + string += s + is_first = False + else: + return ValueSet([compiled.create_simple_object(sequence.inference_state, string)]) + return callback() + + +_implemented = { + 'builtins': { + 'getattr': builtins_getattr, + 'type': builtins_type, + 'super': builtins_super, + 'reversed': builtins_reversed, + 'isinstance': builtins_isinstance, + 'next': builtins_next, + 'iter': builtins_iter, + 'staticmethod': builtins_staticmethod, + 'classmethod': builtins_classmethod, + 'property': builtins_property, + }, + 'copy': { + 'copy': _return_first_param, + 'deepcopy': _return_first_param, + }, + 'json': { + 'load': lambda value, arguments, callback: NO_VALUES, + 'loads': lambda value, arguments, callback: NO_VALUES, + }, + 'collections': { + 'namedtuple': collections_namedtuple, + }, + 'functools': { + 'partial': functools_partial, + 'partialmethod': functools_partialmethod, + 'wraps': _functools_wraps, + }, + '_weakref': { + 'proxy': _return_first_param, + }, + 'random': { + 'choice': _random_choice, + }, + 'operator': { + 'itemgetter': _operator_itemgetter, + }, + 'abc': { + # Not sure if this is necessary, but it's used a lot in typeshed and + # for now it's easier to just pass the function through. + 'abstractmethod': _return_first_param, + }, + 'typing': { + # The _alias function just leads to some annoying type inference. + # Therefore, just make it return nothing, which leads to the stubs + # being used instead. This only matters for 3.7+. + '_alias': lambda value, arguments, callback: NO_VALUES, + # runtime_checkable doesn't really change anything and is just + # adding logs for inferring stuff, so we can safely ignore it. + 'runtime_checkable': lambda value, arguments, callback: NO_VALUES, + }, + 'dataclasses': { + # For now this works at least better than Jedi trying to understand it.
+ 'dataclass': _dataclass + }, + # attrs exposes declaration interface roughly compatible with dataclasses + # via attrs.define, attrs.frozen and attrs.mutable + # https://www.attrs.org/en/stable/names.html + 'attr': { + 'define': _dataclass, + 'frozen': _dataclass, + }, + 'attrs': { + 'define': _dataclass, + 'frozen': _dataclass, + }, + 'os.path': { + 'dirname': _create_string_input_function(os.path.dirname), + 'abspath': _create_string_input_function(os.path.abspath), + 'relpath': _create_string_input_function(os.path.relpath), + 'join': _os_path_join, + } +} + + +def get_metaclass_filters(func): + def wrapper(cls, metaclasses, is_instance): + for metaclass in metaclasses: + if metaclass.py__name__() == 'EnumMeta' \ + and metaclass.get_root_context().py__name__() == 'enum': + filter_ = ParserTreeFilter(parent_context=cls.as_context()) + return [DictFilter({ + name.string_name: EnumInstance(cls, name).name + for name in filter_.values() + })] + return func(cls, metaclasses, is_instance) + return wrapper + + +class EnumInstance(LazyValueWrapper): + def __init__(self, cls, name): + self.inference_state = cls.inference_state + self._cls = cls # Corresponds to super().__self__ + self._name = name + self.tree_node = self._name.tree_name + + @safe_property + def name(self): + return ValueName(self, self._name.tree_name) + + def _get_wrapped_value(self): + n = self._name.string_name + if n.startswith('__') and n.endswith('__') or self._name.api_type == 'function': + inferred = self._name.infer() + if inferred: + return next(iter(inferred)) + o, = self.inference_state.builtins_module.py__getattribute__('object') + return o + + value, = self._cls.execute_with_values() + return value + + def get_filters(self, origin_scope=None): + yield DictFilter(dict( + name=compiled.create_simple_object(self.inference_state, self._name.string_name).name, + value=self._name, + )) + for f in self._get_wrapped_value().get_filters(): + yield f + + +def tree_name_to_values(func): + def wrapper(inference_state, context, tree_name): + if tree_name.value == 'sep' and context.is_module() and context.py__name__() == 'os.path': + return ValueSet({ + compiled.create_simple_object(inference_state, os.path.sep), + }) + return func(inference_state, context, tree_name) + return wrapper diff --git a/lib/python3.10/site-packages/jedi/third_party/django-stubs/LICENSE.txt b/lib/python3.10/site-packages/jedi/third_party/django-stubs/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..3ebfb51777a98f238ce51f5983de73ce3ba767c2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/django-stubs/LICENSE.txt @@ -0,0 +1,8 @@ +Copyright (c) Maxim Kurnikov. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/LICENSE b/lib/python3.10/site-packages/jedi/third_party/typeshed/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e5833ae4231d68a2cc6878b790199e1d97900b39 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/LICENSE @@ -0,0 +1,238 @@ +The "typeshed" project is licensed under the terms of the Apache license, as +reproduced below. + += = = = = + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += = = = = + +Parts of typeshed are licensed under different licenses (like the MIT +license), reproduced below. + += = = = = + +The MIT License + +Copyright (c) 2015 Jukka Lehtosalo and contributors + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + += = = = = + diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/enum.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/enum.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1a1dcf005b2d578495d5de3193928cf44cd8a070 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/enum.pyi @@ -0,0 +1,73 @@ +import sys +from abc import ABCMeta +from typing import Any, Dict, Iterator, List, Mapping, Type, TypeVar, Union + +_T = TypeVar("_T") +_S = TypeVar("_S", bound=Type[Enum]) + +# Note: EnumMeta actually subclasses type directly, not ABCMeta. +# This is a temporary workaround to allow multiple creation of enums with builtins +# such as str as mixins, which due to the handling of ABCs of builtin types, cause +# spurious inconsistent metaclass structure. See #1595. +# Structurally: Iterable[T], Reversible[T], Container[T] where T is the enum itself +class EnumMeta(ABCMeta): + def __iter__(self: Type[_T]) -> Iterator[_T]: ... + def __reversed__(self: Type[_T]) -> Iterator[_T]: ... 
+ def __contains__(self: Type[_T], member: object) -> bool: ... + def __getitem__(self: Type[_T], name: str) -> _T: ... + @property + def __members__(self: Type[_T]) -> Mapping[str, _T]: ... + def __len__(self) -> int: ... + +class Enum(metaclass=EnumMeta): + name: str + value: Any + _name_: str + _value_: Any + _member_names_: List[str] # undocumented + _member_map_: Dict[str, Enum] # undocumented + _value2member_map_: Dict[int, Enum] # undocumented + if sys.version_info >= (3, 7): + _ignore_: Union[str, List[str]] + _order_: str + __order__: str + @classmethod + def _missing_(cls, value: object) -> Any: ... + @staticmethod + def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: ... + def __new__(cls: Type[_T], value: object) -> _T: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __dir__(self) -> List[str]: ... + def __format__(self, format_spec: str) -> str: ... + def __hash__(self) -> Any: ... + def __reduce_ex__(self, proto: object) -> Any: ... + +class IntEnum(int, Enum): + value: int + +def unique(enumeration: _S) -> _S: ... + +_auto_null: Any + +# subclassing IntFlag so it picks up all implemented base functions, best modeling behavior of enum.auto() +class auto(IntFlag): + value: Any + +class Flag(Enum): + def __contains__(self: _T, other: _T) -> bool: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __bool__(self) -> bool: ... + def __or__(self: _T, other: _T) -> _T: ... + def __and__(self: _T, other: _T) -> _T: ... + def __xor__(self: _T, other: _T) -> _T: ... + def __invert__(self: _T) -> _T: ... + +class IntFlag(int, Flag): + def __or__(self: _T, other: Union[int, _T]) -> _T: ... + def __and__(self: _T, other: Union[int, _T]) -> _T: ... + def __xor__(self: _T, other: Union[int, _T]) -> _T: ... + __ror__ = __or__ + __rand__ = __and__ + __rxor__ = __xor__ diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/ipaddress.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/ipaddress.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d0c7dd6456e985578a92cf6c5c8f09db7066086 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/ipaddress.pyi @@ -0,0 +1,148 @@ +from typing import Any, Container, Generic, Iterable, Iterator, Optional, SupportsInt, Text, Tuple, TypeVar, overload + +# Undocumented length constants +IPV4LENGTH: int +IPV6LENGTH: int + +_A = TypeVar("_A", IPv4Address, IPv6Address) +_N = TypeVar("_N", IPv4Network, IPv6Network) +_T = TypeVar("_T") + +def ip_address(address: object) -> Any: ... # morally Union[IPv4Address, IPv6Address] +def ip_network(address: object, strict: bool = ...) -> Any: ... # morally Union[IPv4Network, IPv6Network] +def ip_interface(address: object) -> Any: ... # morally Union[IPv4Interface, IPv6Interface] + +class _IPAddressBase: + def __eq__(self, other: Any) -> bool: ... + def __ge__(self: _T, other: _T) -> bool: ... + def __gt__(self: _T, other: _T) -> bool: ... + def __le__(self: _T, other: _T) -> bool: ... + def __lt__(self: _T, other: _T) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + @property + def compressed(self) -> Text: ... + @property + def exploded(self) -> Text: ... + @property + def reverse_pointer(self) -> Text: ... + @property + def version(self) -> int: ... + +class _BaseAddress(_IPAddressBase, SupportsInt): + def __init__(self, address: object) -> None: ... + def __add__(self: _T, other: int) -> _T: ... 
+ def __hash__(self) -> int: ... + def __int__(self) -> int: ... + def __sub__(self: _T, other: int) -> _T: ... + @property + def is_global(self) -> bool: ... + @property + def is_link_local(self) -> bool: ... + @property + def is_loopback(self) -> bool: ... + @property + def is_multicast(self) -> bool: ... + @property + def is_private(self) -> bool: ... + @property + def is_reserved(self) -> bool: ... + @property + def is_unspecified(self) -> bool: ... + @property + def max_prefixlen(self) -> int: ... + @property + def packed(self) -> bytes: ... + +class _BaseNetwork(_IPAddressBase, Container[_A], Iterable[_A], Generic[_A]): + network_address: _A + netmask: _A + def __init__(self, address: object, strict: bool = ...) -> None: ... + def __contains__(self, other: Any) -> bool: ... + def __getitem__(self, n: int) -> _A: ... + def __iter__(self) -> Iterator[_A]: ... + def address_exclude(self: _T, other: _T) -> Iterator[_T]: ... + @property + def broadcast_address(self) -> _A: ... + def compare_networks(self: _T, other: _T) -> int: ... + def hosts(self) -> Iterator[_A]: ... + @property + def is_global(self) -> bool: ... + @property + def is_link_local(self) -> bool: ... + @property + def is_loopback(self) -> bool: ... + @property + def is_multicast(self) -> bool: ... + @property + def is_private(self) -> bool: ... + @property + def is_reserved(self) -> bool: ... + @property + def is_unspecified(self) -> bool: ... + @property + def max_prefixlen(self) -> int: ... + @property + def num_addresses(self) -> int: ... + def overlaps(self, other: _BaseNetwork[_A]) -> bool: ... + @property + def prefixlen(self) -> int: ... + def subnets(self: _T, prefixlen_diff: int = ..., new_prefix: Optional[int] = ...) -> Iterator[_T]: ... + def supernet(self: _T, prefixlen_diff: int = ..., new_prefix: Optional[int] = ...) -> _T: ... + @property + def with_hostmask(self) -> Text: ... + @property + def with_netmask(self) -> Text: ... + @property + def with_prefixlen(self) -> Text: ... + @property + def hostmask(self) -> _A: ... + +class _BaseInterface(_BaseAddress, Generic[_A, _N]): + hostmask: _A + netmask: _A + network: _N + @property + def ip(self) -> _A: ... + @property + def with_hostmask(self) -> Text: ... + @property + def with_netmask(self) -> Text: ... + @property + def with_prefixlen(self) -> Text: ... + +class IPv4Address(_BaseAddress): ... +class IPv4Network(_BaseNetwork[IPv4Address]): ... +class IPv4Interface(IPv4Address, _BaseInterface[IPv4Address, IPv4Network]): ... + +class IPv6Address(_BaseAddress): + @property + def ipv4_mapped(self) -> Optional[IPv4Address]: ... + @property + def is_site_local(self) -> bool: ... + @property + def sixtofour(self) -> Optional[IPv4Address]: ... + @property + def teredo(self) -> Optional[Tuple[IPv4Address, IPv4Address]]: ... + +class IPv6Network(_BaseNetwork[IPv6Address]): + @property + def is_site_local(self) -> bool: ... + +class IPv6Interface(IPv6Address, _BaseInterface[IPv6Address, IPv6Network]): ... + +def v4_int_to_packed(address: int) -> bytes: ... +def v6_int_to_packed(address: int) -> bytes: ... +@overload +def summarize_address_range(first: IPv4Address, last: IPv4Address) -> Iterator[IPv4Network]: ... +@overload +def summarize_address_range(first: IPv6Address, last: IPv6Address) -> Iterator[IPv6Network]: ... +def collapse_addresses(addresses: Iterable[_N]) -> Iterator[_N]: ... +@overload +def get_mixed_type_key(obj: _A) -> Tuple[int, _A]: ... +@overload +def get_mixed_type_key(obj: IPv4Network) -> Tuple[int, IPv4Address, IPv4Address]: ... 
+@overload +def get_mixed_type_key(obj: IPv6Network) -> Tuple[int, IPv6Address, IPv6Address]: ... + +class AddressValueError(ValueError): ... +class NetmaskValueError(ValueError): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pathlib2.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pathlib2.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ce9bbc1d4939649495995a695c9a2d64b64a69a5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pathlib2.pyi @@ -0,0 +1,103 @@ +import os +import sys +from _typeshed import OpenBinaryMode, OpenBinaryModeReading, OpenBinaryModeUpdating, OpenBinaryModeWriting, OpenTextMode +from io import BufferedRandom, BufferedReader, BufferedWriter, FileIO, TextIOWrapper +from types import TracebackType +from typing import IO, Any, BinaryIO, Generator, List, Optional, Sequence, Text, TextIO, Tuple, Type, TypeVar, Union, overload +from typing_extensions import Literal + +_P = TypeVar("_P", bound=PurePath) + +_PurePathBase = object +_PathLike = PurePath + +class PurePath(_PurePathBase): + parts: Tuple[str, ...] + drive: str + root: str + anchor: str + name: str + suffix: str + suffixes: List[str] + stem: str + def __new__(cls: Type[_P], *args: Union[str, _PathLike]) -> _P: ... + def __hash__(self) -> int: ... + def __lt__(self, other: PurePath) -> bool: ... + def __le__(self, other: PurePath) -> bool: ... + def __gt__(self, other: PurePath) -> bool: ... + def __ge__(self, other: PurePath) -> bool: ... + def __truediv__(self: _P, key: Union[str, _PathLike]) -> _P: ... + def __rtruediv__(self: _P, key: Union[str, _PathLike]) -> _P: ... + def __div__(self: _P, key: Union[str, PurePath]) -> _P: ... + def __bytes__(self) -> bytes: ... + def as_posix(self) -> str: ... + def as_uri(self) -> str: ... + def is_absolute(self) -> bool: ... + def is_reserved(self) -> bool: ... + def match(self, path_pattern: str) -> bool: ... + def relative_to(self: _P, *other: Union[str, _PathLike]) -> _P: ... + def with_name(self: _P, name: str) -> _P: ... + def with_suffix(self: _P, suffix: str) -> _P: ... + def joinpath(self: _P, *other: Union[str, _PathLike]) -> _P: ... + @property + def parents(self: _P) -> Sequence[_P]: ... + @property + def parent(self: _P) -> _P: ... + +class PurePosixPath(PurePath): ... +class PureWindowsPath(PurePath): ... + +class Path(PurePath): + def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ... + def __enter__(self) -> Path: ... + def __exit__( + self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType] + ) -> Optional[bool]: ... + @classmethod + def cwd(cls: Type[_P]) -> _P: ... + def stat(self) -> os.stat_result: ... + def chmod(self, mode: int) -> None: ... + def exists(self) -> bool: ... + def glob(self, pattern: str) -> Generator[Path, None, None]: ... + def group(self) -> str: ... + def is_dir(self) -> bool: ... + def is_file(self) -> bool: ... + def is_symlink(self) -> bool: ... + def is_socket(self) -> bool: ... + def is_fifo(self) -> bool: ... + def is_block_device(self) -> bool: ... + def is_char_device(self) -> bool: ... + def iterdir(self) -> Generator[Path, None, None]: ... + def lchmod(self, mode: int) -> None: ... + def lstat(self) -> os.stat_result: ... + def mkdir(self, mode: int = ..., parents: bool = ...) -> None: ... 
+ # Adapted from _io.open + def open( + self, + mode: Text = ..., + buffering: int = ..., + encoding: Optional[Text] = ..., + errors: Optional[Text] = ..., + newline: Optional[Text] = ..., + ) -> IO[Any]: ... + def owner(self) -> str: ... + def rename(self, target: Union[str, PurePath]) -> None: ... + def replace(self, target: Union[str, PurePath]) -> None: ... + def resolve(self: _P) -> _P: ... + def rglob(self, pattern: str) -> Generator[Path, None, None]: ... + def rmdir(self) -> None: ... + def symlink_to(self, target: Union[str, Path], target_is_directory: bool = ...) -> None: ... + def touch(self, mode: int = ..., exist_ok: bool = ...) -> None: ... + def unlink(self) -> None: ... + @classmethod + def home(cls: Type[_P]) -> _P: ... + def absolute(self: _P) -> _P: ... + def expanduser(self: _P) -> _P: ... + def read_bytes(self) -> bytes: ... + def read_text(self, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> str: ... + def samefile(self, other_path: Union[str, bytes, int, Path]) -> bool: ... + def write_bytes(self, data: bytes) -> int: ... + def write_text(self, data: str, encoding: Optional[str] = ..., errors: Optional[str] = ...) -> int: ... + +class PosixPath(Path, PurePosixPath): ... +class WindowsPath(Path, PureWindowsPath): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pymssql.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pymssql.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4f625626f6c7d8eb34b643c17e1364726a83ba54 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2/pymssql.pyi @@ -0,0 +1,44 @@ +from datetime import date, datetime, time +from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union + +Scalar = Union[int, float, str, datetime, date, time] +Result = Union[Tuple[Scalar, ...], Dict[str, Scalar]] + +class Connection(object): + def __init__(self, user, password, host, database, timeout, login_timeout, charset, as_dict) -> None: ... + def autocommit(self, status: bool) -> None: ... + def close(self) -> None: ... + def commit(self) -> None: ... + def cursor(self) -> Cursor: ... + def rollback(self) -> None: ... + +class Cursor(object): + def __init__(self) -> None: ... + def __iter__(self): ... + def __next__(self) -> Any: ... + def callproc(self, procname: str, **kwargs) -> None: ... + def close(self) -> None: ... + def execute(self, stmt: str, params: Optional[Union[Scalar, Tuple[Scalar, ...], Dict[str, Scalar]]]) -> None: ... + def executemany(self, stmt: str, params: Optional[Sequence[Tuple[Scalar, ...]]]) -> None: ... + def fetchall(self) -> List[Result]: ... + def fetchmany(self, size: Optional[int]) -> List[Result]: ... + def fetchone(self) -> Result: ... + +def connect( + server: Optional[str], + user: Optional[str], + password: Optional[str], + database: Optional[str], + timeout: Optional[int], + login_timeout: Optional[int], + charset: Optional[str], + as_dict: Optional[bool], + host: Optional[str], + appname: Optional[str], + port: Optional[str], + conn_properties: Optional[Union[str, Sequence[str]]], + autocommit: Optional[bool], + tds_version: Optional[str], +) -> Connection: ... +def get_max_connections() -> int: ... +def set_max_connections(n: int) -> None: ... 
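Taken together, the pymssql stubs above describe the usual DB-API flow: connect() builds a Connection, cursor() hands out a Cursor, and execute() accepts a scalar, a tuple, or a dict of parameters. A minimal sketch of that flow follows; the server, credentials, and `users` table are placeholders, and it assumes pymssql is installed with a reachable SQL Server instance:

```python
# Minimal DB-API-style flow matching the pymssql stubs above.
# Assumptions: pymssql is installed, a SQL Server is reachable at these
# placeholder credentials, and a `users` table exists.
import pymssql

conn = pymssql.connect(server="localhost", user="sa",
                       password="secret", database="testdb")
try:
    cur = conn.cursor()
    # `params` here is a Tuple[Scalar, ...], one of the forms the stub allows.
    cur.execute("SELECT id, name FROM users WHERE id = %s", (1,))
    row = cur.fetchone()  # a Result: Tuple[Scalar, ...] (Dict[str, Scalar] with as_dict=True)
    print(row)
finally:
    conn.close()
```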
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/backports_abc.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/backports_abc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b48ae33667e0793e6790141de81aaaf26263e328 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/backports_abc.pyi @@ -0,0 +1,15 @@ +from typing import Any + +def mk_gen(): ... +def mk_awaitable(): ... +def mk_coroutine(): ... + +Generator: Any +Awaitable: Any +Coroutine: Any + +def isawaitable(obj): ... + +PATCHED: Any + +def patch(patch_inspect: bool = ...): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/certifi.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/certifi.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b4b5ba7d14f7d72ad6636609ba44129c3406962f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/certifi.pyi @@ -0,0 +1 @@ +def where() -> str: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/croniter.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/croniter.pyi new file mode 100644 index 0000000000000000000000000000000000000000..293825e01e2bc97398e559a5b15ade90d048c564 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/croniter.pyi @@ -0,0 +1,43 @@ +import datetime +from typing import Any, Dict, Iterator, List, Optional, Text, Tuple, Type, TypeVar, Union + +_RetType = Union[Type[float], Type[datetime.datetime]] +_SelfT = TypeVar("_SelfT", bound=croniter) + +class CroniterError(ValueError): ... +class CroniterBadCronError(CroniterError): ... +class CroniterBadDateError(CroniterError): ... +class CroniterNotAlphaError(CroniterError): ... + +class croniter(Iterator[Any]): + MONTHS_IN_YEAR: int + RANGES: Tuple[Tuple[int, int], ...] + DAYS: Tuple[int, ...] + ALPHACONV: Tuple[Dict[str, Any], ...] + LOWMAP: Tuple[Dict[int, Any], ...] + bad_length: str + tzinfo: Optional[datetime.tzinfo] + cur: float + expanded: List[List[str]] + start_time: float + dst_start_time: float + nth_weekday_of_month: Dict[str, Any] + def __init__( + self, expr_format: Text, start_time: Optional[Union[float, datetime.datetime]] = ..., ret_type: Optional[_RetType] = ... + ) -> None: ... + # Most return values depend on ret_type, which can be passed both as a method argument and as + # a constructor argument. + def get_next(self, ret_type: Optional[_RetType] = ...) -> Any: ... + def get_prev(self, ret_type: Optional[_RetType] = ...) -> Any: ... + def get_current(self, ret_type: Optional[_RetType] = ...) -> Any: ... + def __iter__(self: _SelfT) -> _SelfT: ... + def __next__(self, ret_type: Optional[_RetType] = ...) -> Any: ... + def next(self, ret_type: Optional[_RetType] = ...) -> Any: ... + def all_next(self, ret_type: Optional[_RetType] = ...) -> Iterator[Any]: ... + def all_prev(self, ret_type: Optional[_RetType] = ...) -> Iterator[Any]: ... + def iter(self, ret_type: Optional[_RetType] = ...) -> Iterator[Any]: ... + def is_leap(self, year: int) -> bool: ... + @classmethod + def expand(cls, expr_format: Text) -> Tuple[List[List[str]], Dict[str, Any]]: ... + @classmethod + def is_valid(cls, expression: Text) -> bool: ...
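The `ret_type` comment above is easiest to see in use: get_next() and friends return float timestamps by default and datetimes when `datetime` is passed, which is why the stubs fall back to Any. A short sketch, assuming the croniter package is installed:

```python
# Demonstrates the ret_type switching that forces the Any return types above.
from datetime import datetime
from croniter import croniter

base = datetime(2024, 1, 1, 0, 0)
it = croniter("*/15 * * * *", base)     # every 15 minutes, counting from `base`
print(it.get_next(datetime))            # 2024-01-01 00:15:00 (ret_type per call)
print(it.get_next())                    # a float timestamp, the default ret_type
print(croniter.is_valid("61 * * * *"))  # False: minute field out of range
```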
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/dateparser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/dateparser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..dcb053ebdfb675b4af8a09194da0727b6391384d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/dateparser.pyi @@ -0,0 +1,14 @@ +import datetime +from typing import Any, List, Mapping, Optional, Set, Tuple, Union + +__version__: str + +def parse( + date_string: str, + date_formats: Optional[Union[List[str], Tuple[str], Set[str]]] = ..., + languages: Optional[Union[List[str], Tuple[str], Set[str]]] = ..., + locales: Optional[Union[List[str], Tuple[str], Set[str]]] = ..., + region: Optional[str] = ..., + settings: Optional[Mapping[str, Any]] = ..., +) -> Optional[datetime.datetime]: ... +def __getattr__(name: str) -> Any: ... # incomplete diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/decorator.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/decorator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0d6238e7574fcec014422d58333e9a0edb602695 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/decorator.pyi @@ -0,0 +1,81 @@ +import sys +from typing import Any, Callable, Dict, Iterator, List, NamedTuple, Optional, Pattern, Text, Tuple, TypeVar + +_C = TypeVar("_C", bound=Callable[..., Any]) +_Func = TypeVar("_Func", bound=Callable[..., Any]) +_T = TypeVar("_T") + +def get_init(cls): ... + +if sys.version_info >= (3,): + from inspect import getfullargspec as getfullargspec, iscoroutinefunction as iscoroutinefunction +else: + class FullArgSpec(NamedTuple): + args: List[str] + varargs: Optional[str] + varkw: Optional[str] + defaults: Tuple[Any, ...] + kwonlyargs: List[str] + kwonlydefaults: Dict[str, Any] + annotations: Dict[str, Any] + def iscoroutinefunction(f: Callable[..., Any]) -> bool: ... + def getfullargspec(func: Any) -> FullArgSpec: ... + +if sys.version_info >= (3, 2): + from contextlib import _GeneratorContextManager +else: + from contextlib import GeneratorContextManager as _GeneratorContextManager + +DEF: Pattern[str] + +class FunctionMaker(object): + args: List[Text] + varargs: Optional[Text] + varkw: Optional[Text] + defaults: Tuple[Any, ...] + kwonlyargs: List[Text] + kwonlydefaults: Optional[Text] + shortsignature: Optional[Text] + name: Text + doc: Optional[Text] + module: Optional[Text] + annotations: Dict[Text, Any] + signature: Text + dict: Dict[Text, Any] + def __init__( + self, + func: Optional[Callable[..., Any]] = ..., + name: Optional[Text] = ..., + signature: Optional[Text] = ..., + defaults: Optional[Tuple[Any, ...]] = ..., + doc: Optional[Text] = ..., + module: Optional[Text] = ..., + funcdict: Optional[Dict[Text, Any]] = ..., + ) -> None: ... + def update(self, func: Any, **kw: Any) -> None: ... + def make( + self, src_templ: Text, evaldict: Optional[Dict[Text, Any]] = ..., addsource: bool = ..., **attrs: Any + ) -> Callable[..., Any]: ... + @classmethod + def create( + cls, + obj: Any, + body: Text, + evaldict: Dict[Text, Any], + defaults: Optional[Tuple[Any, ...]] = ..., + doc: Optional[Text] = ..., + module: Optional[Text] = ..., + addsource: bool = ..., + **attrs: Any, + ) -> Callable[..., Any]: ... + +def decorate(func: _Func, caller: Callable[..., Any], extras: Any = ...) -> _Func: ... 
+def decorator( + caller: Callable[..., Any], _func: Optional[Callable[..., Any]] = ... +) -> Callable[[Callable[..., Any]], Callable[..., Any]]: ... + +class ContextManager(_GeneratorContextManager[_T]): + def __call__(self, func: _C) -> _C: ... + +def contextmanager(func: Callable[..., Iterator[_T]]) -> Callable[..., ContextManager[_T]]: ... +def dispatch_on(*dispatch_args: Any) -> Callable[[Callable[..., Any]], Callable[..., Any]]: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..054778ca78c35e51dbbfcf79b3e664c19bd60169 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/__init__.pyi @@ -0,0 +1,13 @@ +from .core import ( + demojize as demojize, + emoji_count as emoji_count, + emoji_lis as emoji_lis, + emojize as emojize, + get_emoji_regexp as get_emoji_regexp, +) +from .unicode_codes import ( + EMOJI_ALIAS_UNICODE as EMOJI_ALIAS_UNICODE, + EMOJI_UNICODE as EMOJI_UNICODE, + UNICODE_EMOJI as UNICODE_EMOJI, + UNICODE_EMOJI_ALIAS as UNICODE_EMOJI_ALIAS, +) diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/core.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/core.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3372d4c21072e2fbd8a24b0fefe0a9c73773c37c --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/core.pyi @@ -0,0 +1,9 @@ +from typing import Dict, List, Optional, Pattern, Text, Tuple, Union + +_DEFAULT_DELIMITER: str + +def emojize(string: str, use_aliases: bool = ..., delimiters: Tuple[str, str] = ...) -> str: ... +def demojize(string: str, delimiters: Tuple[str, str] = ...) -> str: ... +def get_emoji_regexp() -> Pattern[Text]: ... +def emoji_lis(string: str) -> List[Dict[str, Union[int, str]]]: ... +def emoji_count(string: str) -> int: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ee1403cfddefd043d740946d260bfae4a283c756 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/emoji/unicode_codes.pyi @@ -0,0 +1,6 @@ +from typing import Dict, Text + +EMOJI_ALIAS_UNICODE: Dict[Text, Text] +EMOJI_UNICODE: Dict[Text, Text] +UNICODE_EMOJI: Dict[Text, Text] +UNICODE_EMOJI_ALIAS: Dict[Text, Text] diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/first.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/first.pyi new file mode 100644 index 0000000000000000000000000000000000000000..32f4736bc5892da7d831bd62a41b61ccc5a5839a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/first.pyi @@ -0,0 +1,12 @@ +from typing import Any, Callable, Iterable, Optional, TypeVar, Union, overload + +_T = TypeVar("_T") +_S = TypeVar("_S") +@overload +def first(iterable: Iterable[_T]) -> Optional[_T]: ... +@overload +def first(iterable: Iterable[_T], default: _S) -> Union[_T, _S]: ... +@overload +def first(iterable: Iterable[_T], default: _S, key: Optional[Callable[[_T], Any]]) -> Union[_T, _S]: ... 
+@overload +def first(iterable: Iterable[_T], *, key: Optional[Callable[[_T], Any]]) -> Optional[_T]: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4e66d4d021919ed70975e0874f527f62684da658 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/__init__.pyi @@ -0,0 +1,41 @@ +from jinja2 import Markup as Markup, escape as escape +from werkzeug.exceptions import abort as abort +from werkzeug.utils import redirect as redirect + +from .app import Flask as Flask +from .blueprints import Blueprint as Blueprint +from .config import Config as Config +from .ctx import ( + after_this_request as after_this_request, + copy_current_request_context as copy_current_request_context, + has_app_context as has_app_context, + has_request_context as has_request_context, +) +from .globals import current_app as current_app, g as g, request as request, session as session +from .helpers import ( + flash as flash, + get_flashed_messages as get_flashed_messages, + get_template_attribute as get_template_attribute, + make_response as make_response, + safe_join as safe_join, + send_file as send_file, + send_from_directory as send_from_directory, + stream_with_context as stream_with_context, + url_for as url_for, +) +from .json import jsonify as jsonify +from .signals import ( + appcontext_popped as appcontext_popped, + appcontext_pushed as appcontext_pushed, + appcontext_tearing_down as appcontext_tearing_down, + before_render_template as before_render_template, + got_request_exception as got_request_exception, + message_flashed as message_flashed, + request_finished as request_finished, + request_started as request_started, + request_tearing_down as request_tearing_down, + signals_available as signals_available, + template_rendered as template_rendered, +) +from .templating import render_template as render_template, render_template_string as render_template_string +from .wrappers import Request as Request, Response as Response diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/app.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/app.pyi new file mode 100644 index 0000000000000000000000000000000000000000..30a476320d7bebcae7d98b17f81f7611844ff778 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/app.pyi @@ -0,0 +1,195 @@ +from datetime import timedelta +from types import TracebackType +from typing import ( + Any, + ByteString, + Callable, + ContextManager, + Dict, + Iterable, + List, + NoReturn, + Optional, + Text, + Tuple, + Type, + TypeVar, + Union, +) + +from .blueprints import Blueprint +from .config import Config +from .ctx import AppContext, RequestContext +from .helpers import _PackageBoundObject +from .testing import FlaskClient +from .wrappers import Response + +def setupmethod(f: Any): ... 
+ +_T = TypeVar("_T") + +_ExcInfo = Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]] +_StartResponse = Callable[[str, List[Tuple[str, str]], Optional[_ExcInfo]], Callable[[bytes], Any]] +_WSGICallable = Callable[[Dict[Text, Any], _StartResponse], Iterable[bytes]] + +_Status = Union[str, int] +_Headers = Union[Dict[Any, Any], List[Tuple[Any, Any]]] +_Body = Union[Text, ByteString, Dict[Text, Any], Response, _WSGICallable] +_ViewFuncReturnType = Union[_Body, Tuple[_Body, _Status, _Headers], Tuple[_Body, _Status], Tuple[_Body, _Headers]] + +_ViewFunc = Union[Callable[..., NoReturn], Callable[..., _ViewFuncReturnType]] +_VT = TypeVar("_VT", bound=_ViewFunc) + +class Flask(_PackageBoundObject): + request_class: type = ... + response_class: type = ... + jinja_environment: type = ... + app_ctx_globals_class: type = ... + config_class: Type[Config] = ... + testing: Any = ... + secret_key: Union[Text, bytes, None] = ... + session_cookie_name: Any = ... + permanent_session_lifetime: timedelta = ... + send_file_max_age_default: timedelta = ... + use_x_sendfile: Any = ... + json_encoder: Any = ... + json_decoder: Any = ... + jinja_options: Any = ... + default_config: Any = ... + url_rule_class: type = ... + test_client_class: type = ... + test_cli_runner_class: type = ... + session_interface: Any = ... + import_name: str = ... + template_folder: str = ... + root_path: Union[str, Text] = ... + static_url_path: Any = ... + static_folder: Optional[str] = ... + instance_path: Union[str, Text] = ... + config: Config = ... + view_functions: Any = ... + error_handler_spec: Any = ... + url_build_error_handlers: Any = ... + before_request_funcs: Dict[Optional[str], List[Callable[[], Any]]] = ... + before_first_request_funcs: List[Callable[[], None]] = ... + after_request_funcs: Dict[Optional[str], List[Callable[[Response], Response]]] = ... + teardown_request_funcs: Dict[Optional[str], List[Callable[[Optional[Exception]], Any]]] = ... + teardown_appcontext_funcs: List[Callable[[Optional[Exception]], Any]] = ... + url_value_preprocessors: Any = ... + url_default_functions: Any = ... + template_context_processors: Any = ... + shell_context_processors: Any = ... + blueprints: Any = ... + extensions: Any = ... + url_map: Any = ... + subdomain_matching: Any = ... + cli: Any = ... + def __init__( + self, + import_name: str, + static_url_path: Optional[str] = ..., + static_folder: Optional[str] = ..., + static_host: Optional[str] = ..., + host_matching: bool = ..., + subdomain_matching: bool = ..., + template_folder: str = ..., + instance_path: Optional[str] = ..., + instance_relative_config: bool = ..., + root_path: Optional[str] = ..., + ) -> None: ... + @property + def name(self) -> str: ... + @property + def propagate_exceptions(self) -> bool: ... + @property + def preserve_context_on_exception(self): ... + @property + def logger(self): ... + @property + def jinja_env(self): ... + @property + def got_first_request(self) -> bool: ... + def make_config(self, instance_relative: bool = ...): ... + def auto_find_instance_path(self): ... + def open_instance_resource(self, resource: Union[str, Text], mode: str = ...): ... + templates_auto_reload: Any = ... + def create_jinja_environment(self): ... + def create_global_jinja_loader(self): ... + def select_jinja_autoescape(self, filename: Any): ... + def update_template_context(self, context: Any) -> None: ... + def make_shell_context(self): ... + env: Optional[str] = ... + debug: bool = ... 
+ def run( + self, + host: Optional[str] = ..., + port: Optional[Union[int, str]] = ..., + debug: Optional[bool] = ..., + load_dotenv: bool = ..., + **options: Any, + ) -> None: ... + def test_client(self, use_cookies: bool = ..., **kwargs: Any) -> FlaskClient[Response]: ... + def test_cli_runner(self, **kwargs: Any): ... + def open_session(self, request: Any): ... + def save_session(self, session: Any, response: Any): ... + def make_null_session(self): ... + def register_blueprint(self, blueprint: Blueprint, **options: Any) -> None: ... + def iter_blueprints(self): ... + def add_url_rule( + self, + rule: str, + endpoint: Optional[str] = ..., + view_func: _ViewFunc = ..., + provide_automatic_options: Optional[bool] = ..., + **options: Any, + ) -> None: ... + def route(self, rule: str, **options: Any) -> Callable[[_VT], _VT]: ... + def endpoint(self, endpoint: str) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... + def errorhandler( + self, code_or_exception: Union[int, Type[Exception]] + ) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... + def register_error_handler(self, code_or_exception: Union[int, Type[Exception]], f: Callable[..., Any]) -> None: ... + def template_filter(self, name: Optional[Any] = ...): ... + def add_template_filter(self, f: Any, name: Optional[Any] = ...) -> None: ... + def template_test(self, name: Optional[Any] = ...): ... + def add_template_test(self, f: Any, name: Optional[Any] = ...) -> None: ... + def template_global(self, name: Optional[Any] = ...): ... + def add_template_global(self, f: Any, name: Optional[Any] = ...) -> None: ... + def before_request(self, f: Callable[[], _T]) -> Callable[[], _T]: ... + def before_first_request(self, f: Callable[[], _T]) -> Callable[[], _T]: ... + def after_request(self, f: Callable[[Response], Response]) -> Callable[[Response], Response]: ... + def teardown_request(self, f: Callable[[Optional[Exception]], _T]) -> Callable[[Optional[Exception]], _T]: ... + def teardown_appcontext(self, f: Callable[[Optional[Exception]], _T]) -> Callable[[Optional[Exception]], _T]: ... + def context_processor(self, f: Any): ... + def shell_context_processor(self, f: Any): ... + def url_value_preprocessor(self, f: Any): ... + def url_defaults(self, f: Any): ... + def handle_http_exception(self, e: Any): ... + def trap_http_exception(self, e: Any): ... + def handle_user_exception(self, e: Any): ... + def handle_exception(self, e: Any): ... + def log_exception(self, exc_info: Any) -> None: ... + def raise_routing_exception(self, request: Any) -> None: ... + def dispatch_request(self): ... + def full_dispatch_request(self): ... + def finalize_request(self, rv: Any, from_error_handler: bool = ...): ... + def try_trigger_before_first_request_functions(self): ... + def make_default_options_response(self): ... + def should_ignore_error(self, error: Any): ... + def make_response(self, rv: Any): ... + def create_url_adapter(self, request: Any): ... + def inject_url_defaults(self, endpoint: Any, values: Any) -> None: ... + def handle_url_build_error(self, error: Any, endpoint: Any, values: Any): ... + def preprocess_request(self): ... + def process_response(self, response: Any): ... + def do_teardown_request(self, exc: Any = ...) -> None: ... + def do_teardown_appcontext(self, exc: Any = ...) -> None: ... + def app_context(self) -> AppContext: ... + def request_context(self, environ: Any): ... + def test_request_context(self, *args: Any, **kwargs: Any) -> ContextManager[RequestContext]: ... 
+ def wsgi_app(self, environ: Any, start_response: Any): ... + def __call__(self, environ: Any, start_response: Any): ... + # These are not preset at runtime but we add them since monkeypatching this + # class is quite common. + def __setattr__(self, name: str, value: Any): ... + def __getattr__(self, name: str): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/blueprints.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/blueprints.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a003452bc311f8325b1a8074213852c736109108 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/blueprints.pyi @@ -0,0 +1,80 @@ +from typing import Any, Callable, Optional, Type, TypeVar, Union + +from .app import _ViewFunc +from .helpers import _PackageBoundObject + +_T = TypeVar("_T") +_VT = TypeVar("_VT", bound=_ViewFunc) + +class _Sentinel(object): ... + +class BlueprintSetupState: + app: Any = ... + blueprint: Any = ... + options: Any = ... + first_registration: Any = ... + subdomain: Any = ... + url_prefix: Any = ... + url_defaults: Any = ... + def __init__(self, blueprint: Any, app: Any, options: Any, first_registration: Any) -> None: ... + def add_url_rule(self, rule: str, endpoint: Optional[str] = ..., view_func: _ViewFunc = ..., **options: Any) -> None: ... + +class Blueprint(_PackageBoundObject): + warn_on_modifications: bool = ... + json_encoder: Any = ... + json_decoder: Any = ... + import_name: str = ... + template_folder: Optional[str] = ... + root_path: str = ... + name: str = ... + url_prefix: Optional[str] = ... + subdomain: Optional[str] = ... + static_folder: Optional[str] = ... + static_url_path: Optional[str] = ... + deferred_functions: Any = ... + url_values_defaults: Any = ... + cli_group: Union[Optional[str], _Sentinel] = ... + def __init__( + self, + name: str, + import_name: str, + static_folder: Optional[str] = ..., + static_url_path: Optional[str] = ..., + template_folder: Optional[str] = ..., + url_prefix: Optional[str] = ..., + subdomain: Optional[str] = ..., + url_defaults: Optional[Any] = ..., + root_path: Optional[str] = ..., + cli_group: Union[Optional[str], _Sentinel] = ..., + ) -> None: ... + def record(self, func: Any) -> None: ... + def record_once(self, func: Any): ... + def make_setup_state(self, app: Any, options: Any, first_registration: bool = ...): ... + def register(self, app: Any, options: Any, first_registration: bool = ...) -> None: ... + def route(self, rule: str, **options: Any) -> Callable[[_VT], _VT]: ... + def add_url_rule(self, rule: str, endpoint: Optional[str] = ..., view_func: _ViewFunc = ..., **options: Any) -> None: ... + def endpoint(self, endpoint: str) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... + def app_template_filter(self, name: Optional[Any] = ...): ... + def add_app_template_filter(self, f: Any, name: Optional[Any] = ...) -> None: ... + def app_template_test(self, name: Optional[Any] = ...): ... + def add_app_template_test(self, f: Any, name: Optional[Any] = ...) -> None: ... + def app_template_global(self, name: Optional[Any] = ...): ... + def add_app_template_global(self, f: Any, name: Optional[Any] = ...) -> None: ... + def before_request(self, f: Any): ... + def before_app_request(self, f: Any): ... + def before_app_first_request(self, f: Any): ... + def after_request(self, f: Any): ... + def after_app_request(self, f: Any): ... + def teardown_request(self, f: Any): ... 
+ def teardown_app_request(self, f: Any): ... + def context_processor(self, f: Any): ... + def app_context_processor(self, f: Any): ... + def app_errorhandler(self, code: Any): ... + def url_value_preprocessor(self, f: Any): ... + def url_defaults(self, f: Any): ... + def app_url_value_preprocessor(self, f: Any): ... + def app_url_defaults(self, f: Any): ... + def errorhandler( + self, code_or_exception: Union[int, Type[Exception]] + ) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... + def register_error_handler(self, code_or_exception: Union[int, Type[Exception]], f: Callable[..., Any]) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/cli.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/cli.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9083ebd5db25d818e1cedf35f29ea09f9d3eb5ed --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/cli.pyi @@ -0,0 +1,68 @@ +from typing import Any, Optional + +import click + +class NoAppException(click.UsageError): ... + +def find_best_app(script_info: Any, module: Any): ... +def call_factory(script_info: Any, app_factory: Any, arguments: Any = ...): ... +def find_app_by_string(script_info: Any, module: Any, app_name: Any): ... +def prepare_import(path: Any): ... +def locate_app(script_info: Any, module_name: Any, app_name: Any, raise_if_not_found: bool = ...): ... +def get_version(ctx: Any, param: Any, value: Any): ... + +version_option: Any + +class DispatchingApp: + loader: Any = ... + def __init__(self, loader: Any, use_eager_loading: bool = ...) -> None: ... + def __call__(self, environ: Any, start_response: Any): ... + +class ScriptInfo: + app_import_path: Any = ... + create_app: Any = ... + data: Any = ... + def __init__(self, app_import_path: Optional[Any] = ..., create_app: Optional[Any] = ...) -> None: ... + def load_app(self): ... + +pass_script_info: Any + +def with_appcontext(f: Any): ... + +class AppGroup(click.Group): + def command(self, *args: Any, **kwargs: Any): ... + def group(self, *args: Any, **kwargs: Any): ... + +class FlaskGroup(AppGroup): + create_app: Any = ... + load_dotenv: Any = ... + def __init__( + self, + add_default_commands: bool = ..., + create_app: Optional[Any] = ..., + add_version_option: bool = ..., + load_dotenv: bool = ..., + **extra: Any, + ) -> None: ... + def get_command(self, ctx: Any, name: Any): ... + def list_commands(self, ctx: Any): ... + def main(self, *args: Any, **kwargs: Any): ... + +def load_dotenv(path: Optional[Any] = ...): ... +def show_server_banner(env: Any, debug: Any, app_import_path: Any, eager_loading: Any): ... + +class CertParamType(click.ParamType): + name: str = ... + path_type: Any = ... + def __init__(self) -> None: ... + def convert(self, value: Any, param: Any, ctx: Any): ... + +def run_command( + info: Any, host: Any, port: Any, reload: Any, debugger: Any, eager_loading: Any, with_threads: Any, cert: Any +) -> None: ... +def shell_command() -> None: ... +def routes_command(sort: Any, all_methods: Any): ... + +cli: Any + +def main(as_module: bool = ...) -> None: ... 
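The Flask, Blueprint, and CLI surfaces stubbed above compose in the usual way: Blueprint.route returns the view function unchanged (the _VT bound), register_blueprint wires the blueprint into the app, and the AppGroup exposed as Flask.cli accepts extra commands. A sketch under those stubs, with illustrative app, blueprint, and command names; requires Flask installed:

```python
# Illustrative wiring of the Flask/Blueprint/AppGroup interfaces stubbed above.
from flask import Blueprint, Flask, jsonify

bp = Blueprint("api", __name__, url_prefix="/api")

@bp.route("/ping")                   # Blueprint.route: Callable[[_VT], _VT]
def ping():
    return jsonify(status="ok")      # a valid _Body / _ViewFuncReturnType

app = Flask(__name__)
app.register_blueprint(bp)           # Flask.register_blueprint(blueprint, **options)

@app.cli.command("greet")            # Flask.cli is the AppGroup from cli.pyi
def greet():
    print("hello from a custom command")

if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000, debug=True)
```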
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/config.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/config.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2b005422e267f7c65e06478eedc6e13abfd293e7 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/config.pyi @@ -0,0 +1,18 @@ +from typing import Any, Dict, Optional + +class ConfigAttribute: + __name__: Any = ... + get_converter: Any = ... + def __init__(self, name: Any, get_converter: Optional[Any] = ...) -> None: ... + def __get__(self, obj: Any, type: Optional[Any] = ...): ... + def __set__(self, obj: Any, value: Any) -> None: ... + +class Config(Dict[str, Any]): + root_path: Any = ... + def __init__(self, root_path: Any, defaults: Optional[Any] = ...) -> None: ... + def from_envvar(self, variable_name: Any, silent: bool = ...): ... + def from_pyfile(self, filename: Any, silent: bool = ...): ... + def from_object(self, obj: Any) -> None: ... + def from_json(self, filename: Any, silent: bool = ...): ... + def from_mapping(self, *mapping: Any, **kwargs: Any): ... + def get_namespace(self, namespace: Any, lowercase: bool = ..., trim_namespace: bool = ...): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/ctx.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/ctx.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c3dd934a3854cb6676da1d03e5d8e26fd60dcda4 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/ctx.pyi @@ -0,0 +1,40 @@ +from typing import Any, Optional + +class _AppCtxGlobals: + def get(self, name: Any, default: Optional[Any] = ...): ... + def pop(self, name: Any, default: Any = ...): ... + def setdefault(self, name: Any, default: Optional[Any] = ...): ... + def __contains__(self, item: Any): ... + def __iter__(self): ... + +def after_this_request(f: Any): ... +def copy_current_request_context(f: Any): ... +def has_request_context(): ... +def has_app_context(): ... + +class AppContext: + app: Any = ... + url_adapter: Any = ... + g: Any = ... + def __init__(self, app: Any) -> None: ... + def push(self) -> None: ... + def pop(self, exc: Any = ...) -> None: ... + def __enter__(self): ... + def __exit__(self, exc_type: Any, exc_value: Any, tb: Any) -> None: ... + +class RequestContext: + app: Any = ... + request: Any = ... + url_adapter: Any = ... + flashes: Any = ... + session: Any = ... + preserved: bool = ... + def __init__(self, app: Any, environ: Any, request: Optional[Any] = ...) -> None: ... + g: Any = ... + def copy(self): ... + def match_request(self) -> None: ... + def push(self) -> None: ... + def pop(self, exc: Any = ...) -> None: ... + def auto_pop(self, exc: Any) -> None: ... + def __enter__(self): ... + def __exit__(self, exc_type: Any, exc_value: Any, tb: Any) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/debughelpers.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/debughelpers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..45255a955d2fc27ec80f0849879934c3fb44181f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/debughelpers.pyi @@ -0,0 +1,14 @@ +from typing import Any + +class UnexpectedUnicodeError(AssertionError, UnicodeError): ... 
+ +class DebugFilesKeyError(KeyError, AssertionError): + msg: Any = ... + def __init__(self, request: Any, key: Any) -> None: ... + +class FormDataRoutingRedirect(AssertionError): + def __init__(self, request: Any) -> None: ... + +def attach_enctype_error_multidict(request: Any): ... +def explain_template_loading_attempts(app: Any, template: Any, attempts: Any) -> None: ... +def explain_ignored_app_run() -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/globals.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/globals.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c6043c726b8a0962ead729338a9de87543bca15a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/globals.pyi @@ -0,0 +1,16 @@ +from typing import Any + +from werkzeug.local import LocalStack + +from .app import Flask +from .wrappers import Request + +class _FlaskLocalProxy(Flask): + def _get_current_object(self) -> Flask: ... + +_request_ctx_stack: LocalStack +_app_ctx_stack: LocalStack +current_app: _FlaskLocalProxy +request: Request +session: Any +g: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/helpers.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/helpers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..928f8837c912d6fba7186c94f022f5adec1cae85 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/helpers.pyi @@ -0,0 +1,55 @@ +from typing import Any, Optional + +from .cli import AppGroup +from .wrappers import Response + +def get_env(): ... +def get_debug_flag(): ... +def get_load_dotenv(default: bool = ...): ... +def stream_with_context(generator_or_function: Any): ... +def make_response(*args: Any) -> Response: ... +def url_for(endpoint: str, **values: Any) -> str: ... +def get_template_attribute(template_name: Any, attribute: Any): ... +def flash(message: Any, category: str = ...) -> None: ... +def get_flashed_messages(with_categories: bool = ..., category_filter: Any = ...): ... +def send_file( + filename_or_fp: Any, + mimetype: Optional[Any] = ..., + as_attachment: bool = ..., + attachment_filename: Optional[Any] = ..., + add_etags: bool = ..., + cache_timeout: Optional[Any] = ..., + conditional: bool = ..., + last_modified: Optional[Any] = ..., +) -> Response: ... +def safe_join(directory: Any, *pathnames: Any): ... +def send_from_directory(directory: Any, filename: Any, **options: Any) -> Response: ... +def get_root_path(import_name: Any): ... +def find_package(import_name: Any): ... + +class locked_cached_property: + __name__: Any = ... + __module__: Any = ... + __doc__: Any = ... + func: Any = ... + lock: Any = ... + def __init__(self, func: Any, name: Optional[Any] = ..., doc: Optional[Any] = ...) -> None: ... + def __get__(self, obj: Any, type: Optional[Any] = ...): ... + +class _PackageBoundObject: + import_name: Any = ... + template_folder: Any = ... + root_path: Any = ... + cli: AppGroup = ... + def __init__(self, import_name: Any, template_folder: Optional[Any] = ..., root_path: Optional[Any] = ...) -> None: ... + static_folder: Any = ... + static_url_path: Any = ... + @property + def has_static_folder(self): ... + def jinja_loader(self): ... + def get_send_file_max_age(self, filename: Any): ... + def send_static_file(self, filename: Any) -> Response: ... 
+ def open_resource(self, resource: Any, mode: str = ...): ... + +def total_seconds(td: Any): ... +def is_ip(value: Any): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/logging.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/logging.pyi new file mode 100644 index 0000000000000000000000000000000000000000..75354cedcc972590eac9cacd1bdf153b62b8fed2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/logging.pyi @@ -0,0 +1,8 @@ +from typing import Any + +def wsgi_errors_stream(): ... +def has_level_handler(logger: Any): ... + +default_handler: Any + +def create_logger(app: Any): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/sessions.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/sessions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..77c008b730f3719cf84bbe08833bccc1e5bfbf94 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/sessions.pyi @@ -0,0 +1,57 @@ +from abc import ABCMeta +from typing import Any, MutableMapping, Optional + +from werkzeug.datastructures import CallbackDict + +class SessionMixin(MutableMapping[str, Any], metaclass=ABCMeta): + @property + def permanent(self): ... + @permanent.setter + def permanent(self, value: Any) -> None: ... + new: bool = ... + modified: bool = ... + accessed: bool = ... + +class SecureCookieSession(CallbackDict[str, Any], SessionMixin): + modified: bool = ... + accessed: bool = ... + def __init__(self, initial: Optional[Any] = ...) -> None: ... + def __getitem__(self, key: Any): ... + def get(self, key: Any, default: Optional[Any] = ...): ... + def setdefault(self, key: Any, default: Optional[Any] = ...): ... + +class NullSession(SecureCookieSession): + __setitem__: Any = ... + __delitem__: Any = ... + clear: Any = ... + pop: Any = ... + popitem: Any = ... + update: Any = ... + setdefault: Any = ... + +class SessionInterface: + null_session_class: Any = ... + pickle_based: bool = ... + def make_null_session(self, app: Any): ... + def is_null_session(self, obj: Any): ... + def get_cookie_domain(self, app: Any): ... + def get_cookie_path(self, app: Any): ... + def get_cookie_httponly(self, app: Any): ... + def get_cookie_secure(self, app: Any): ... + def get_cookie_samesite(self, app: Any): ... + def get_expiration_time(self, app: Any, session: Any): ... + def should_set_cookie(self, app: Any, session: Any): ... + def open_session(self, app: Any, request: Any) -> None: ... + def save_session(self, app: Any, session: Any, response: Any) -> None: ... + +session_json_serializer: Any + +class SecureCookieSessionInterface(SessionInterface): + salt: str = ... + digest_method: Any = ... + key_derivation: str = ... + serializer: Any = ... + session_class: Any = ... + def get_signing_serializer(self, app: Any): ... + def open_session(self, app: Any, request: Any): ... + def save_session(self, app: Any, session: Any, response: Any): ... 
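SessionMixin being a MutableMapping, together with the SecureCookieSessionInterface wiring above, is what makes the familiar `session` usage type-check: reads and writes go through a dict-like object that the default interface serializes into a signed cookie. A sketch assuming Flask is installed (the secret key is a placeholder):

```python
# Dict-style session use that the SessionMixin/SecureCookieSession stubs describe.
from flask import Flask, session

app = Flask(__name__)
app.secret_key = "change-me"  # placeholder; required by the signing serializer

@app.route("/visit")
def visit():
    # SessionMixin is a MutableMapping[str, Any], so get/setitem both type-check.
    session["count"] = session.get("count", 0) + 1
    session.permanent = True  # the `permanent` property on SessionMixin
    return f"visit number {session['count']}"
```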
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/signals.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/signals.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..66238d0ae9123d35a532becec3e019f0917f6b60
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/signals.pyi
@@ -0,0 +1,29 @@
+from typing import Any, Optional
+
+signals_available: bool
+
+class Namespace:
+    def signal(self, name: Any, doc: Optional[Any] = ...): ...
+
+class _FakeSignal:
+    name: Any = ...
+    __doc__: Any = ...
+    def __init__(self, name: Any, doc: Optional[Any] = ...) -> None: ...
+    send: Any = ...
+    connect: Any = ...
+    disconnect: Any = ...
+    has_receivers_for: Any = ...
+    receivers_for: Any = ...
+    temporarily_connected_to: Any = ...
+    connected_to: Any = ...
+
+template_rendered: Any
+before_render_template: Any
+request_started: Any
+request_finished: Any
+request_tearing_down: Any
+got_request_exception: Any
+appcontext_tearing_down: Any
+appcontext_pushed: Any
+appcontext_popped: Any
+message_flashed: Any
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/templating.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/templating.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..3f7aaae4023681044c87ce29d14e748d0158aabb
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/templating.pyi
@@ -0,0 +1,16 @@
+from typing import Any, Iterable, Text, Union
+
+from jinja2 import BaseLoader, Environment as BaseEnvironment
+
+class Environment(BaseEnvironment):
+    app: Any = ...
+    def __init__(self, app: Any, **options: Any) -> None: ...
+
+class DispatchingJinjaLoader(BaseLoader):
+    app: Any = ...
+    def __init__(self, app: Any) -> None: ...
+    def get_source(self, environment: Any, template: Any): ...
+    def list_templates(self): ...
+
+def render_template(template_name_or_list: Union[Text, Iterable[Text]], **context: Any) -> Text: ...
+def render_template_string(source: Text, **context: Any) -> Text: ...
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/testing.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/testing.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..c3a652413fbd05f7363f08dac23e41d480dc9c33
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/testing.pyi
@@ -0,0 +1,56 @@
+from typing import IO, Any, Iterable, Mapping, Optional, Text, TypeVar, Union
+
+from click import BaseCommand
+from click.testing import CliRunner, Result
+from werkzeug.test import Client, EnvironBuilder as WerkzeugEnvironBuilder
+
+# Response type for the client below.
+# By default _R is Tuple[Iterable[Any], Union[Text, int], werkzeug.datastructures.Headers], however
+# most commonly it is wrapped in a Response object.
+_R = TypeVar("_R")
+
+class FlaskClient(Client[_R]):
+    preserve_context: bool = ...
+    environ_base: Any = ...
+    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
+    def session_transaction(self, *args: Any, **kwargs: Any) -> None: ...
+    def __enter__(self): ...
+    def __exit__(self, exc_type: Any, exc_value: Any, tb: Any) -> None: ...
+
+class FlaskCliRunner(CliRunner):
+    app: Any = ...
+    def __init__(self, app: Any, **kwargs: Any) -> None: ...
+ def invoke( + self, + cli: Optional[BaseCommand] = ..., + args: Optional[Union[str, Iterable[str]]] = ..., + input: Optional[Union[bytes, IO[Any], Text]] = ..., + env: Optional[Mapping[str, str]] = ..., + catch_exceptions: bool = ..., + color: bool = ..., + **extra: Any, + ) -> Result: ... + +class EnvironBuilder(WerkzeugEnvironBuilder): + app: Any + def __init__( + self, + app: Any, + path: str = ..., + base_url: Optional[Any] = ..., + subdomain: Optional[Any] = ..., + url_scheme: Optional[Any] = ..., + *args: Any, + **kwargs: Any, + ) -> None: ... + def json_dumps(self, obj: Any, **kwargs: Any) -> str: ... + +def make_test_environ_builder( + app: Any, + path: str = ..., + base_url: Optional[Any] = ..., + subdomain: Optional[Any] = ..., + url_scheme: Optional[Any] = ..., + *args: Any, + **kwargs: Any, +): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/views.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/views.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e6112637569a0e7c9b749aa076a3bc5598e9e08b --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/views.pyi @@ -0,0 +1,17 @@ +from typing import Any + +http_method_funcs: Any + +class View: + methods: Any = ... + provide_automatic_options: Any = ... + decorators: Any = ... + def dispatch_request(self, *args: Any, **kwargs: Any) -> Any: ... + @classmethod + def as_view(cls, name: Any, *class_args: Any, **class_kwargs: Any): ... + +class MethodViewType(type): + def __init__(self, name: Any, bases: Any, d: Any) -> None: ... + +class MethodView(View, metaclass=MethodViewType): + def dispatch_request(self, *args: Any, **kwargs: Any) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/wrappers.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/wrappers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..590bea0b5a36a4226258dbb4255f298ebb972079 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/flask/wrappers.pyi @@ -0,0 +1,32 @@ +from typing import Any, Dict, Optional + +from werkzeug.exceptions import HTTPException +from werkzeug.routing import Rule +from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase + +class JSONMixin: + @property + def is_json(self) -> bool: ... + @property + def json(self): ... + def get_json(self, force: bool = ..., silent: bool = ..., cache: bool = ...): ... + def on_json_loading_failed(self, e: Any) -> None: ... + +class Request(RequestBase, JSONMixin): + url_rule: Optional[Rule] = ... + view_args: Dict[str, Any] = ... + routing_exception: Optional[HTTPException] = ... + # Request is making the max_content_length readonly, where it was not the + # case in its supertype. + # We would require something like https://github.com/python/typing/issues/241 + @property + def max_content_length(self) -> Optional[int]: ... # type: ignore + @property + def endpoint(self) -> Optional[str]: ... + @property + def blueprint(self) -> Optional[str]: ... + +class Response(ResponseBase, JSONMixin): + default_mimetype: Optional[str] = ... + @property + def max_cookie_size(self) -> int: ... 
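The Request stub above types get_json() as returning Any and endpoint as Optional[str], so callers are expected to narrow both. A small sketch of what that looks like in a handler (the /echo route is made up for the example):

from typing import Any, Dict, Optional

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/echo", methods=["POST"])
def echo() -> Any:
    # get_json() is typed as Any in the stub, so narrow it explicitly.
    payload: Optional[Dict[str, Any]] = request.get_json(silent=True)
    if payload is None:
        return jsonify(error="expected a JSON body"), 400
    # endpoint is Optional[str] per the stub, hence the fallback.
    return jsonify(endpoint=request.endpoint or "", data=payload)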
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/database.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/database.pyi new file mode 100644 index 0000000000000000000000000000000000000000..eec539d65fe0c22834ac94069abc08547f7333df --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/database.pyi @@ -0,0 +1,27 @@ +from types import TracebackType +from typing import Optional, Sequence, Text, Type + +from geoip2.models import ASN, ISP, AnonymousIP, City, ConnectionType, Country, Domain, Enterprise +from maxminddb.reader import Metadata + +_Locales = Optional[Sequence[Text]] + +class Reader: + def __init__(self, filename: Text, locales: _Locales = ..., mode: int = ...) -> None: ... + def __enter__(self) -> Reader: ... + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = ..., + exc_val: Optional[BaseException] = ..., + exc_tb: Optional[TracebackType] = ..., + ) -> None: ... + def country(self, ip_address: Text) -> Country: ... + def city(self, ip_address: Text) -> City: ... + def anonymous_ip(self, ip_address: Text) -> AnonymousIP: ... + def asn(self, ip_address: Text) -> ASN: ... + def connection_type(self, ip_address: Text) -> ConnectionType: ... + def domain(self, ip_address: Text) -> Domain: ... + def enterprise(self, ip_address: Text) -> Enterprise: ... + def isp(self, ip_address: Text) -> ISP: ... + def metadata(self) -> Metadata: ... + def close(self) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/errors.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/errors.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5e2997d3c07805e22baf11acf2609e01c882ef59 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/errors.pyi @@ -0,0 +1,14 @@ +from typing import Optional, Text + +class GeoIP2Error(RuntimeError): ... +class AddressNotFoundError(GeoIP2Error): ... +class AuthenticationError(GeoIP2Error): ... + +class HTTPError(GeoIP2Error): + http_status: Optional[int] + uri: Optional[Text] + def __init__(self, message: Text, http_status: Optional[int] = ..., uri: Optional[Text] = ...) -> None: ... + +class InvalidRequestError(GeoIP2Error): ... +class OutOfQueriesError(GeoIP2Error): ... +class PermissionRequiredError(GeoIP2Error): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/mixins.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/mixins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8c683c26b84b4a7918713d3b9b43d9b4858274e0 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/mixins.pyi @@ -0,0 +1,3 @@ +class SimpleEquality: + def __eq__(self, other: object) -> bool: ... + def __ne__(self, other: object) -> bool: ... 
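For context, the Reader stubbed in database.pyi is typically used as a context manager, with the lookup methods raising the AddressNotFoundError typed in errors.pyi. A brief sketch; the .mmdb path and the IP address are placeholders:

import geoip2.database
from geoip2.errors import AddressNotFoundError

with geoip2.database.Reader("/path/to/GeoLite2-City.mmdb") as reader:
    try:
        response = reader.city("203.0.113.7")
    except AddressNotFoundError:
        response = None

if response is not None:
    # Country and City models are typed in geoip2/models.pyi below.
    print(response.country.iso_code, response.city.name)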
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/models.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/models.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..96af74b8b34b5adc9f249e64fe6a8b64c63504d2
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/models.pyi
@@ -0,0 +1,62 @@
+from typing import Any, Mapping, Optional, Sequence, Text
+
+from geoip2 import records
+from geoip2.mixins import SimpleEquality
+
+_Locales = Optional[Sequence[Text]]
+_RawResponse = Mapping[Text, Mapping[Text, Any]]
+
+class Country(SimpleEquality):
+    continent: records.Continent
+    country: records.Country
+    registered_country: records.Country
+    represented_country: records.RepresentedCountry
+    maxmind: records.MaxMind
+    traits: records.Traits
+    raw: _RawResponse
+    def __init__(self, raw_response: _RawResponse, locales: _Locales = ...) -> None: ...
+
+class City(Country):
+    city: records.City
+    location: records.Location
+    postal: records.Postal
+    subdivisions: records.Subdivisions
+    def __init__(self, raw_response: _RawResponse, locales: _Locales = ...) -> None: ...
+
+class Insights(City): ...
+class Enterprise(City): ...
+class SimpleModel(SimpleEquality): ...
+
+class AnonymousIP(SimpleModel):
+    is_anonymous: bool
+    is_anonymous_vpn: bool
+    is_hosting_provider: bool
+    is_public_proxy: bool
+    is_tor_exit_node: bool
+    ip_address: Optional[Text]
+    raw: _RawResponse
+    def __init__(self, raw: _RawResponse) -> None: ...
+
+class ASN(SimpleModel):
+    autonomous_system_number: Optional[int]
+    autonomous_system_organization: Optional[Text]
+    ip_address: Optional[Text]
+    raw: _RawResponse
+    def __init__(self, raw: _RawResponse) -> None: ...
+
+class ConnectionType(SimpleModel):
+    connection_type: Optional[Text]
+    ip_address: Optional[Text]
+    raw: _RawResponse
+    def __init__(self, raw: _RawResponse) -> None: ...
+
+class Domain(SimpleModel):
+    domain: Optional[Text]
+    ip_address: Optional[Text]
+    raw: _RawResponse
+    def __init__(self, raw: _RawResponse) -> None: ...
+
+class ISP(ASN):
+    isp: Optional[Text]
+    organization: Optional[Text]
+    def __init__(self, raw: _RawResponse) -> None: ...
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/records.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/records.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..0d90b18ca449f290b4693f9297b15566aec7dc6a
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/geoip2/records.pyi
@@ -0,0 +1,83 @@
+from typing import Any, Mapping, Optional, Sequence, Text, Tuple
+
+from geoip2.mixins import SimpleEquality
+
+_Locales = Optional[Sequence[Text]]
+_Names = Mapping[Text, Text]
+
+class Record(SimpleEquality):
+    def __init__(self, **kwargs: Any) -> None: ...
+    def __setattr__(self, name: Text, value: Any) -> None: ...
+
+class PlaceRecord(Record):
+    def __init__(self, locales: _Locales = ..., **kwargs: Any) -> None: ...
+    @property
+    def name(self) -> Text: ...
+
+class City(PlaceRecord):
+    confidence: int
+    geoname_id: int
+    names: _Names
+
+class Continent(PlaceRecord):
+    code: Text
+    geoname_id: int
+    names: _Names
+
+class Country(PlaceRecord):
+    confidence: int
+    geoname_id: int
+    is_in_european_union: bool
+    iso_code: Text
+    names: _Names
+    def __init__(self, locales: _Locales = ..., **kwargs: Any) -> None: ...
+ +class RepresentedCountry(Country): + type: Text + +class Location(Record): + average_income: int + accuracy_radius: int + latitude: float + longitude: float + metro_code: int + population_density: int + time_zone: Text + +class MaxMind(Record): + queries_remaining: int + +class Postal(Record): + code: Text + confidence: int + +class Subdivision(PlaceRecord): + confidence: int + geoname_id: int + iso_code: Text + names: _Names + +class Subdivisions(Tuple[Subdivision]): + def __new__(cls, locales: _Locales, *subdivisions: Subdivision) -> Subdivisions: ... + def __init__(self, locales: _Locales, *subdivisions: Subdivision) -> None: ... + @property + def most_specific(self) -> Subdivision: ... + +class Traits(Record): + autonomous_system_number: int + autonomous_system_organization: Text + connection_type: Text + domain: Text + ip_address: Text + is_anonymous: bool + is_anonymous_proxy: bool + is_anonymous_vpn: bool + is_hosting_provider: bool + is_legitimate_proxy: bool + is_public_proxy: bool + is_satellite_provider: bool + is_tor_exit_node: bool + isp: Text + organization: Text + user_type: Text + def __init__(self, **kwargs: Any) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/gflags.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/gflags.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a4c487d310d47f07906c5ab3667613c4e3918602 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/gflags.pyi @@ -0,0 +1,312 @@ +from types import ModuleType +from typing import IO, Any, Callable, Dict, Iterable, List, Optional, Sequence, Text, Union + +class Error(Exception): ... + +FlagsError = Error + +class DuplicateFlag(FlagsError): ... +class CantOpenFlagFileError(FlagsError): ... +class DuplicateFlagCannotPropagateNoneToSwig(DuplicateFlag): ... + +class DuplicateFlagError(DuplicateFlag): + def __init__(self, flagname: str, flag_values: FlagValues, other_flag_values: FlagValues = ...) -> None: ... + +class IllegalFlagValueError(FlagsError): ... + +IllegalFlagValue = IllegalFlagValueError + +class UnrecognizedFlag(FlagsError): ... + +class UnrecognizedFlagError(UnrecognizedFlag): + def __init__(self, flagname: str, flagvalue: str = ...) -> None: ... + +def get_help_width() -> int: ... + +GetHelpWidth = get_help_width + +def text_wrap(text: str, length: int = ..., indent: str = ..., firstline_indent: str = ..., tabs: str = ...) -> str: ... + +TextWrap = text_wrap + +def doc_to_help(doc: str) -> str: ... + +DocToHelp = doc_to_help + +class FlagValues: + def __init__(self) -> None: ... + def UseGnuGetOpt(self, use_gnu_getopt: bool = ...) -> None: ... + def is_gnu_getopt(self) -> bool: ... + IsGnuGetOpt = is_gnu_getopt + # TODO dict type + def FlagDict(self) -> Dict[Any, Any]: ... + def flags_by_module_dict(self) -> Dict[str, List[Flag]]: ... + FlagsByModuleDict = flags_by_module_dict + def flags_by_module_id_dict(self) -> Dict[int, List[Flag]]: ... + FlagsByModuleIdDict = flags_by_module_id_dict + def key_flags_by_module_dict(self) -> Dict[str, List[Flag]]: ... + KeyFlagsByModuleDict = key_flags_by_module_dict + def find_module_defining_flag(self, flagname: str, default: str = ...) -> str: ... + FindModuleDefiningFlag = find_module_defining_flag + def find_module_id_defining_flag(self, flagname: str, default: int = ...) -> int: ... + FindModuleIdDefiningFlag = find_module_id_defining_flag + def append_flag_values(self, flag_values: FlagValues) -> None: ... 
+ AppendFlagValues = append_flag_values + def remove_flag_values(self, flag_values: FlagValues) -> None: ... + RemoveFlagValues = remove_flag_values + def __setitem__(self, name: str, flag: Flag) -> None: ... + def __getitem__(self, name: str) -> Flag: ... + def __getattr__(self, name: str) -> Any: ... + def __setattr__(self, name: str, value: Any) -> None: ... + def __delattr__(self, flag_name: str) -> None: ... + def set_default(self, name: str, value: Any) -> None: ... + SetDefault = set_default + def __contains__(self, name: str) -> bool: ... + has_key = __contains__ + def __iter__(self) -> Iterable[str]: ... + def __call__(self, argv: List[str], known_only: bool = ...) -> List[str]: ... + def reset(self) -> None: ... + Reset = reset + def RegisteredFlags(self) -> List[str]: ... + def flag_values_dict(self) -> Dict[str, Any]: ... + FlagValuesDict = flag_values_dict + def __str__(self) -> str: ... + def GetHelp(self, prefix: str = ...) -> str: ... + def module_help(self, module: Union[ModuleType, str]) -> str: ... + ModuleHelp = module_help + def main_module_help(self) -> str: ... + MainModuleHelp = main_module_help + def get(self, name: str, default: Any) -> Any: ... + def ShortestUniquePrefixes(self, fl: Dict[str, Flag]) -> Dict[str, str]: ... + def ExtractFilename(self, flagfile_str: str) -> str: ... + def read_flags_from_files(self, argv: List[str], force_gnu: bool = ...) -> List[str]: ... + ReadFlagsFromFiles = read_flags_from_files + def flags_into_string(self) -> str: ... + FlagsIntoString = flags_into_string + def append_flags_into_file(self, filename: str) -> None: ... + AppendFlagsIntoFile = append_flags_into_file + def write_help_in_xml_format(self, outfile: IO[str] = ...) -> None: ... + WriteHelpInXMLFormat = write_help_in_xml_format + # TODO validator: gflags_validators.Validator + def AddValidator(self, validator: Any) -> None: ... + def is_parsed(self) -> bool: ... + IsParsed = is_parsed + +FLAGS: FlagValues + +class Flag: + name: str + default: Any + default_as_str: str + value: Any + help: str + short_name: str + boolean = False + present = False + parser: ArgumentParser + serializer: ArgumentSerializer + allow_override = False + def __init__( + self, + parser: ArgumentParser, + serializer: ArgumentSerializer, + name: str, + default: Optional[str], + help_string: str, + short_name: str = ..., + boolean: bool = ..., + allow_override: bool = ..., + ) -> None: ... + def Parse(self, argument: Any) -> Any: ... + def Unparse(self) -> None: ... + def Serialize(self) -> str: ... + def SetDefault(self, value: Any) -> None: ... + def Type(self) -> str: ... + def WriteInfoInXMLFormat(self, outfile: IO[str], module_name: str, is_key: bool = ..., indent: str = ...) -> None: ... + +class ArgumentParser(object): + syntactic_help: str + # TODO what is this + def parse(self, argument: Any) -> Any: ... + Parser = parse + def flag_type(self) -> str: ... + Type = flag_type + def WriteCustomInfoInXMLFormat(self, outfile: IO[str], indent: str) -> None: ... + +class ArgumentSerializer: + def Serialize(self, value: Any) -> Text: ... + +class ListSerializer(ArgumentSerializer): + def __init__(self, list_sep: str) -> None: ... + def Serialize(self, value: List[Any]) -> str: ... + +def register_validator( + flag_name: str, checker: Callable[[Any], bool], message: str = ..., flag_values: FlagValues = ... +) -> None: ... + +RegisterValidator = register_validator + +def mark_flag_as_required(flag_name: str, flag_values: FlagValues = ...) -> None: ... 
+ +MarkFlagAsRequired = mark_flag_as_required + +def mark_flags_as_required(flag_names: Iterable[str], flag_values: FlagValues = ...) -> None: ... + +MarkFlagsAsRequired = mark_flags_as_required + +def mark_flags_as_mutual_exclusive(flag_names: Iterable[str], required: bool = ..., flag_values: FlagValues = ...) -> None: ... + +MarkFlagsAsMutualExclusive = mark_flags_as_mutual_exclusive + +def DEFINE( + parser: ArgumentParser, + name: str, + default: Any, + help: str, + flag_values: FlagValues = ..., + serializer: ArgumentSerializer = ..., + **args: Any, +) -> None: ... +def DEFINE_flag(flag: Flag, flag_values: FlagValues = ...) -> None: ... +def declare_key_flag(flag_name: str, flag_values: FlagValues = ...) -> None: ... + +DECLARE_key_flag = declare_key_flag + +def adopt_module_key_flags(module: ModuleType, flag_values: FlagValues = ...) -> None: ... + +ADOPT_module_key_flags = adopt_module_key_flags + +def DEFINE_string(name: str, default: Optional[str], help: str, flag_values: FlagValues = ..., **args: Any) -> None: ... + +class BooleanParser(ArgumentParser): + def Convert(self, argument: Any) -> bool: ... + def Parse(self, argument: Any) -> bool: ... + +class BooleanFlag(Flag): + def __init__(self, name: str, default: Optional[bool], help: str, short_name: str = ..., **args: Any) -> None: ... + +def DEFINE_boolean(name: str, default: Optional[bool], help: str, flag_values: FlagValues = ..., **args: Any) -> None: ... + +DEFINE_bool = DEFINE_boolean + +class HelpFlag(BooleanFlag): + def __init__(self) -> None: ... + def Parse(self, arg: Any) -> None: ... + +class HelpXMLFlag(BooleanFlag): + def __init__(self) -> None: ... + def Parse(self, arg: Any) -> None: ... + +class HelpshortFlag(BooleanFlag): + def __init__(self) -> None: ... + def Parse(self, arg: Any) -> None: ... + +class NumericParser(ArgumentParser): + def IsOutsideBounds(self, val: float) -> bool: ... + def Parse(self, argument: Any) -> float: ... + def WriteCustomInfoInXMLFormat(self, outfile: IO[str], indent: str) -> None: ... + def Convert(self, argument: Any) -> Any: ... + +class FloatParser(NumericParser): + number_article: str + number_name: str + syntactic_help: str + def __init__(self, lower_bound: float = ..., upper_bound: float = ...) -> None: ... + def Convert(self, argument: Any) -> float: ... + +def DEFINE_float( + name: str, + default: Optional[float], + help: str, + lower_bound: float = ..., + upper_bound: float = ..., + flag_values: FlagValues = ..., + **args: Any, +) -> None: ... + +class IntegerParser(NumericParser): + number_article: str + number_name: str + syntactic_help: str + def __init__(self, lower_bound: int = ..., upper_bound: int = ...) -> None: ... + def Convert(self, argument: Any) -> int: ... + +def DEFINE_integer( + name: str, + default: Optional[int], + help: str, + lower_bound: int = ..., + upper_bound: int = ..., + flag_values: FlagValues = ..., + **args: Any, +) -> None: ... + +class EnumParser(ArgumentParser): + def __init__(self, enum_values: List[str]) -> None: ... + def Parse(self, argument: Any) -> Any: ... + +class EnumFlag(Flag): + def __init__( + self, name: str, default: Optional[str], help: str, enum_values: List[str], short_name: str, **args: Any + ) -> None: ... + +def DEFINE_enum( + name: str, default: Optional[str], enum_values: Iterable[str], help: str, flag_values: FlagValues = ..., **args: Any +) -> None: ... + +class BaseListParser(ArgumentParser): + def __init__(self, token: str = ..., name: str = ...) -> None: ... + def Parse(self, argument: Any) -> List[Any]: ... 
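# Usage sketch (editorial, not part of the stub): the DEFINE_* helpers below
# register flags on the module-level FLAGS object, and calling FLAGS(argv)
# parses the command line, as typed by FlagValues.__call__ above. For example:
#
#     import sys
#     import gflags
#
#     gflags.DEFINE_string("name", "world", "Who to greet.")
#     remaining = gflags.FLAGS(sys.argv)  # arguments left after flag parsing
#     print("Hello, %s!" % gflags.FLAGS.name)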
+ +class ListParser(BaseListParser): + def __init__(self) -> None: ... + +class WhitespaceSeparatedListParser(BaseListParser): + def __init__(self) -> None: ... + +def DEFINE_list(name: str, default: Optional[List[str]], help: str, flag_values: FlagValues = ..., **args: Any) -> None: ... +def DEFINE_spaceseplist( + name: str, default: Optional[List[str]], help: str, flag_values: FlagValues = ..., **args: Any +) -> None: ... + +class MultiFlag(Flag): + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def Parse(self, arguments: Any) -> None: ... + def Serialize(self) -> str: ... + +def DEFINE_multi_string( + name: str, default: Optional[Union[str, List[str]]], help: str, flag_values: FlagValues = ..., **args: Any +) -> None: ... + +DEFINE_multistring = DEFINE_multi_string + +def DEFINE_multi_integer( + name: str, + default: Optional[Union[int, List[int]]], + help: str, + lower_bound: int = ..., + upper_bound: int = ..., + flag_values: FlagValues = ..., + **args: Any, +) -> None: ... + +DEFINE_multi_int = DEFINE_multi_integer + +def DEFINE_multi_float( + name: str, + default: Optional[Union[float, List[float]]], + help: str, + lower_bound: float = ..., + upper_bound: float = ..., + flag_values: FlagValues = ..., + **args: Any, +) -> None: ... +def DEFINE_multi_enum( + name: str, + default: Optional[Union[Sequence[str], str]], + enum_values: Sequence[str], + help: str, + flag_values: FlagValues = ..., + case_sensitive: bool = ..., + **args: Any, +) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/google/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/google/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/itsdangerous.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/itsdangerous.pyi new file mode 100644 index 0000000000000000000000000000000000000000..84f596116d0e7f7a31375d8858153785156cf479 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/itsdangerous.pyi @@ -0,0 +1,184 @@ +from datetime import datetime +from typing import IO, Any, Callable, Generator, Mapping, MutableMapping, Optional, Text, Tuple, Union + +_serializer = Any # must be an object that has "dumps" and "loads" attributes (e.g. the json module) + +def want_bytes(s: Union[Text, bytes], encoding: Text = ..., errors: Text = ...) -> bytes: ... + +class BadData(Exception): + message: str + def __init__(self, message: str) -> None: ... + +class BadPayload(BadData): + original_error: Optional[Exception] + def __init__(self, message: str, original_error: Optional[Exception] = ...) -> None: ... + +class BadSignature(BadData): + payload: Optional[Any] + def __init__(self, message: str, payload: Optional[Any] = ...) -> None: ... + +class BadTimeSignature(BadSignature): + date_signed: Optional[int] + def __init__(self, message: str, payload: Optional[Any] = ..., date_signed: Optional[int] = ...) -> None: ... + +class BadHeader(BadSignature): + header: Any + original_error: Any + def __init__( + self, message: str, payload: Optional[Any] = ..., header: Optional[Any] = ..., original_error: Optional[Any] = ... + ) -> None: ... + +class SignatureExpired(BadTimeSignature): ... + +def base64_encode(string: Union[Text, bytes]) -> bytes: ... +def base64_decode(string: Union[Text, bytes]) -> bytes: ... 
+ +class SigningAlgorithm(object): + def get_signature(self, key: bytes, value: bytes) -> bytes: ... + def verify_signature(self, key: bytes, value: bytes, sig: bytes) -> bool: ... + +class NoneAlgorithm(SigningAlgorithm): + def get_signature(self, key: bytes, value: bytes) -> bytes: ... + +class HMACAlgorithm(SigningAlgorithm): + default_digest_method: Callable[..., Any] + digest_method: Callable[..., Any] + def __init__(self, digest_method: Optional[Callable[..., Any]] = ...) -> None: ... + def get_signature(self, key: bytes, value: bytes) -> bytes: ... + +class Signer(object): + default_digest_method: Callable[..., Any] = ... + default_key_derivation: str = ... + + secret_key: bytes + sep: bytes + salt: Union[Text, bytes] + key_derivation: str + digest_method: Callable[..., Any] + algorithm: SigningAlgorithm + def __init__( + self, + secret_key: Union[Text, bytes], + salt: Optional[Union[Text, bytes]] = ..., + sep: Optional[Union[Text, bytes]] = ..., + key_derivation: Optional[str] = ..., + digest_method: Optional[Callable[..., Any]] = ..., + algorithm: Optional[SigningAlgorithm] = ..., + ) -> None: ... + def derive_key(self) -> bytes: ... + def get_signature(self, value: Union[Text, bytes]) -> bytes: ... + def sign(self, value: Union[Text, bytes]) -> bytes: ... + def verify_signature(self, value: bytes, sig: Union[Text, bytes]) -> bool: ... + def unsign(self, signed_value: Union[Text, bytes]) -> bytes: ... + def validate(self, signed_value: Union[Text, bytes]) -> bool: ... + +class TimestampSigner(Signer): + def get_timestamp(self) -> int: ... + def timestamp_to_datetime(self, ts: float) -> datetime: ... + def sign(self, value: Union[Text, bytes]) -> bytes: ... + def unsign( + self, value: Union[Text, bytes], max_age: Optional[int] = ..., return_timestamp: bool = ... + ) -> Any: ... # morally -> Union[bytes, Tuple[bytes, datetime]] + def validate(self, signed_value: Union[Text, bytes], max_age: Optional[int] = ...) -> bool: ... + +class Serializer(object): + default_serializer: _serializer = ... + default_signer: Callable[..., Signer] = ... + + secret_key: bytes + salt: bytes + serializer: _serializer + is_text_serializer: bool + signer: Callable[..., Signer] + signer_kwargs: MutableMapping[str, Any] + def __init__( + self, + secret_key: Union[Text, bytes], + salt: Optional[Union[Text, bytes]] = ..., + serializer: Optional[_serializer] = ..., + signer: Optional[Callable[..., Signer]] = ..., + signer_kwargs: Optional[MutableMapping[str, Any]] = ..., + ) -> None: ... + def load_payload(self, payload: bytes, serializer: Optional[_serializer] = ...) -> Any: ... + def dump_payload(self, obj: Any) -> bytes: ... + def make_signer(self, salt: Optional[Union[Text, bytes]] = ...) -> Signer: ... + def iter_unsigners(self, salt: Optional[Union[Text, bytes]] = ...) -> Generator[Any, None, None]: ... + def dumps(self, obj: Any, salt: Optional[Union[Text, bytes]] = ...) -> Any: ... # morally -> Union[str, bytes] + def dump(self, obj: Any, f: IO[Any], salt: Optional[Union[Text, bytes]] = ...) -> None: ... + def loads(self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ...) -> Any: ... + def load(self, f: IO[Any], salt: Optional[Union[Text, bytes]] = ...) -> Any: ... + def loads_unsafe(self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ...) -> Tuple[bool, Optional[Any]]: ... + def load_unsafe(self, f: IO[Any], salt: Optional[Union[Text, bytes]] = ...) -> Tuple[bool, Optional[Any]]: ... 
+ +class TimedSerializer(Serializer): + def loads( + self, + s: Union[Text, bytes], + salt: Optional[Union[Text, bytes]] = ..., + max_age: Optional[int] = ..., + return_timestamp: bool = ..., + ) -> Any: ... # morally -> Union[Any, Tuple[Any, datetime]] + def loads_unsafe( + self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ..., max_age: Optional[int] = ... + ) -> Tuple[bool, Any]: ... + +class JSONWebSignatureSerializer(Serializer): + jws_algorithms: MutableMapping[Text, SigningAlgorithm] = ... + default_algorithm: Text = ... + default_serializer: Any = ... + + algorithm_name: Text + algorithm: SigningAlgorithm + def __init__( + self, + secret_key: Union[Text, bytes], + salt: Optional[Union[Text, bytes]] = ..., + serializer: Optional[_serializer] = ..., + signer: Optional[Callable[..., Signer]] = ..., + signer_kwargs: Optional[MutableMapping[str, Any]] = ..., + algorithm_name: Optional[Text] = ..., + ) -> None: ... + def load_payload( + self, payload: Union[Text, bytes], serializer: Optional[_serializer] = ..., return_header: bool = ... + ) -> Any: ... # morally -> Union[Any, Tuple[Any, MutableMapping[str, Any]]] + def dump_payload(self, header: Mapping[str, Any], obj: Any) -> bytes: ... # type: ignore + def make_algorithm(self, algorithm_name: Text) -> SigningAlgorithm: ... + def make_signer(self, salt: Optional[Union[Text, bytes]] = ..., algorithm: SigningAlgorithm = ...) -> Signer: ... + def make_header(self, header_fields: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: ... + def dumps( + self, obj: Any, salt: Optional[Union[Text, bytes]] = ..., header_fields: Optional[Mapping[str, Any]] = ... + ) -> bytes: ... + def loads( + self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ..., return_header: bool = ... + ) -> Any: ... # morally -> Union[Any, Tuple[Any, MutableMapping[str, Any]]] + def loads_unsafe( + self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ..., return_header: bool = ... + ) -> Tuple[bool, Any]: ... + +class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer): + DEFAULT_EXPIRES_IN: int = ... + expires_in: int + def __init__( + self, + secret_key: Union[Text, bytes], + expires_in: Optional[int] = ..., + salt: Optional[Union[Text, bytes]] = ..., + serializer: Optional[_serializer] = ..., + signer: Optional[Callable[..., Signer]] = ..., + signer_kwargs: Optional[MutableMapping[str, Any]] = ..., + algorithm_name: Optional[Text] = ..., + ) -> None: ... + def make_header(self, header_fields: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: ... + def loads( + self, s: Union[Text, bytes], salt: Optional[Union[Text, bytes]] = ..., return_header: bool = ... + ) -> Any: ... # morally -> Union[Any, Tuple[Any, MutableMapping[str, Any]]] + def get_issue_date(self, header: Mapping[str, Any]) -> Optional[datetime]: ... + def now(self) -> int: ... + +class _URLSafeSerializerMixin(object): + default_serializer: _serializer = ... + def load_payload(self, payload: bytes, serializer: Optional[_serializer] = ...) -> Any: ... + def dump_payload(self, obj: Any) -> bytes: ... + +class URLSafeSerializer(_URLSafeSerializerMixin, Serializer): ... +class URLSafeTimedSerializer(_URLSafeSerializerMixin, TimedSerializer): ... 
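A short round trip through the itsdangerous API stubbed above, showing why the loads() return type is Any and which exceptions callers handle. The secret key and salt values are placeholders for the example:

from itsdangerous import BadSignature, SignatureExpired, URLSafeTimedSerializer

s = URLSafeTimedSerializer("not-a-real-secret", salt="activate")
token = s.dumps({"user_id": 42})

try:
    data = s.loads(token, max_age=3600)  # max_age is in seconds
except SignatureExpired:
    data = None  # signed correctly, but too old
except BadSignature:
    data = None  # tampered with, or signed with a different key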
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1121f8dee408fe404ab4bfaaabc1aecdcf6bcccb --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/__init__.pyi @@ -0,0 +1,45 @@ +from jinja2.bccache import ( + BytecodeCache as BytecodeCache, + FileSystemBytecodeCache as FileSystemBytecodeCache, + MemcachedBytecodeCache as MemcachedBytecodeCache, +) +from jinja2.environment import Environment as Environment, Template as Template +from jinja2.exceptions import ( + TemplateAssertionError as TemplateAssertionError, + TemplateError as TemplateError, + TemplateNotFound as TemplateNotFound, + TemplatesNotFound as TemplatesNotFound, + TemplateSyntaxError as TemplateSyntaxError, + UndefinedError as UndefinedError, +) +from jinja2.filters import ( + contextfilter as contextfilter, + environmentfilter as environmentfilter, + evalcontextfilter as evalcontextfilter, +) +from jinja2.loaders import ( + BaseLoader as BaseLoader, + ChoiceLoader as ChoiceLoader, + DictLoader as DictLoader, + FileSystemLoader as FileSystemLoader, + FunctionLoader as FunctionLoader, + ModuleLoader as ModuleLoader, + PackageLoader as PackageLoader, + PrefixLoader as PrefixLoader, +) +from jinja2.runtime import ( + DebugUndefined as DebugUndefined, + StrictUndefined as StrictUndefined, + Undefined as Undefined, + make_logging_undefined as make_logging_undefined, +) +from jinja2.utils import ( + Markup as Markup, + clear_caches as clear_caches, + contextfunction as contextfunction, + environmentfunction as environmentfunction, + escape as escape, + evalcontextfunction as evalcontextfunction, + is_undefined as is_undefined, + select_autoescape as select_autoescape, +) diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_compat.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4edf9286b48f27f0b0c9cd012db525ea6ad25cbc --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_compat.pyi @@ -0,0 +1,36 @@ +import sys +from typing import Any, Optional + +if sys.version_info >= (3,): + from urllib.parse import quote_from_bytes + + url_quote = quote_from_bytes +else: + import urllib + + url_quote = urllib.quote + +PY2: Any +PYPY: Any +unichr: Any +range_type: Any +text_type: Any +string_types: Any +integer_types: Any +iterkeys: Any +itervalues: Any +iteritems: Any +NativeStringIO: Any + +def reraise(tp, value, tb: Optional[Any] = ...): ... + +ifilter: Any +imap: Any +izip: Any +intern: Any +implements_iterator: Any +implements_to_string: Any +encode_filename: Any +get_next: Any + +def with_metaclass(meta, *bases): ... 
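The names re-exported by jinja2/__init__.pyi above cover the common entry point: building an Environment with a loader and autoescaping, then rendering a template. A minimal sketch; the template directory and file name are placeholders:

from jinja2 import Environment, FileSystemLoader, select_autoescape

env = Environment(
    loader=FileSystemLoader("templates"),
    autoescape=select_autoescape(["html", "xml"]),
)
template = env.get_template("hello.html")
print(template.render(name="world"))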
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_stringdefs.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_stringdefs.pyi new file mode 100644 index 0000000000000000000000000000000000000000..060f8881c3a54b78a448f849eed67eecbe90f81d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/_stringdefs.pyi @@ -0,0 +1,40 @@ +from typing import Any + +Cc: str +Cf: str +Cn: str +Co: str +Cs: Any +Ll: str +Lm: str +Lo: str +Lt: str +Lu: str +Mc: str +Me: str +Mn: str +Nd: str +Nl: str +No: str +Pc: str +Pd: str +Pe: str +Pf: str +Pi: str +Po: str +Ps: str +Sc: str +Sk: str +Sm: str +So: str +Zl: str +Zp: str +Zs: str +cats: Any + +def combine(*args): ... + +xid_start: str +xid_continue: str + +def allexcept(*args): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/bccache.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/bccache.pyi new file mode 100644 index 0000000000000000000000000000000000000000..754736a6a860417c569bfee9c78b575b519aacea --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/bccache.pyi @@ -0,0 +1,44 @@ +from typing import Any, Optional + +marshal_dump: Any +marshal_load: Any +bc_version: int +bc_magic: Any + +class Bucket: + environment: Any + key: Any + checksum: Any + def __init__(self, environment, key, checksum) -> None: ... + code: Any + def reset(self): ... + def load_bytecode(self, f): ... + def write_bytecode(self, f): ... + def bytecode_from_string(self, string): ... + def bytecode_to_string(self): ... + +class BytecodeCache: + def load_bytecode(self, bucket): ... + def dump_bytecode(self, bucket): ... + def clear(self): ... + def get_cache_key(self, name, filename: Optional[Any] = ...): ... + def get_source_checksum(self, source): ... + def get_bucket(self, environment, name, filename, source): ... + def set_bucket(self, bucket): ... + +class FileSystemBytecodeCache(BytecodeCache): + directory: Any + pattern: Any + def __init__(self, directory: Optional[Any] = ..., pattern: str = ...) -> None: ... + def load_bytecode(self, bucket): ... + def dump_bytecode(self, bucket): ... + def clear(self): ... + +class MemcachedBytecodeCache(BytecodeCache): + client: Any + prefix: Any + timeout: Any + ignore_memcache_errors: Any + def __init__(self, client, prefix: str = ..., timeout: Optional[Any] = ..., ignore_memcache_errors: bool = ...) -> None: ... + def load_bytecode(self, bucket): ... + def dump_bytecode(self, bucket): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/compiler.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/compiler.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ce29f720928bc0d8b3f06234f8d1d2315061280e --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/compiler.pyi @@ -0,0 +1,177 @@ +from keyword import iskeyword as is_python_keyword +from typing import Any, Optional + +from jinja2.visitor import NodeVisitor + +operators: Any +dict_item_iter: str + +unoptimize_before_dead_code: bool + +def generate(node, environment, name, filename, stream: Optional[Any] = ..., defer_init: bool = ...): ... +def has_safe_repr(value): ... +def find_undeclared(nodes, names): ... 
+ +class Identifiers: + declared: Any + outer_undeclared: Any + undeclared: Any + declared_locally: Any + declared_parameter: Any + def __init__(self) -> None: ... + def add_special(self, name): ... + def is_declared(self, name): ... + def copy(self): ... + +class Frame: + eval_ctx: Any + identifiers: Any + toplevel: bool + rootlevel: bool + require_output_check: Any + buffer: Any + block: Any + assigned_names: Any + parent: Any + def __init__(self, eval_ctx, parent: Optional[Any] = ...) -> None: ... + def copy(self): ... + def inspect(self, nodes): ... + def find_shadowed(self, extra: Any = ...): ... + def inner(self): ... + def soft(self): ... + __copy__: Any + +class VisitorExit(RuntimeError): ... + +class DependencyFinderVisitor(NodeVisitor): + filters: Any + tests: Any + def __init__(self) -> None: ... + def visit_Filter(self, node): ... + def visit_Test(self, node): ... + def visit_Block(self, node): ... + +class UndeclaredNameVisitor(NodeVisitor): + names: Any + undeclared: Any + def __init__(self, names) -> None: ... + def visit_Name(self, node): ... + def visit_Block(self, node): ... + +class FrameIdentifierVisitor(NodeVisitor): + identifiers: Any + def __init__(self, identifiers) -> None: ... + def visit_Name(self, node): ... + def visit_If(self, node): ... + def visit_Macro(self, node): ... + def visit_Import(self, node): ... + def visit_FromImport(self, node): ... + def visit_Assign(self, node): ... + def visit_For(self, node): ... + def visit_CallBlock(self, node): ... + def visit_FilterBlock(self, node): ... + def visit_AssignBlock(self, node): ... + def visit_Scope(self, node): ... + def visit_Block(self, node): ... + +class CompilerExit(Exception): ... + +class CodeGenerator(NodeVisitor): + environment: Any + name: Any + filename: Any + stream: Any + created_block_context: bool + defer_init: Any + import_aliases: Any + blocks: Any + extends_so_far: int + has_known_extends: bool + code_lineno: int + tests: Any + filters: Any + debug_info: Any + def __init__(self, environment, name, filename, stream: Optional[Any] = ..., defer_init: bool = ...) -> None: ... + def fail(self, msg, lineno): ... + def temporary_identifier(self): ... + def buffer(self, frame): ... + def return_buffer_contents(self, frame): ... + def indent(self): ... + def outdent(self, step: int = ...): ... + def start_write(self, frame, node: Optional[Any] = ...): ... + def end_write(self, frame): ... + def simple_write(self, s, frame, node: Optional[Any] = ...): ... + def blockvisit(self, nodes, frame): ... + def write(self, x): ... + def writeline(self, x, node: Optional[Any] = ..., extra: int = ...): ... + def newline(self, node: Optional[Any] = ..., extra: int = ...): ... + def signature(self, node, frame, extra_kwargs: Optional[Any] = ...): ... + def pull_locals(self, frame): ... + def pull_dependencies(self, nodes): ... + def unoptimize_scope(self, frame): ... + def push_scope(self, frame, extra_vars: Any = ...): ... + def pop_scope(self, aliases, frame): ... + def function_scoping(self, node, frame, children: Optional[Any] = ..., find_special: bool = ...): ... + def macro_body(self, node, frame, children: Optional[Any] = ...): ... + def macro_def(self, node, frame): ... + def position(self, node): ... + def visit_Template(self, node, frame: Optional[Any] = ...): ... + def visit_Block(self, node, frame): ... + def visit_Extends(self, node, frame): ... + def visit_Include(self, node, frame): ... + def visit_Import(self, node, frame): ... + def visit_FromImport(self, node, frame): ... 
+ def visit_For(self, node, frame): ... + def visit_If(self, node, frame): ... + def visit_Macro(self, node, frame): ... + def visit_CallBlock(self, node, frame): ... + def visit_FilterBlock(self, node, frame): ... + def visit_ExprStmt(self, node, frame): ... + def visit_Output(self, node, frame): ... + def make_assignment_frame(self, frame): ... + def export_assigned_vars(self, frame, assignment_frame): ... + def visit_Assign(self, node, frame): ... + def visit_AssignBlock(self, node, frame): ... + def visit_Name(self, node, frame): ... + def visit_Const(self, node, frame): ... + def visit_TemplateData(self, node, frame): ... + def visit_Tuple(self, node, frame): ... + def visit_List(self, node, frame): ... + def visit_Dict(self, node, frame): ... + def binop(self, interceptable: bool = ...): ... + def uaop(self, interceptable: bool = ...): ... + visit_Add: Any + visit_Sub: Any + visit_Mul: Any + visit_Div: Any + visit_FloorDiv: Any + visit_Pow: Any + visit_Mod: Any + visit_And: Any + visit_Or: Any + visit_Pos: Any + visit_Neg: Any + visit_Not: Any + def visit_Concat(self, node, frame): ... + def visit_Compare(self, node, frame): ... + def visit_Operand(self, node, frame): ... + def visit_Getattr(self, node, frame): ... + def visit_Getitem(self, node, frame): ... + def visit_Slice(self, node, frame): ... + def visit_Filter(self, node, frame): ... + def visit_Test(self, node, frame): ... + def visit_CondExpr(self, node, frame): ... + def visit_Call(self, node, frame, forward_caller: bool = ...): ... + def visit_Keyword(self, node, frame): ... + def visit_MarkSafe(self, node, frame): ... + def visit_MarkSafeIfAutoescape(self, node, frame): ... + def visit_EnvironmentAttribute(self, node, frame): ... + def visit_ExtensionAttribute(self, node, frame): ... + def visit_ImportedName(self, node, frame): ... + def visit_InternalName(self, node, frame): ... + def visit_ContextReference(self, node, frame): ... + def visit_Continue(self, node, frame): ... + def visit_Break(self, node, frame): ... + def visit_Scope(self, node, frame): ... + def visit_EvalContextModifier(self, node, frame): ... + def visit_ScopedEvalContextModifier(self, node, frame): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/constants.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/constants.pyi new file mode 100644 index 0000000000000000000000000000000000000000..55ea3ea5e4816e1ad0d9eca023b05f685707c31b --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/constants.pyi @@ -0,0 +1 @@ +LOREM_IPSUM_WORDS: str diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/debug.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/debug.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f495a4d99baf7521aa240f4ca894203b6d834189 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/debug.pyi @@ -0,0 +1,37 @@ +from typing import Any, Optional + +tproxy: Any +raise_helper: str + +class TracebackFrameProxy: + tb: Any + def __init__(self, tb) -> None: ... + @property + def tb_next(self): ... + def set_next(self, next): ... + @property + def is_jinja_frame(self): ... + def __getattr__(self, name): ... + +def make_frame_proxy(frame): ... + +class ProcessedTraceback: + exc_type: Any + exc_value: Any + frames: Any + def __init__(self, exc_type, exc_value, frames) -> None: ... 
+    def render_as_text(self, limit: Optional[Any] = ...): ...
+    def render_as_html(self, full: bool = ...): ...
+    @property
+    def is_template_syntax_error(self): ...
+    @property
+    def exc_info(self): ...
+    @property
+    def standard_exc_info(self): ...
+
+def make_traceback(exc_info, source_hint: Optional[Any] = ...): ...
+def translate_syntax_error(error, source: Optional[Any] = ...): ...
+def translate_exception(exc_info, initial_skip: int = ...): ...
+def fake_exc_info(exc_info, filename, lineno): ...
+
+tb_set_next: Any
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/defaults.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/defaults.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..e311f828836a1d9e22d5370d8ed299ddaf877526
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/defaults.pyi
@@ -0,0 +1,22 @@
+from typing import Any, Dict, Optional
+
+from jinja2.filters import FILTERS
+from jinja2.tests import TESTS
+
+DEFAULT_FILTERS = FILTERS
+DEFAULT_TESTS = TESTS
+
+BLOCK_START_STRING: str
+BLOCK_END_STRING: str
+VARIABLE_START_STRING: str
+VARIABLE_END_STRING: str
+COMMENT_START_STRING: str
+COMMENT_END_STRING: str
+LINE_STATEMENT_PREFIX: Optional[str]
+LINE_COMMENT_PREFIX: Optional[str]
+TRIM_BLOCKS: bool
+LSTRIP_BLOCKS: bool
+NEWLINE_SEQUENCE: str
+KEEP_TRAILING_NEWLINE: bool
+DEFAULT_NAMESPACE: Dict[str, Any]
+DEFAULT_POLICIES: Dict[str, Any]
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/environment.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/environment.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f09c1b5e713fe7744ea5e7c3c7187e3a424bfd18
--- /dev/null
+++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/environment.pyi
@@ -0,0 +1,224 @@
+import sys
+from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Text, Type, Union
+
+from .bccache import BytecodeCache
+from .loaders import BaseLoader
+from .runtime import Context, Undefined
+
+if sys.version_info >= (3, 6):
+    from typing import AsyncIterator, Awaitable
+
+def get_spontaneous_environment(*args): ...
+def create_cache(size): ...
+def copy_cache(cache): ...
+def load_extensions(environment, extensions): ...
+ +class Environment: + sandboxed: bool + overlayed: bool + linked_to: Any + shared: bool + exception_handler: Any + exception_formatter: Any + code_generator_class: Any + context_class: Any + block_start_string: Text + block_end_string: Text + variable_start_string: Text + variable_end_string: Text + comment_start_string: Text + comment_end_string: Text + line_statement_prefix: Text + line_comment_prefix: Text + trim_blocks: bool + lstrip_blocks: Any + newline_sequence: Text + keep_trailing_newline: bool + undefined: Type[Undefined] + optimized: bool + finalize: Callable[..., Any] + autoescape: Any + filters: Any + tests: Any + globals: Dict[str, Any] + loader: BaseLoader + cache: Any + bytecode_cache: BytecodeCache + auto_reload: bool + extensions: List[Any] + def __init__( + self, + block_start_string: Text = ..., + block_end_string: Text = ..., + variable_start_string: Text = ..., + variable_end_string: Text = ..., + comment_start_string: Any = ..., + comment_end_string: Text = ..., + line_statement_prefix: Text = ..., + line_comment_prefix: Text = ..., + trim_blocks: bool = ..., + lstrip_blocks: bool = ..., + newline_sequence: Text = ..., + keep_trailing_newline: bool = ..., + extensions: List[Any] = ..., + optimized: bool = ..., + undefined: Type[Undefined] = ..., + finalize: Optional[Callable[..., Any]] = ..., + autoescape: Union[bool, Callable[[str], bool]] = ..., + loader: Optional[BaseLoader] = ..., + cache_size: int = ..., + auto_reload: bool = ..., + bytecode_cache: Optional[BytecodeCache] = ..., + enable_async: bool = ..., + ) -> None: ... + def add_extension(self, extension): ... + def extend(self, **attributes): ... + def overlay( + self, + block_start_string: Text = ..., + block_end_string: Text = ..., + variable_start_string: Text = ..., + variable_end_string: Text = ..., + comment_start_string: Any = ..., + comment_end_string: Text = ..., + line_statement_prefix: Text = ..., + line_comment_prefix: Text = ..., + trim_blocks: bool = ..., + lstrip_blocks: bool = ..., + extensions: List[Any] = ..., + optimized: bool = ..., + undefined: Type[Undefined] = ..., + finalize: Callable[..., Any] = ..., + autoescape: bool = ..., + loader: Optional[BaseLoader] = ..., + cache_size: int = ..., + auto_reload: bool = ..., + bytecode_cache: Optional[BytecodeCache] = ..., + ): ... + lexer: Any + def iter_extensions(self): ... + def getitem(self, obj, argument): ... + def getattr(self, obj, attribute): ... + def call_filter( + self, + name, + value, + args: Optional[Any] = ..., + kwargs: Optional[Any] = ..., + context: Optional[Any] = ..., + eval_ctx: Optional[Any] = ..., + ): ... + def call_test(self, name, value, args: Optional[Any] = ..., kwargs: Optional[Any] = ...): ... + def parse(self, source, name: Optional[Any] = ..., filename: Optional[Any] = ...): ... + def lex(self, source, name: Optional[Any] = ..., filename: Optional[Any] = ...): ... + def preprocess(self, source: Text, name: Optional[Any] = ..., filename: Optional[Any] = ...): ... + def compile( + self, source, name: Optional[Any] = ..., filename: Optional[Any] = ..., raw: bool = ..., defer_init: bool = ... + ): ... + def compile_expression(self, source: Text, undefined_to_none: bool = ...): ... + def compile_templates( + self, + target, + extensions: Optional[Any] = ..., + filter_func: Optional[Any] = ..., + zip: str = ..., + log_function: Optional[Any] = ..., + ignore_errors: bool = ..., + py_compile: bool = ..., + ): ... + def list_templates(self, extensions: Optional[Any] = ..., filter_func: Optional[Any] = ...): ... 
+ def handle_exception(self, exc_info: Optional[Any] = ..., rendered: bool = ..., source_hint: Optional[Any] = ...): ... + def join_path(self, template: Union[Template, Text], parent: Text) -> Text: ... + def get_template( + self, name: Union[Template, Text], parent: Optional[Text] = ..., globals: Optional[Any] = ... + ) -> Template: ... + def select_template( + self, names: Sequence[Union[Template, Text]], parent: Optional[Text] = ..., globals: Optional[Dict[str, Any]] = ... + ) -> Template: ... + def get_or_select_template( + self, + template_name_or_list: Union[Union[Template, Text], Sequence[Union[Template, Text]]], + parent: Optional[Text] = ..., + globals: Optional[Dict[str, Any]] = ..., + ) -> Template: ... + def from_string( + self, source: Text, globals: Optional[Dict[str, Any]] = ..., template_class: Optional[Type[Template]] = ... + ) -> Template: ... + def make_globals(self, d: Optional[Dict[str, Any]]) -> Dict[str, Any]: ... + # Frequently added extensions are included here: + # from InternationalizationExtension: + def install_gettext_translations(self, translations: Any, newstyle: Optional[bool] = ...): ... + def install_null_translations(self, newstyle: Optional[bool] = ...): ... + def install_gettext_callables( + self, gettext: Callable[..., Any], ngettext: Callable[..., Any], newstyle: Optional[bool] = ... + ): ... + def uninstall_gettext_translations(self, translations: Any): ... + def extract_translations(self, source: Any, gettext_functions: Any): ... + newstyle_gettext: bool + +class Template: + name: Optional[str] + filename: Optional[str] + def __new__( + cls, + source, + block_start_string: Any = ..., + block_end_string: Any = ..., + variable_start_string: Any = ..., + variable_end_string: Any = ..., + comment_start_string: Any = ..., + comment_end_string: Any = ..., + line_statement_prefix: Any = ..., + line_comment_prefix: Any = ..., + trim_blocks: Any = ..., + lstrip_blocks: Any = ..., + newline_sequence: Any = ..., + keep_trailing_newline: Any = ..., + extensions: Any = ..., + optimized: bool = ..., + undefined: Any = ..., + finalize: Optional[Any] = ..., + autoescape: bool = ..., + ): ... + environment: Environment = ... + @classmethod + def from_code(cls, environment, code, globals, uptodate: Optional[Any] = ...): ... + @classmethod + def from_module_dict(cls, environment, module_dict, globals): ... + def render(self, *args, **kwargs) -> Text: ... + def stream(self, *args, **kwargs) -> TemplateStream: ... + def generate(self, *args, **kwargs) -> Iterator[Text]: ... + def new_context( + self, vars: Optional[Dict[str, Any]] = ..., shared: bool = ..., locals: Optional[Dict[str, Any]] = ... + ) -> Context: ... + def make_module( + self, vars: Optional[Dict[str, Any]] = ..., shared: bool = ..., locals: Optional[Dict[str, Any]] = ... + ) -> Context: ... + @property + def module(self) -> Any: ... + def get_corresponding_lineno(self, lineno): ... + @property + def is_up_to_date(self) -> bool: ... + @property + def debug_info(self): ... + if sys.version_info >= (3, 6): + def render_async(self, *args, **kwargs) -> Awaitable[Text]: ... + def generate_async(self, *args, **kwargs) -> AsyncIterator[Text]: ... + +class TemplateModule: + __name__: Any + def __init__(self, template, context) -> None: ... + def __html__(self): ... + +class TemplateExpression: + def __init__(self, template, undefined_to_none) -> None: ... + def __call__(self, *args, **kwargs): ... + +class TemplateStream: + def __init__(self, gen) -> None: ... 
+ def dump(self, fp, encoding: Optional[Text] = ..., errors: Text = ...): ... + buffered: bool + def disable_buffering(self) -> None: ... + def enable_buffering(self, size: int = ...) -> None: ... + def __iter__(self): ... + def __next__(self): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/exceptions.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/exceptions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f6be75c6d95dc11a7d9bbdbfca67f3b51953e46 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/exceptions.pyi @@ -0,0 +1,31 @@ +from typing import Any, Optional, Text + +class TemplateError(Exception): + def __init__(self, message: Optional[Text] = ...) -> None: ... + @property + def message(self): ... + def __unicode__(self): ... + +class TemplateNotFound(IOError, LookupError, TemplateError): + message: Any + name: Any + templates: Any + def __init__(self, name, message: Optional[Text] = ...) -> None: ... + +class TemplatesNotFound(TemplateNotFound): + templates: Any + def __init__(self, names: Any = ..., message: Optional[Text] = ...) -> None: ... + +class TemplateSyntaxError(TemplateError): + lineno: int + name: Text + filename: Text + source: Text + translated: bool + def __init__(self, message: Text, lineno: int, name: Optional[Text] = ..., filename: Optional[Text] = ...) -> None: ... + +class TemplateAssertionError(TemplateSyntaxError): ... +class TemplateRuntimeError(TemplateError): ... +class UndefinedError(TemplateRuntimeError): ... +class SecurityError(TemplateRuntimeError): ... +class FilterArgumentError(TemplateRuntimeError): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/ext.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/ext.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cdf28922b201aa3ccbe88cce153db6cfc6162f45 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/ext.pyi @@ -0,0 +1,66 @@ +from typing import Any, Optional + +GETTEXT_FUNCTIONS: Any + +class ExtensionRegistry(type): + def __new__(cls, name, bases, d): ... + +class Extension: + tags: Any + priority: int + environment: Any + def __init__(self, environment) -> None: ... + def bind(self, environment): ... + def preprocess(self, source, name, filename: Optional[Any] = ...): ... + def filter_stream(self, stream): ... + def parse(self, parser): ... + def attr(self, name, lineno: Optional[Any] = ...): ... + def call_method( + self, + name, + args: Optional[Any] = ..., + kwargs: Optional[Any] = ..., + dyn_args: Optional[Any] = ..., + dyn_kwargs: Optional[Any] = ..., + lineno: Optional[Any] = ..., + ): ... + +class InternationalizationExtension(Extension): + tags: Any + def __init__(self, environment) -> None: ... + def parse(self, parser): ... + +class ExprStmtExtension(Extension): + tags: Any + def parse(self, parser): ... + +class LoopControlExtension(Extension): + tags: Any + def parse(self, parser): ... + +class WithExtension(Extension): + tags: Any + def parse(self, parser): ... + +class AutoEscapeExtension(Extension): + tags: Any + def parse(self, parser): ... + +def extract_from_ast(node, gettext_functions: Any = ..., babel_style: bool = ...): ... + +class _CommentFinder: + tokens: Any + comment_tags: Any + offset: int + last_lineno: int + def __init__(self, tokens, comment_tags) -> None: ... 
+ def find_backwards(self, offset): ... + def find_comments(self, lineno): ... + +def babel_extract(fileobj, keywords, comment_tags, options): ... + +i18n: Any +do: Any +loopcontrols: Any +with_: Any +autoescape: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/filters.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/filters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f0fb210aa4d672c33c268c3a24a86d8036e36f5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/filters.pyi @@ -0,0 +1,56 @@ +from typing import Any, NamedTuple, Optional + +def contextfilter(f): ... +def evalcontextfilter(f): ... +def environmentfilter(f): ... +def make_attrgetter(environment, attribute): ... +def do_forceescape(value): ... +def do_urlencode(value): ... +def do_replace(eval_ctx, s, old, new, count: Optional[Any] = ...): ... +def do_upper(s): ... +def do_lower(s): ... +def do_xmlattr(_eval_ctx, d, autospace: bool = ...): ... +def do_capitalize(s): ... +def do_title(s): ... +def do_dictsort(value, case_sensitive: bool = ..., by: str = ...): ... +def do_sort(environment, value, reverse: bool = ..., case_sensitive: bool = ..., attribute: Optional[Any] = ...): ... +def do_default(value, default_value: str = ..., boolean: bool = ...): ... +def do_join(eval_ctx, value, d: str = ..., attribute: Optional[Any] = ...): ... +def do_center(value, width: int = ...): ... +def do_first(environment, seq): ... +def do_last(environment, seq): ... +def do_random(environment, seq): ... +def do_filesizeformat(value, binary: bool = ...): ... +def do_pprint(value, verbose: bool = ...): ... +def do_urlize(eval_ctx, value, trim_url_limit: Optional[Any] = ..., nofollow: bool = ..., target: Optional[Any] = ...): ... +def do_indent(s, width: int = ..., indentfirst: bool = ...): ... +def do_truncate(s, length: int = ..., killwords: bool = ..., end: str = ...): ... +def do_wordwrap(environment, s, width: int = ..., break_long_words: bool = ..., wrapstring: Optional[Any] = ...): ... +def do_wordcount(s): ... +def do_int(value, default: int = ..., base: int = ...): ... +def do_float(value, default: float = ...): ... +def do_format(value, *args, **kwargs): ... +def do_trim(value): ... +def do_striptags(value): ... +def do_slice(value, slices, fill_with: Optional[Any] = ...): ... +def do_batch(value, linecount, fill_with: Optional[Any] = ...): ... +def do_round(value, precision: int = ..., method: str = ...): ... +def do_groupby(environment, value, attribute): ... + +class _GroupTuple(NamedTuple): + grouper: Any + list: Any + +def do_sum(environment, iterable, attribute: Optional[Any] = ..., start: int = ...): ... +def do_list(value): ... +def do_mark_safe(value): ... +def do_mark_unsafe(value): ... +def do_reverse(value): ... +def do_attr(environment, obj, name): ... +def do_map(*args, **kwargs): ... +def do_select(*args, **kwargs): ... +def do_reject(*args, **kwargs): ... +def do_selectattr(*args, **kwargs): ... +def do_rejectattr(*args, **kwargs): ... 
+ +FILTERS: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/lexer.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/lexer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..88d80e69c92d454ae42fe9f201f90b08b0452bd7 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/lexer.pyi @@ -0,0 +1,117 @@ +from typing import Any, Optional, Tuple + +whitespace_re: Any +string_re: Any +integer_re: Any +name_re: Any +float_re: Any +newline_re: Any +TOKEN_ADD: Any +TOKEN_ASSIGN: Any +TOKEN_COLON: Any +TOKEN_COMMA: Any +TOKEN_DIV: Any +TOKEN_DOT: Any +TOKEN_EQ: Any +TOKEN_FLOORDIV: Any +TOKEN_GT: Any +TOKEN_GTEQ: Any +TOKEN_LBRACE: Any +TOKEN_LBRACKET: Any +TOKEN_LPAREN: Any +TOKEN_LT: Any +TOKEN_LTEQ: Any +TOKEN_MOD: Any +TOKEN_MUL: Any +TOKEN_NE: Any +TOKEN_PIPE: Any +TOKEN_POW: Any +TOKEN_RBRACE: Any +TOKEN_RBRACKET: Any +TOKEN_RPAREN: Any +TOKEN_SEMICOLON: Any +TOKEN_SUB: Any +TOKEN_TILDE: Any +TOKEN_WHITESPACE: Any +TOKEN_FLOAT: Any +TOKEN_INTEGER: Any +TOKEN_NAME: Any +TOKEN_STRING: Any +TOKEN_OPERATOR: Any +TOKEN_BLOCK_BEGIN: Any +TOKEN_BLOCK_END: Any +TOKEN_VARIABLE_BEGIN: Any +TOKEN_VARIABLE_END: Any +TOKEN_RAW_BEGIN: Any +TOKEN_RAW_END: Any +TOKEN_COMMENT_BEGIN: Any +TOKEN_COMMENT_END: Any +TOKEN_COMMENT: Any +TOKEN_LINESTATEMENT_BEGIN: Any +TOKEN_LINESTATEMENT_END: Any +TOKEN_LINECOMMENT_BEGIN: Any +TOKEN_LINECOMMENT_END: Any +TOKEN_LINECOMMENT: Any +TOKEN_DATA: Any +TOKEN_INITIAL: Any +TOKEN_EOF: Any +operators: Any +reverse_operators: Any +operator_re: Any +ignored_tokens: Any +ignore_if_empty: Any + +def describe_token(token): ... +def describe_token_expr(expr): ... +def count_newlines(value): ... +def compile_rules(environment): ... + +class Failure: + message: Any + error_class: Any + def __init__(self, message, cls: Any = ...) -> None: ... + def __call__(self, lineno, filename): ... + +class Token(Tuple[int, Any, Any]): + lineno: Any + type: Any + value: Any + def __new__(cls, lineno, type, value): ... + def test(self, expr): ... + def test_any(self, *iterable): ... + +class TokenStreamIterator: + stream: Any + def __init__(self, stream) -> None: ... + def __iter__(self): ... + def __next__(self): ... + +class TokenStream: + name: Any + filename: Any + closed: bool + current: Any + def __init__(self, generator, name, filename) -> None: ... + def __iter__(self): ... + def __bool__(self): ... + __nonzero__: Any + eos: Any + def push(self, token): ... + def look(self): ... + def skip(self, n: int = ...): ... + def next_if(self, expr): ... + def skip_if(self, expr): ... + def __next__(self): ... + def close(self): ... + def expect(self, expr): ... + +def get_lexer(environment): ... + +class Lexer: + newline_sequence: Any + keep_trailing_newline: Any + rules: Any + def __init__(self, environment) -> None: ... + def tokenize(self, source, name: Optional[Any] = ..., filename: Optional[Any] = ..., state: Optional[Any] = ...): ... + def wrap(self, stream, name: Optional[Any] = ..., filename: Optional[Any] = ...): ... + def tokeniter(self, source, name, filename: Optional[Any] = ..., state: Optional[Any] = ...): ... 
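The lexer stubs above leave most of the token machinery as Any; a minimal sketch of what Environment.lex actually yields at runtime may help when reading them. This assumes jinja2 is installed; the sample template and the tokens in the trailing comment are illustrative.

from jinja2 import Environment

env = Environment()
# Environment.lex tokenizes a template source and yields
# (lineno, token_type, value) tuples; the TOKEN_* constants stubbed
# above name the possible token types.
for lineno, token_type, value in env.lex("Hello {{ name }}!"):
    print(lineno, token_type, repr(value))
# Roughly: (1, 'data', 'Hello '), (1, 'variable_begin', '{{'),
# (1, 'name', 'name'), (1, 'variable_end', '}}'), (1, 'data', '!')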
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/loaders.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/loaders.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9f5cd943c0c139c4a7c298b64bf64a5512514fc5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/loaders.pyi @@ -0,0 +1,80 @@ +import sys +from types import ModuleType +from typing import Any, Callable, Iterable, List, Optional, Text, Tuple, Union + +from .environment import Environment + +if sys.version_info >= (3, 7): + from os import PathLike + + _SearchPath = Union[Text, PathLike[str], Iterable[Union[Text, PathLike[str]]]] +else: + _SearchPath = Union[Text, Iterable[Text]] + +def split_template_path(template: Text) -> List[Text]: ... + +class BaseLoader: + has_source_access: bool + def get_source(self, environment, template): ... + def list_templates(self): ... + def load(self, environment, name, globals: Optional[Any] = ...): ... + +class FileSystemLoader(BaseLoader): + searchpath: Text + encoding: Any + followlinks: Any + def __init__(self, searchpath: _SearchPath, encoding: Text = ..., followlinks: bool = ...) -> None: ... + def get_source(self, environment: Environment, template: Text) -> Tuple[Text, Text, Callable[..., Any]]: ... + def list_templates(self): ... + +class PackageLoader(BaseLoader): + encoding: Text + manager: Any + filesystem_bound: Any + provider: Any + package_path: Any + def __init__(self, package_name: Text, package_path: Text = ..., encoding: Text = ...) -> None: ... + def get_source(self, environment: Environment, template: Text) -> Tuple[Text, Text, Callable[..., Any]]: ... + def list_templates(self): ... + +class DictLoader(BaseLoader): + mapping: Any + def __init__(self, mapping) -> None: ... + def get_source(self, environment: Environment, template: Text) -> Tuple[Text, Text, Callable[..., Any]]: ... + def list_templates(self): ... + +class FunctionLoader(BaseLoader): + load_func: Any + def __init__(self, load_func) -> None: ... + def get_source( + self, environment: Environment, template: Text + ) -> Tuple[Text, Optional[Text], Optional[Callable[..., Any]]]: ... + +class PrefixLoader(BaseLoader): + mapping: Any + delimiter: Any + def __init__(self, mapping, delimiter: str = ...) -> None: ... + def get_loader(self, template): ... + def get_source(self, environment: Environment, template: Text) -> Tuple[Text, Text, Callable[..., Any]]: ... + def load(self, environment, name, globals: Optional[Any] = ...): ... + def list_templates(self): ... + +class ChoiceLoader(BaseLoader): + loaders: Any + def __init__(self, loaders) -> None: ... + def get_source(self, environment: Environment, template: Text) -> Tuple[Text, Text, Callable[..., Any]]: ... + def load(self, environment, name, globals: Optional[Any] = ...): ... + def list_templates(self): ... + +class _TemplateModule(ModuleType): ... + +class ModuleLoader(BaseLoader): + has_source_access: bool + module: Any + package_name: Any + def __init__(self, path) -> None: ... + @staticmethod + def get_template_key(name): ... + @staticmethod + def get_module_filename(name): ... + def load(self, environment, name, globals: Optional[Any] = ...): ... 
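The loader stubs above cover jinja2's template-lookup layer. A minimal usage sketch, assuming jinja2 is installed; the "templates" directory is an illustrative search path, not something the stubs prescribe.

from jinja2 import ChoiceLoader, DictLoader, Environment, FileSystemLoader

# ChoiceLoader tries each loader in order until one finds the template;
# DictLoader serves templates straight from an in-memory mapping.
env = Environment(
    loader=ChoiceLoader(
        [
            FileSystemLoader("templates"),  # illustrative directory
            DictLoader({"hello.txt": "Hello {{ name }}!"}),
        ]
    )
)
print(env.get_template("hello.txt").render(name="World"))  # Hello World!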
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/meta.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/meta.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f0b50bec203d98866874631e97b2e8cba4ac5bf --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/meta.pyi @@ -0,0 +1,12 @@ +from typing import Any + +from jinja2.compiler import CodeGenerator + +class TrackingCodeGenerator(CodeGenerator): + undeclared_identifiers: Any + def __init__(self, environment) -> None: ... + def write(self, x): ... + def pull_locals(self, frame): ... + +def find_undeclared_variables(ast): ... +def find_referenced_templates(ast): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/nodes.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/nodes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..84871975d3e2c5d4ca8d23b989eb4cb427066bcd --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/nodes.pyi @@ -0,0 +1,255 @@ +import typing +from typing import Any, Optional + +class Impossible(Exception): ... + +class NodeType(type): + def __new__(cls, name, bases, d): ... + +class EvalContext: + environment: Any + autoescape: Any + volatile: bool + def __init__(self, environment, template_name: Optional[Any] = ...) -> None: ... + def save(self): ... + def revert(self, old): ... + +def get_eval_context(node, ctx): ... + +class Node: + fields: Any + attributes: Any + abstract: bool + def __init__(self, *fields, **attributes) -> None: ... + def iter_fields(self, exclude: Optional[Any] = ..., only: Optional[Any] = ...): ... + def iter_child_nodes(self, exclude: Optional[Any] = ..., only: Optional[Any] = ...): ... + def find(self, node_type): ... + def find_all(self, node_type): ... + def set_ctx(self, ctx): ... + def set_lineno(self, lineno, override: bool = ...): ... + def set_environment(self, environment): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + __hash__: Any + +class Stmt(Node): + abstract: bool + +class Helper(Node): + abstract: bool + +class Template(Node): + fields: Any + +class Output(Stmt): + fields: Any + +class Extends(Stmt): + fields: Any + +class For(Stmt): + fields: Any + +class If(Stmt): + fields: Any + +class Macro(Stmt): + fields: Any + name: str + args: typing.List[Any] + defaults: typing.List[Any] + body: typing.List[Any] + +class CallBlock(Stmt): + fields: Any + +class FilterBlock(Stmt): + fields: Any + +class Block(Stmt): + fields: Any + +class Include(Stmt): + fields: Any + +class Import(Stmt): + fields: Any + +class FromImport(Stmt): + fields: Any + +class ExprStmt(Stmt): + fields: Any + +class Assign(Stmt): + fields: Any + +class AssignBlock(Stmt): + fields: Any + +class Expr(Node): + abstract: bool + def as_const(self, eval_ctx: Optional[Any] = ...): ... + def can_assign(self): ... + +class BinExpr(Expr): + fields: Any + operator: Any + abstract: bool + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class UnaryExpr(Expr): + fields: Any + operator: Any + abstract: bool + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Name(Expr): + fields: Any + def can_assign(self): ... + +class Literal(Expr): + abstract: bool + +class Const(Literal): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... 
+ @classmethod + def from_untrusted(cls, value, lineno: Optional[Any] = ..., environment: Optional[Any] = ...): ... + +class TemplateData(Literal): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Tuple(Literal): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + def can_assign(self): ... + +class List(Literal): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Dict(Literal): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Pair(Helper): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Keyword(Helper): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class CondExpr(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Filter(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Test(Expr): + fields: Any + +class Call(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Getitem(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + def can_assign(self): ... + +class Getattr(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + def can_assign(self): ... + +class Slice(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Concat(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Compare(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Operand(Helper): + fields: Any + +class Mul(BinExpr): + operator: str + +class Div(BinExpr): + operator: str + +class FloorDiv(BinExpr): + operator: str + +class Add(BinExpr): + operator: str + +class Sub(BinExpr): + operator: str + +class Mod(BinExpr): + operator: str + +class Pow(BinExpr): + operator: str + +class And(BinExpr): + operator: str + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Or(BinExpr): + operator: str + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class Not(UnaryExpr): + operator: str + +class Neg(UnaryExpr): + operator: str + +class Pos(UnaryExpr): + operator: str + +class EnvironmentAttribute(Expr): + fields: Any + +class ExtensionAttribute(Expr): + fields: Any + +class ImportedName(Expr): + fields: Any + +class InternalName(Expr): + fields: Any + def __init__(self) -> None: ... + +class MarkSafe(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class MarkSafeIfAutoescape(Expr): + fields: Any + def as_const(self, eval_ctx: Optional[Any] = ...): ... + +class ContextReference(Expr): ... +class Continue(Stmt): ... +class Break(Stmt): ... + +class Scope(Stmt): + fields: Any + +class EvalContextModifier(Stmt): + fields: Any + +class ScopedEvalContextModifier(EvalContextModifier): + fields: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/optimizer.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/optimizer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d0056acf908e74791bf47152c23ba2dab42946de --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/optimizer.pyi @@ -0,0 +1,30 @@ +from typing import Any + +from jinja2.visitor import NodeTransformer + +def optimize(node, environment): ... + +class Optimizer(NodeTransformer): + environment: Any + def __init__(self, environment) -> None: ... 
+ def visit_If(self, node): ... + def fold(self, node): ... + visit_Add: Any + visit_Sub: Any + visit_Mul: Any + visit_Div: Any + visit_FloorDiv: Any + visit_Pow: Any + visit_Mod: Any + visit_And: Any + visit_Or: Any + visit_Pos: Any + visit_Neg: Any + visit_Not: Any + visit_Compare: Any + visit_Getitem: Any + visit_Getattr: Any + visit_Call: Any + visit_Filter: Any + visit_Test: Any + visit_CondExpr: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/parser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/parser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ae5962f64d088b9b7e8f65ccb1ffb501eeaa2591 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/parser.pyi @@ -0,0 +1,68 @@ +from typing import Any, Optional + +class Parser: + environment: Any + stream: Any + name: Any + filename: Any + closed: bool + extensions: Any + def __init__( + self, environment, source, name: Optional[Any] = ..., filename: Optional[Any] = ..., state: Optional[Any] = ... + ) -> None: ... + def fail(self, msg, lineno: Optional[Any] = ..., exc: Any = ...): ... + def fail_unknown_tag(self, name, lineno: Optional[Any] = ...): ... + def fail_eof(self, end_tokens: Optional[Any] = ..., lineno: Optional[Any] = ...): ... + def is_tuple_end(self, extra_end_rules: Optional[Any] = ...): ... + def free_identifier(self, lineno: Optional[Any] = ...): ... + def parse_statement(self): ... + def parse_statements(self, end_tokens, drop_needle: bool = ...): ... + def parse_set(self): ... + def parse_for(self): ... + def parse_if(self): ... + def parse_block(self): ... + def parse_extends(self): ... + def parse_import_context(self, node, default): ... + def parse_include(self): ... + def parse_import(self): ... + def parse_from(self): ... + def parse_signature(self, node): ... + def parse_call_block(self): ... + def parse_filter_block(self): ... + def parse_macro(self): ... + def parse_print(self): ... + def parse_assign_target(self, with_tuple: bool = ..., name_only: bool = ..., extra_end_rules: Optional[Any] = ...): ... + def parse_expression(self, with_condexpr: bool = ...): ... + def parse_condexpr(self): ... + def parse_or(self): ... + def parse_and(self): ... + def parse_not(self): ... + def parse_compare(self): ... + def parse_add(self): ... + def parse_sub(self): ... + def parse_concat(self): ... + def parse_mul(self): ... + def parse_div(self): ... + def parse_floordiv(self): ... + def parse_mod(self): ... + def parse_pow(self): ... + def parse_unary(self, with_filter: bool = ...): ... + def parse_primary(self): ... + def parse_tuple( + self, + simplified: bool = ..., + with_condexpr: bool = ..., + extra_end_rules: Optional[Any] = ..., + explicit_parentheses: bool = ..., + ): ... + def parse_list(self): ... + def parse_dict(self): ... + def parse_postfix(self, node): ... + def parse_filter_expr(self, node): ... + def parse_subscript(self, node): ... + def parse_subscribed(self): ... + def parse_call(self, node): ... + def parse_filter(self, node, start_inline: bool = ...): ... + def parse_test(self, node): ... + def subparse(self, end_tokens: Optional[Any] = ...): ... + def parse(self): ... 
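The Parser stubbed above produces the AST that the jinja2.meta helpers stubbed earlier consume; a minimal sketch of that round trip, assuming jinja2 is installed:

from jinja2 import Environment, meta

env = Environment()
# Environment.parse drives the Parser and returns an AST that can be
# inspected statically, without rendering the template.
ast = env.parse("{% extends 'base.html' %}{{ user.name }}")
print(meta.find_undeclared_variables(ast))        # {'user'}
print(list(meta.find_referenced_templates(ast)))  # ['base.html']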
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/runtime.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/runtime.pyi new file mode 100644 index 0000000000000000000000000000000000000000..66ffe9d3e367f94c1c1fc80dec0838f2c6e3c0b0 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/runtime.pyi @@ -0,0 +1,132 @@ +from typing import Any, Dict, Optional, Text, Union + +from jinja2.environment import Environment +from jinja2.exceptions import TemplateNotFound as TemplateNotFound, TemplateRuntimeError as TemplateRuntimeError +from jinja2.utils import Markup as Markup, concat as concat, escape as escape, missing as missing + +to_string: Any +identity: Any + +def markup_join(seq): ... +def unicode_join(seq): ... + +class TemplateReference: + def __init__(self, context) -> None: ... + def __getitem__(self, name): ... + +class Context: + parent: Union[Context, Dict[str, Any]] + vars: Dict[str, Any] + environment: Environment + eval_ctx: Any + exported_vars: Any + name: Text + blocks: Dict[str, Any] + def __init__( + self, environment: Environment, parent: Union[Context, Dict[str, Any]], name: Text, blocks: Dict[str, Any] + ) -> None: ... + def super(self, name, current): ... + def get(self, key, default: Optional[Any] = ...): ... + def resolve(self, key): ... + def get_exported(self): ... + def get_all(self): ... + def call(__self, __obj, *args, **kwargs): ... + def derived(self, locals: Optional[Any] = ...): ... + keys: Any + values: Any + items: Any + iterkeys: Any + itervalues: Any + iteritems: Any + def __contains__(self, name): ... + def __getitem__(self, key): ... + +class BlockReference: + name: Any + def __init__(self, name, context, stack, depth) -> None: ... + @property + def super(self): ... + def __call__(self): ... + +class LoopContext: + index0: int + depth0: Any + def __init__(self, iterable, recurse: Optional[Any] = ..., depth0: int = ...) -> None: ... + def cycle(self, *args): ... + first: Any + last: Any + index: Any + revindex: Any + revindex0: Any + depth: Any + def __len__(self): ... + def __iter__(self): ... + def loop(self, iterable): ... + __call__: Any + @property + def length(self): ... + +class LoopContextIterator: + context: Any + def __init__(self, context) -> None: ... + def __iter__(self): ... + def __next__(self): ... + +class Macro: + name: Any + arguments: Any + defaults: Any + catch_kwargs: Any + catch_varargs: Any + caller: Any + def __init__(self, environment, func, name, arguments, defaults, catch_kwargs, catch_varargs, caller) -> None: ... + def __call__(self, *args, **kwargs): ... + +class Undefined: + def __init__(self, hint: Optional[Any] = ..., obj: Any = ..., name: Optional[Any] = ..., exc: Any = ...) -> None: ... + def __getattr__(self, name): ... + __add__: Any + __radd__: Any + __mul__: Any + __rmul__: Any + __div__: Any + __rdiv__: Any + __truediv__: Any + __rtruediv__: Any + __floordiv__: Any + __rfloordiv__: Any + __mod__: Any + __rmod__: Any + __pos__: Any + __neg__: Any + __call__: Any + __getitem__: Any + __lt__: Any + __le__: Any + __gt__: Any + __ge__: Any + __int__: Any + __float__: Any + __complex__: Any + __pow__: Any + __rpow__: Any + def __eq__(self, other): ... + def __ne__(self, other): ... + def __hash__(self): ... + def __len__(self): ... + def __iter__(self): ... + def __nonzero__(self): ... + __bool__: Any + +def make_logging_undefined(logger: Optional[Any] = ..., base: Optional[Any] = ...): ... 
+ +class DebugUndefined(Undefined): ... + +class StrictUndefined(Undefined): + __iter__: Any + __len__: Any + __nonzero__: Any + __eq__: Any + __ne__: Any + __bool__: Any + __hash__: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/sandbox.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/sandbox.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1fc319cfd76c9fee662f9dafddfd6f8f2c7b6390 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/sandbox.pyi @@ -0,0 +1,35 @@ +from typing import Any + +from jinja2.environment import Environment + +MAX_RANGE: int +UNSAFE_FUNCTION_ATTRIBUTES: Any +UNSAFE_METHOD_ATTRIBUTES: Any +UNSAFE_GENERATOR_ATTRIBUTES: Any + +def safe_range(*args): ... +def unsafe(f): ... +def is_internal_attribute(obj, attr): ... +def modifies_known_mutable(obj, attr): ... + +class SandboxedEnvironment(Environment): + sandboxed: bool + default_binop_table: Any + default_unop_table: Any + intercepted_binops: Any + intercepted_unops: Any + def intercept_unop(self, operator): ... + binop_table: Any + unop_table: Any + def __init__(self, *args, **kwargs) -> None: ... + def is_safe_attribute(self, obj, attr, value): ... + def is_safe_callable(self, obj): ... + def call_binop(self, context, operator, left, right): ... + def call_unop(self, context, operator, arg): ... + def getitem(self, obj, argument): ... + def getattr(self, obj, attribute): ... + def unsafe_undefined(self, obj, attribute): ... + def call(__self, __context, __obj, *args, **kwargs): ... + +class ImmutableSandboxedEnvironment(SandboxedEnvironment): + def is_safe_attribute(self, obj, attr, value): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/tests.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/tests.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2645fe9ff41c853fc53c4e2cfdb98273067a6aa2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/tests.pyi @@ -0,0 +1,24 @@ +from typing import Any + +number_re: Any +regex_type: Any +test_callable: Any + +def test_odd(value): ... +def test_even(value): ... +def test_divisibleby(value, num): ... +def test_defined(value): ... +def test_undefined(value): ... +def test_none(value): ... +def test_lower(value): ... +def test_upper(value): ... +def test_string(value): ... +def test_mapping(value): ... +def test_number(value): ... +def test_sequence(value): ... +def test_equalto(value, other): ... +def test_sameas(value, other): ... +def test_iterable(value): ... +def test_escaped(value): ... 
+ +TESTS: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/utils.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3d1b1cccf2d10c41e32637dddbf57a6232d16572 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/utils.pyi @@ -0,0 +1,87 @@ +from _typeshed import AnyPath +from typing import IO, Any, Callable, Iterable, Optional, Protocol, Text, TypeVar, Union +from typing_extensions import Literal + +from markupsafe import Markup as Markup, escape as escape, soft_unicode as soft_unicode + +missing: Any +internal_code: Any +concat: Any + +_CallableT = TypeVar("_CallableT", bound=Callable[..., Any]) + +class _ContextFunction(Protocol[_CallableT]): + contextfunction: Literal[True] + __call__: _CallableT + +class _EvalContextFunction(Protocol[_CallableT]): + evalcontextfunction: Literal[True] + __call__: _CallableT + +class _EnvironmentFunction(Protocol[_CallableT]): + environmentfunction: Literal[True] + __call__: _CallableT + +def contextfunction(f: _CallableT) -> _ContextFunction[_CallableT]: ... +def evalcontextfunction(f: _CallableT) -> _EvalContextFunction[_CallableT]: ... +def environmentfunction(f: _CallableT) -> _EnvironmentFunction[_CallableT]: ... +def internalcode(f: _CallableT) -> _CallableT: ... +def is_undefined(obj: object) -> bool: ... +def select_autoescape( + enabled_extensions: Iterable[str] = ..., + disabled_extensions: Iterable[str] = ..., + default_for_string: bool = ..., + default: bool = ..., +) -> Callable[[str], bool]: ... +def consume(iterable: Iterable[object]) -> None: ... +def clear_caches() -> None: ... +def import_string(import_name: str, silent: bool = ...) -> Any: ... +def open_if_exists(filename: AnyPath, mode: str = ...) -> Optional[IO[Any]]: ... +def object_type_repr(obj: object) -> str: ... +def pformat(obj: object, verbose: bool = ...) -> str: ... +def urlize( + text: Union[Markup, Text], + trim_url_limit: Optional[int] = ..., + rel: Optional[Union[Markup, Text]] = ..., + target: Optional[Union[Markup, Text]] = ..., +) -> str: ... +def generate_lorem_ipsum(n: int = ..., html: bool = ..., min: int = ..., max: int = ...) -> Union[Markup, str]: ... +def unicode_urlencode(obj: object, charset: str = ..., for_qs: bool = ...) -> str: ... + +class LRUCache: + capacity: Any + def __init__(self, capacity) -> None: ... + def __getnewargs__(self): ... + def copy(self): ... + def get(self, key, default: Optional[Any] = ...): ... + def setdefault(self, key, default: Optional[Any] = ...): ... + def clear(self): ... + def __contains__(self, key): ... + def __len__(self): ... + def __getitem__(self, key): ... + def __setitem__(self, key, value): ... + def __delitem__(self, key): ... + def items(self): ... + def iteritems(self): ... + def values(self): ... + def itervalue(self): ... + def keys(self): ... + def iterkeys(self): ... + __iter__: Any + def __reversed__(self): ... + __copy__: Any + +class Cycler: + items: Any + def __init__(self, *items) -> None: ... + pos: int + def reset(self): ... + @property + def current(self): ... + def __next__(self): ... + +class Joiner: + sep: Any + used: bool + def __init__(self, sep: str = ...) -> None: ... + def __call__(self): ... 
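select_autoescape is one of the few fully typed helpers in the utils stub above; a minimal sketch of how its Callable[[str], bool] result plugs into Environment, assuming jinja2 is installed:

from jinja2 import Environment, select_autoescape

# Escape templates whose names end in .html/.xml; templates compiled
# from plain strings (template name None) stay unescaped here because
# default_for_string is False.
env = Environment(
    autoescape=select_autoescape(
        enabled_extensions=("html", "xml"),
        default_for_string=False,
    )
)
print(env.from_string("{{ payload }}").render(payload="<b>hi</b>"))
# -> <b>hi</b>, unescaped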
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/visitor.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/visitor.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ef34328dfe16a3ac7d53cffd27b9055d9dd9444c --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/jinja2/visitor.pyi @@ -0,0 +1,8 @@ +class NodeVisitor: + def get_visitor(self, node): ... + def visit(self, node, *args, **kwargs): ... + def generic_visit(self, node, *args, **kwargs): ... + +class NodeTransformer(NodeVisitor): + def generic_visit(self, node, *args, **kwargs): ... + def visit_list(self, node, *args, **kwargs): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3f57adcc43a2c86132c46b3091f710a471deed88 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__init__.pyi @@ -0,0 +1,2 @@ +from .core import Markdown as Markdown, markdown as markdown, markdownFromFile as markdownFromFile +from .extensions import Extension as Extension diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__meta__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__meta__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4a5eacaf90cb9858abc26a643b137a65d7b2ba2e --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/__meta__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +__version_info__: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockparser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockparser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..602ecca0b5040f42bb50eb2ab14c89fc055689bf --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockparser.pyi @@ -0,0 +1,18 @@ +from typing import Any + +class State(list): + def set(self, state) -> None: ... + def reset(self) -> None: ... + def isstate(self, state): ... + +class BlockParser: + blockprocessors: Any + state: Any + md: Any + def __init__(self, md) -> None: ... + @property + def markdown(self): ... + root: Any + def parseDocument(self, lines): ... + def parseChunk(self, parent, text) -> None: ... + def parseBlocks(self, parent, blocks) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockprocessors.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockprocessors.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0637d278de3f95b203bcc4b1c1f473d79da234b2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/blockprocessors.pyi @@ -0,0 +1,59 @@ +from typing import Any, Pattern + +logger: Any + +def build_block_parser(md, **kwargs): ... + +class BlockProcessor: + parser: Any + tab_length: Any + def __init__(self, parser) -> None: ... + def lastChild(self, parent): ... + def detab(self, text): ... + def looseDetab(self, text, level: int = ...): ... + def test(self, parent, block) -> None: ... 
+ def run(self, parent, blocks) -> None: ... + +class ListIndentProcessor(BlockProcessor): + ITEM_TYPES: Any + LIST_TYPES: Any + INDENT_RE: Pattern + def __init__(self, *args) -> None: ... + def create_item(self, parent, block) -> None: ... + def get_level(self, parent, block): ... + +class CodeBlockProcessor(BlockProcessor): ... + +class BlockQuoteProcessor(BlockProcessor): + RE: Pattern + def clean(self, line): ... + +class OListProcessor(BlockProcessor): + TAG: str = ... + STARTSWITH: str = ... + LAZY_OL: bool = ... + SIBLING_TAGS: Any + RE: Pattern + CHILD_RE: Pattern + INDENT_RE: Pattern + def __init__(self, parser) -> None: ... + def get_items(self, block): ... + +class UListProcessor(OListProcessor): + TAG: str = ... + RE: Pattern + def __init__(self, parser) -> None: ... + +class HashHeaderProcessor(BlockProcessor): + RE: Pattern + +class SetextHeaderProcessor(BlockProcessor): + RE: Pattern + +class HRProcessor(BlockProcessor): + RE: str = ... + SEARCH_RE: Pattern + match: Any + +class EmptyBlockProcessor(BlockProcessor): ... +class ParagraphProcessor(BlockProcessor): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/core.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/core.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4d8dadfaac86d8d5045eb714fd8d4d6bde90f79f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/core.pyi @@ -0,0 +1,63 @@ +from typing import Any, BinaryIO, Callable, ClassVar, Dict, List, Mapping, Optional, Sequence, Text, TextIO, Union +from typing_extensions import Literal +from xml.etree.ElementTree import Element + +from .blockparser import BlockParser +from .extensions import Extension +from .util import HtmlStash, Registry + +class Markdown: + preprocessors: Registry + inlinePatterns: Registry + treeprocessors: Registry + postprocessors: Registry + parser: BlockParser + htmlStash: HtmlStash + output_formats: ClassVar[Dict[Literal["xhtml", "html"], Callable[[Element], Text]]] + output_format: Literal["xhtml", "html"] + serializer: Callable[[Element], Text] + tab_length: int + block_level_elements: List[str] + def __init__( + self, + *, + extensions: Optional[Sequence[Union[str, Extension]]] = ..., + extension_configs: Optional[Mapping[str, Mapping[str, Any]]] = ..., + output_format: Optional[Literal["xhtml", "html"]] = ..., + tab_length: Optional[int] = ..., + ) -> None: ... + def build_parser(self) -> Markdown: ... + def registerExtensions( + self, extensions: Sequence[Union[Extension, str]], configs: Mapping[str, Mapping[str, Any]] + ) -> Markdown: ... + def build_extension(self, ext_name: Text, configs: Mapping[str, str]) -> Extension: ... + def registerExtension(self, extension: Extension) -> Markdown: ... + def reset(self: Markdown) -> Markdown: ... + def set_output_format(self, format: Literal["xhtml", "html"]) -> Markdown: ... + def is_block_level(self, tag: str) -> bool: ... + def convert(self, source: Text) -> Text: ... + def convertFile( + self, + input: Optional[Union[str, TextIO, BinaryIO]] = ..., + output: Optional[Union[str, TextIO, BinaryIO]] = ..., + encoding: Optional[str] = ..., + ) -> Markdown: ... + +def markdown( + text: Text, + *, + extensions: Optional[Sequence[Union[str, Extension]]] = ..., + extension_configs: Optional[Mapping[str, Mapping[str, Any]]] = ..., + output_format: Optional[Literal["xhtml", "html"]] = ..., + tab_length: Optional[int] = ..., +) -> Text: ... 
+def markdownFromFile( + *, + input: Optional[Union[str, TextIO, BinaryIO]] = ..., + output: Optional[Union[str, TextIO, BinaryIO]] = ..., + encoding: Optional[str] = ..., + extensions: Optional[Sequence[Union[str, Extension]]] = ..., + extension_configs: Optional[Mapping[str, Mapping[str, Any]]] = ..., + output_format: Optional[Literal["xhtml", "html"]] = ..., + tab_length: Optional[int] = ..., +) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/inlinepatterns.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/inlinepatterns.pyi new file mode 100644 index 0000000000000000000000000000000000000000..70f469ac3ca12320aa149db3b138c9977a8cd6bc --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/inlinepatterns.pyi @@ -0,0 +1,103 @@ +from typing import Any, Match, Optional, Tuple, Union +from xml.etree.ElementTree import Element + +def build_inlinepatterns(md, **kwargs): ... + +NOIMG: str +BACKTICK_RE: str +ESCAPE_RE: str +EMPHASIS_RE: str +STRONG_RE: str +SMART_STRONG_RE: str +SMART_EMPHASIS_RE: str +SMART_STRONG_EM_RE: str +EM_STRONG_RE: str +EM_STRONG2_RE: str +STRONG_EM_RE: str +STRONG_EM2_RE: str +STRONG_EM3_RE: str +LINK_RE: str +IMAGE_LINK_RE: str +REFERENCE_RE: str +IMAGE_REFERENCE_RE: str +NOT_STRONG_RE: str +AUTOLINK_RE: str +AUTOMAIL_RE: str +HTML_RE: str +ENTITY_RE: str +LINE_BREAK_RE: str + +def dequote(string): ... + +class EmStrongItem: ... + +class Pattern: + ANCESTOR_EXCLUDES: Any + pattern: Any + compiled_re: Any + md: Any + def __init__(self, pattern, md: Optional[Any] = ...) -> None: ... + @property + def markdown(self): ... + def getCompiledRegExp(self): ... + def handleMatch(self, m: Match) -> Optional[Union[str, Element]]: ... + def type(self): ... + def unescape(self, text): ... + +class InlineProcessor(Pattern): + safe_mode: bool = ... + def __init__(self, pattern, md: Optional[Any] = ...) -> None: ... + def handleMatch(self, m: Match, data) -> Union[Tuple[Element, int, int], Tuple[None, None, None]]: ... # type: ignore + +class SimpleTextPattern(Pattern): ... +class SimpleTextInlineProcessor(InlineProcessor): ... +class EscapeInlineProcessor(InlineProcessor): ... + +class SimpleTagPattern(Pattern): + tag: Any + def __init__(self, pattern, tag) -> None: ... + +class SimpleTagInlineProcessor(InlineProcessor): + tag: Any + def __init__(self, pattern, tag) -> None: ... + +class SubstituteTagPattern(SimpleTagPattern): ... +class SubstituteTagInlineProcessor(SimpleTagInlineProcessor): ... + +class BacktickInlineProcessor(InlineProcessor): + ESCAPED_BSLASH: Any + tag: str = ... + def __init__(self, pattern) -> None: ... + +class DoubleTagPattern(SimpleTagPattern): ... +class DoubleTagInlineProcessor(SimpleTagInlineProcessor): ... +class HtmlInlineProcessor(InlineProcessor): ... + +class AsteriskProcessor(InlineProcessor): + PATTERNS: Any + def build_single(self, m, tag, idx): ... + def build_double(self, m, tags, idx): ... + def build_double2(self, m, tags, idx): ... + def parse_sub_patterns(self, data, parent, last, idx) -> None: ... + def build_element(self, m, builder, tags, index): ... + +class UnderscoreProcessor(AsteriskProcessor): + PATTERNS: Any + +class LinkInlineProcessor(InlineProcessor): + RE_LINK: Any + RE_TITLE_CLEAN: Any + def getLink(self, data, index): ... + def getText(self, data, index): ... + +class ImageInlineProcessor(LinkInlineProcessor): ... 
+ +class ReferenceInlineProcessor(LinkInlineProcessor): + NEWLINE_CLEANUP_RE: Pattern + def evalId(self, data, index, text): ... + def makeTag(self, href, title, text): ... + +class ShortReferenceInlineProcessor(ReferenceInlineProcessor): ... +class ImageReferenceInlineProcessor(ReferenceInlineProcessor): ... +class AutolinkInlineProcessor(InlineProcessor): ... +class AutomailInlineProcessor(InlineProcessor): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/pep562.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/pep562.pyi new file mode 100644 index 0000000000000000000000000000000000000000..398bf66b83138a6252449e8d6b3cbbf11a6258c4 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/pep562.pyi @@ -0,0 +1,9 @@ +from typing import Any + +class Version: + def __new__(cls, major, minor, micro, release: str = ..., pre: int = ..., post: int = ..., dev: int = ...): ... + +class Pep562: + def __init__(self, name) -> None: ... + def __dir__(self): ... + def __getattr__(self, name): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/postprocessors.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/postprocessors.pyi new file mode 100644 index 0000000000000000000000000000000000000000..42cddc124a447d522be5c29eedbde1083ac1f591 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/postprocessors.pyi @@ -0,0 +1,17 @@ +from typing import Any, Pattern + +from . import util + +def build_postprocessors(md, **kwargs): ... + +class Postprocessor(util.Processor): + def run(self, text) -> None: ... + +class RawHtmlPostprocessor(Postprocessor): + def isblocklevel(self, html): ... + +class AndSubstitutePostprocessor(Postprocessor): ... + +class UnescapePostprocessor(Postprocessor): + RE: Pattern + def unescape(self, m): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/preprocessors.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/preprocessors.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b3ab45f5506c42003bea71c4e2f3e9fa7137b996 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/preprocessors.pyi @@ -0,0 +1,23 @@ +from typing import Any, Iterable, List, Pattern + +from . import util + +def build_preprocessors(md, **kwargs): ... + +class Preprocessor(util.Processor): + def run(self, lines: List[str]) -> List[str]: ... + +class NormalizeWhitespace(Preprocessor): ... + +class HtmlBlockPreprocessor(Preprocessor): + right_tag_patterns: Any + attrs_pattern: str = ... + left_tag_pattern: Any + attrs_re: Any + left_tag_re: Any + markdown_in_raw: bool = ... + +class ReferencePreprocessor(Preprocessor): + TITLE: str = ... + RE: Pattern + TITLE_RE: Pattern diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/serializers.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/serializers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cdad4b1b613d8a23714b87f106be66e072e6b4ad --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/serializers.pyi @@ -0,0 +1,4 @@ +from typing import Any + +def to_html_string(element): ... +def to_xhtml_string(element): ... 
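The inline-processor stubs above (markdown/inlinepatterns.pyi) describe the surface most custom Markdown syntax extends. A minimal sketch, assuming Python-Markdown is installed; the ++...++ pattern, the "underline" registry name, and priority 175 are illustrative choices, not fixed by the stubs.

import markdown
from markdown.extensions import Extension
from markdown.inlinepatterns import SimpleTagInlineProcessor

class UnderlineExtension(Extension):
    def extendMarkdown(self, md):
        # Group 2 of the pattern becomes the element text; "u" is the tag
        # wrapped around it. 175 slots the processor into the Registry.
        md.inlinePatterns.register(
            SimpleTagInlineProcessor(r"(\+\+)(.+?)\+\+", "u"), "underline", 175
        )

print(markdown.markdown("++hi++", extensions=[UnderlineExtension()]))
# -> <p><u>hi</u></p>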
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/treeprocessors.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/treeprocessors.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a213600a6159ba75fd01daf6c617dc4191c95c47 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/treeprocessors.pyi @@ -0,0 +1,19 @@ +from typing import Any, Optional + +from . import util + +def build_treeprocessors(md, **kwargs): ... +def isString(s): ... + +class Treeprocessor(util.Processor): + def run(self, root) -> None: ... + +class InlineProcessor(Treeprocessor): + inlinePatterns: Any + ancestors: Any + def __init__(self, md) -> None: ... + stashed_nodes: Any + parent_map: Any + def run(self, tree, ancestors: Optional[Any] = ...): ... + +class PrettifyTreeprocessor(Treeprocessor): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/util.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/util.pyi new file mode 100644 index 0000000000000000000000000000000000000000..66a6d7adf268b7755483e6672d24f26878ce7de2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markdown/util.pyi @@ -0,0 +1,56 @@ +from collections import namedtuple +from typing import Any, Optional, Pattern + +PY37: Any +__deprecated__: Any +BLOCK_LEVEL_ELEMENTS: Any +STX: str +ETX: str +INLINE_PLACEHOLDER_PREFIX: Any +INLINE_PLACEHOLDER: Any +INLINE_PLACEHOLDER_RE: Pattern +AMP_SUBSTITUTE: Any +HTML_PLACEHOLDER: Any +HTML_PLACEHOLDER_RE: Pattern +TAG_PLACEHOLDER: Any +INSTALLED_EXTENSIONS: Any +RTL_BIDI_RANGES: Any + +def deprecated(message, stacklevel: int = ...): ... +def isBlockLevel(tag): ... +def parseBoolValue(value, fail_on_errors: bool = ..., preserve_none: bool = ...): ... +def code_escape(text): ... + +class AtomicString(str): ... + +class Processor: + md: Any + def __init__(self, md: Optional[Any] = ...) -> None: ... + @property + def markdown(self): ... + +class HtmlStash: + html_counter: int = ... + rawHtmlBlocks: Any + tag_counter: int = ... + tag_data: Any + def __init__(self) -> None: ... + def store(self, html): ... + def reset(self) -> None: ... + def get_placeholder(self, key): ... + def store_tag(self, tag, attrs, left_index, right_index): ... + +class Registry: + def __init__(self) -> None: ... + def __contains__(self, item): ... + def __iter__(self) -> Any: ... + def __getitem__(self, key): ... + def __len__(self): ... + def get_index_for_name(self, name): ... + def register(self, item, name, priority) -> None: ... + def deregister(self, name, strict: bool = ...) -> None: ... + def __setitem__(self, key, value) -> None: ... + def __delitem__(self, key) -> None: ... + def add(self, key, value, location) -> None: ... + +def __getattr__(name): ... 
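A minimal sketch of the reusable Markdown class typed in core.pyi above, assuming Python-Markdown is installed; the "extra" extension is a stock bundled one, used here only as an example.

import markdown

# Markdown instances are reusable across documents: convert() renders one
# source, reset() clears per-document state (references, the HtmlStash
# stubbed above) before the next.
md = markdown.Markdown(extensions=["extra"], output_format="html")
print(md.convert("*hello*"))  # <p><em>hello</em></p>
md.reset()
print(md.convert("# Title"))  # <h1>Title</h1>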
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_compat.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a3c2bc7fed2529930eb80caaa5aaf3154ade829d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_compat.pyi @@ -0,0 +1,21 @@ +import sys +from typing import Iterator, Mapping, Tuple, TypeVar + +_K = TypeVar("_K") +_V = TypeVar("_V") + +PY2: bool + +def iteritems(d: Mapping[_K, _V]) -> Iterator[Tuple[_K, _V]]: ... + +if sys.version_info >= (3,): + text_type = str + string_types = (str,) + unichr = chr + int_types = (int,) +else: + from __builtin__ import unichr as unichr + + text_type = unicode + string_types = (str, unicode) + int_types = (int, long) diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_constants.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_constants.pyi new file mode 100644 index 0000000000000000000000000000000000000000..cd051158d7a15b3f4d642f8327d9a91e60f24783 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_constants.pyi @@ -0,0 +1,3 @@ +from typing import Dict, Text + +HTML_ENTITIES: Dict[Text, int] diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_native.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_native.pyi new file mode 100644 index 0000000000000000000000000000000000000000..01eb723f4ad69588791ef0d796858718c012aa4d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/markupsafe/_native.pyi @@ -0,0 +1,8 @@ +from typing import Text, Union + +from . import Markup +from ._compat import text_type + +def escape(s: Union[Markup, Text]) -> Markup: ... +def escape_silent(s: Union[None, Markup, Text]) -> Markup: ... +def soft_unicode(s: Text) -> text_type: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mock.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mock.pyi new file mode 100644 index 0000000000000000000000000000000000000000..013a22a2d570c04261edcea5a1e88026410e1cd3 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mock.pyi @@ -0,0 +1,441 @@ +import sys +from typing import Any, Callable, Generic, List, Mapping, Optional, Sequence, Text, Tuple, Type, TypeVar, Union, overload + +_F = TypeVar("_F", bound=Callable[..., Any]) +_T = TypeVar("_T") +_TT = TypeVar("_TT", bound=Type[Any]) +_R = TypeVar("_R") + +__all__ = [ + "Mock", + "MagicMock", + "patch", + "sentinel", + "DEFAULT", + "ANY", + "call", + "create_autospec", + "AsyncMock", + "FILTER_DIR", + "NonCallableMock", + "NonCallableMagicMock", + "mock_open", + "PropertyMock", + "seal", +] +__version__: str + +FILTER_DIR: Any + +class _slotted: ... + +class _SentinelObject: + name: Any + def __init__(self, name: Any) -> None: ... + +class _Sentinel: + def __init__(self) -> None: ... + def __getattr__(self, name: str) -> Any: ... + +sentinel: Any +DEFAULT: Any + +class _Call(Tuple[Any, ...]): + def __new__( + cls, value: Any = ..., name: Optional[Any] = ..., parent: Optional[Any] = ..., two: bool = ..., from_kall: bool = ... + ) -> Any: ... 
+ name: Any + parent: Any + from_kall: Any + def __init__( + self, value: Any = ..., name: Optional[Any] = ..., parent: Optional[Any] = ..., two: bool = ..., from_kall: bool = ... + ) -> None: ... + def __eq__(self, other: Any) -> bool: ... + __ne__: Any + def __call__(self, *args: Any, **kwargs: Any) -> _Call: ... + def __getattr__(self, attr: Any) -> Any: ... + def count(self, *args: Any, **kwargs: Any) -> Any: ... + def index(self, *args: Any, **kwargs: Any) -> Any: ... + def call_list(self) -> Any: ... + +call: _Call + +class _CallList(List[_Call]): + def __contains__(self, value: Any) -> bool: ... + +class _MockIter: + obj: Any + def __init__(self, obj: Any) -> None: ... + def __iter__(self) -> Any: ... + def __next__(self) -> Any: ... + +class Base: + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + +class NonCallableMock(Base, Any): # type: ignore + def __new__(__cls, *args: Any, **kw: Any) -> NonCallableMock: ... + def __init__( + self, + spec: Union[List[str], object, Type[object], None] = ..., + wraps: Optional[Any] = ..., + name: Optional[str] = ..., + spec_set: Union[List[str], object, Type[object], None] = ..., + parent: Optional[NonCallableMock] = ..., + _spec_state: Optional[Any] = ..., + _new_name: str = ..., + _new_parent: Optional[NonCallableMock] = ..., + _spec_as_instance: bool = ..., + _eat_self: Optional[bool] = ..., + unsafe: bool = ..., + **kwargs: Any, + ) -> None: ... + def __getattr__(self, name: str) -> Any: ... + if sys.version_info >= (3, 8): + def _calls_repr(self, prefix: str = ...) -> str: ... + def assert_called_with(self, *args: Any, **kwargs: Any) -> None: ... + def assert_not_called(self) -> None: ... + def assert_called_once_with(self, *args: Any, **kwargs: Any) -> None: ... + def _format_mock_failure_message(self, args: Any, kwargs: Any, action: str = ...) -> str: ... + elif sys.version_info >= (3, 5): + def assert_called_with(_mock_self, *args: Any, **kwargs: Any) -> None: ... + def assert_not_called(_mock_self) -> None: ... + def assert_called_once_with(_mock_self, *args: Any, **kwargs: Any) -> None: ... + def _format_mock_failure_message(self, args: Any, kwargs: Any) -> str: ... + if sys.version_info >= (3, 8): + def assert_called(self) -> None: ... + def assert_called_once(self) -> None: ... + elif sys.version_info >= (3, 6): + def assert_called(_mock_self) -> None: ... + def assert_called_once(_mock_self) -> None: ... + if sys.version_info >= (3, 6): + def reset_mock(self, visited: Any = ..., *, return_value: bool = ..., side_effect: bool = ...) -> None: ... + elif sys.version_info >= (3, 5): + def reset_mock(self, visited: Any = ...) -> None: ... + if sys.version_info >= (3, 7): + def _extract_mock_name(self) -> str: ... + def _get_call_signature_from_name(self, name: str) -> Any: ... + def assert_any_call(self, *args: Any, **kwargs: Any) -> None: ... + def assert_has_calls(self, calls: Sequence[_Call], any_order: bool = ...) -> None: ... + def mock_add_spec(self, spec: Any, spec_set: bool = ...) -> None: ... + def _mock_add_spec(self, spec: Any, spec_set: bool, _spec_as_instance: bool = ..., _eat_self: bool = ...) -> None: ... + def attach_mock(self, mock: NonCallableMock, attribute: str) -> None: ... + def configure_mock(self, **kwargs: Any) -> None: ... + return_value: Any + side_effect: Any + called: bool + call_count: int + call_args: Any + call_args_list: _CallList + mock_calls: _CallList + def _format_mock_call_signature(self, args: Any, kwargs: Any) -> str: ... + def _call_matcher(self, _call: Tuple[_Call, ...]) -> _Call: ... 
+ def _get_child_mock(self, **kw: Any) -> NonCallableMock: ... + +class CallableMixin(Base): + side_effect: Any + def __init__( + self, + spec: Optional[Any] = ..., + side_effect: Optional[Any] = ..., + return_value: Any = ..., + wraps: Optional[Any] = ..., + name: Optional[Any] = ..., + spec_set: Optional[Any] = ..., + parent: Optional[Any] = ..., + _spec_state: Optional[Any] = ..., + _new_name: Any = ..., + _new_parent: Optional[Any] = ..., + **kwargs: Any, + ) -> None: ... + def __call__(_mock_self, *args: Any, **kwargs: Any) -> Any: ... + +class Mock(CallableMixin, NonCallableMock): ... + +class _patch(Generic[_T]): + attribute_name: Any + getter: Callable[[], Any] + attribute: str + new: _T + new_callable: Any + spec: Any + create: bool + has_local: Any + spec_set: Any + autospec: Any + kwargs: Mapping[str, Any] + additional_patchers: Any + if sys.version_info >= (3, 8): + @overload + def __init__( + self: _patch[Union[MagicMock, AsyncMock]], + getter: Callable[[], Any], + attribute: str, + *, + spec: Optional[Any], + create: bool, + spec_set: Optional[Any], + autospec: Optional[Any], + new_callable: Optional[Any], + kwargs: Mapping[str, Any], + ) -> None: ... + # This overload also covers the case, where new==DEFAULT. In this case, self is _patch[Any]. + # Ideally we'd be able to add an overload for it so that self is _patch[MagicMock], + # but that's impossible with the current type system. + @overload + def __init__( + self: _patch[_T], + getter: Callable[[], Any], + attribute: str, + new: _T, + spec: Optional[Any], + create: bool, + spec_set: Optional[Any], + autospec: Optional[Any], + new_callable: Optional[Any], + kwargs: Mapping[str, Any], + ) -> None: ... + else: + @overload + def __init__( + self: _patch[MagicMock], + getter: Callable[[], Any], + attribute: str, + *, + spec: Optional[Any], + create: bool, + spec_set: Optional[Any], + autospec: Optional[Any], + new_callable: Optional[Any], + kwargs: Mapping[str, Any], + ) -> None: ... + @overload + def __init__( + self: _patch[_T], + getter: Callable[[], Any], + attribute: str, + new: _T, + spec: Optional[Any], + create: bool, + spec_set: Optional[Any], + autospec: Optional[Any], + new_callable: Optional[Any], + kwargs: Mapping[str, Any], + ) -> None: ... + def copy(self) -> _patch[_T]: ... + def __call__(self, func: Callable[..., _R]) -> Callable[..., _R]: ... + def decorate_class(self, klass: _TT) -> _TT: ... + def decorate_callable(self, func: _F) -> _F: ... + def get_original(self) -> Tuple[Any, bool]: ... + target: Any + temp_original: Any + is_local: bool + def __enter__(self) -> _T: ... + def __exit__(self, *exc_info: Any) -> None: ... + def start(self) -> _T: ... + def stop(self) -> None: ... + +class _patch_dict: + in_dict: Any + values: Any + clear: Any + def __init__(self, in_dict: Any, values: Any = ..., clear: Any = ..., **kwargs: Any) -> None: ... + def __call__(self, f: Any) -> Any: ... + def decorate_class(self, klass: Any) -> Any: ... + def __enter__(self) -> Any: ... + def __exit__(self, *args: Any) -> Any: ... + start: Any + stop: Any + +class _patcher: + TEST_PREFIX: str + dict: Type[_patch_dict] + if sys.version_info >= (3, 8): + @overload + def __call__( # type: ignore + self, + target: Any, + *, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[Union[MagicMock, AsyncMock]]: ... + # This overload also covers the case, where new==DEFAULT. 
In this case, the return type is _patch[Any]. + # Ideally we'd be able to add an overload for it so that the return type is _patch[MagicMock], + # but that's impossible with the current type system. + @overload + def __call__( + self, + target: Any, + new: _T, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[_T]: ... + else: + @overload + def __call__( # type: ignore + self, + target: Any, + *, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[MagicMock]: ... + @overload + def __call__( + self, + target: Any, + new: _T, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[_T]: ... + if sys.version_info >= (3, 8): + @overload + def object( # type: ignore + self, + target: Any, + attribute: Text, + *, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[Union[MagicMock, AsyncMock]]: ... + @overload + def object( + self, + target: Any, + attribute: Text, + new: _T = ..., + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[_T]: ... + else: + @overload + def object( # type: ignore + self, + target: Any, + attribute: Text, + *, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[MagicMock]: ... + @overload + def object( + self, + target: Any, + attribute: Text, + new: _T = ..., + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: Any, + ) -> _patch[_T]: ... + def multiple( + self, + target: Any, + spec: Optional[Any] = ..., + create: bool = ..., + spec_set: Optional[Any] = ..., + autospec: Optional[Any] = ..., + new_callable: Optional[Any] = ..., + **kwargs: _T, + ) -> _patch[_T]: ... + def stopall(self) -> None: ... + +patch: _patcher + +class MagicMixin: + def __init__(self, *args: Any, **kw: Any) -> None: ... + +class NonCallableMagicMock(MagicMixin, NonCallableMock): + def mock_add_spec(self, spec: Any, spec_set: bool = ...) -> None: ... + +class MagicMock(MagicMixin, Mock): + def mock_add_spec(self, spec: Any, spec_set: bool = ...) -> None: ... + +if sys.version_info >= (3, 8): + class AsyncMockMixin(Base): + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + async def _execute_mock_call(self, *args: Any, **kwargs: Any) -> Any: ... + def assert_awaited(self) -> None: ... + def assert_awaited_once(self) -> None: ... + def assert_awaited_with(self, *args: Any, **kwargs: Any) -> None: ... + def assert_awaited_once_with(self, *args: Any, **kwargs: Any) -> None: ... + def assert_any_await(self, *args: Any, **kwargs: Any) -> None: ... + def assert_has_awaits(self, calls: _CallList, any_order: bool = ...) -> None: ... + def assert_not_awaited(self) -> None: ... + def reset_mock(self, *args, **kwargs) -> None: ... 
+ await_count: int + await_args: Optional[_Call] + await_args_list: _CallList + class AsyncMagicMixin(MagicMixin): + def __init__(self, *args: Any, **kw: Any) -> None: ... + class AsyncMock(AsyncMockMixin, AsyncMagicMixin, Mock): ... + +class MagicProxy: + name: Any + parent: Any + def __init__(self, name: Any, parent: Any) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + def create_mock(self) -> Any: ... + def __get__(self, obj: Any, _type: Optional[Any] = ...) -> Any: ... + +class _ANY: + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + +ANY: Any + +def create_autospec( + spec: Any, spec_set: Any = ..., instance: Any = ..., _parent: Optional[Any] = ..., _name: Optional[Any] = ..., **kwargs: Any +) -> Any: ... + +class _SpecState: + spec: Any + ids: Any + spec_set: Any + parent: Any + instance: Any + name: Any + def __init__( + self, + spec: Any, + spec_set: Any = ..., + parent: Optional[Any] = ..., + name: Optional[Any] = ..., + ids: Optional[Any] = ..., + instance: Any = ..., + ) -> None: ... + +def mock_open(mock: Optional[Any] = ..., read_data: Any = ...) -> Any: ... + +PropertyMock = Any + +if sys.version_info >= (3, 7): + def seal(mock: Any) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mypy_extensions.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mypy_extensions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e5df53ff7dd2971b1360c1f1f3a77714f1d1b2b4 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/mypy_extensions.pyi @@ -0,0 +1,46 @@ +import abc +import sys +from typing import Any, Callable, Dict, Generic, ItemsView, KeysView, Mapping, Optional, Type, TypeVar, Union, ValuesView + +_T = TypeVar("_T") +_U = TypeVar("_U") + +# Internal mypy fallback type for all typed dicts (does not exist at runtime) +class _TypedDict(Mapping[str, object], metaclass=abc.ABCMeta): + def copy(self: _T) -> _T: ... + # Using NoReturn so that only calls using mypy plugin hook that specialize the signature + # can go through. + def setdefault(self, k: NoReturn, default: object) -> object: ... + # Mypy plugin hook for 'pop' expects that 'default' has a type variable type. + def pop(self, k: NoReturn, default: _T = ...) -> object: ... + def update(self: _T, __m: _T) -> None: ... + if sys.version_info < (3, 0): + def has_key(self, k: str) -> bool: ... + def viewitems(self) -> ItemsView[str, object]: ... + def viewkeys(self) -> KeysView[str]: ... + def viewvalues(self) -> ValuesView[object]: ... + else: + def items(self) -> ItemsView[str, object]: ... + def keys(self) -> KeysView[str]: ... + def values(self) -> ValuesView[object]: ... + def __delitem__(self, k: NoReturn) -> None: ... + +def TypedDict(typename: str, fields: Dict[str, Type[_T]], total: bool = ...) -> Type[Dict[str, Any]]: ... +def Arg(type: _T = ..., name: Optional[str] = ...) -> _T: ... +def DefaultArg(type: _T = ..., name: Optional[str] = ...) -> _T: ... +def NamedArg(type: _T = ..., name: Optional[str] = ...) -> _T: ... +def DefaultNamedArg(type: _T = ..., name: Optional[str] = ...) -> _T: ... +def VarArg(type: _T = ...) -> _T: ... +def KwArg(type: _T = ...) -> _T: ... + +# Return type that indicates a function does not return. +# This type is equivalent to the None type, but the no-op Union is necessary to +# distinguish the None type from the None value. +NoReturn = Union[None] # Deprecated: Use typing.NoReturn instead. 
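Note, before the remaining mypy_extensions helpers below: the unittest.mock stub that just ended encodes patch's dual decorator/context-manager nature, with the no-`new` overloads returning MagicMock (or AsyncMock on 3.8+) and extra keyword arguments forwarded to the created mock. A minimal sketch against a real stdlib target:

import os
from unittest.mock import patch

with patch("os.getcwd", return_value="/tmp") as fake_cwd:  # no `new` -> _patch[MagicMock]
    assert os.getcwd() == "/tmp"        # the kwarg became the mock's return_value
fake_cwd.assert_called_once_with()      # assertion helpers from NonCallableMock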
+ +# This is intended as a class decorator, but mypy rejects abstract classes +# when a Type[_T] is expected, so we can't give it the type we want +def trait(cls: Any) -> Any: ... +def mypyc_attr(*attrs: str, **kwattrs: object) -> Callable[[_T], _T]: ... + +class FlexibleAlias(Generic[_T, _U]): ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/polib.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/polib.pyi new file mode 100644 index 0000000000000000000000000000000000000000..267e47e227b0ff509e094d4c39d89dceb16fc3e5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/polib.pyi @@ -0,0 +1,156 @@ +import textwrap +from typing import IO, Any, Callable, Dict, Generic, List, Optional, Text, Tuple, Type, TypeVar, Union, overload + +_TB = TypeVar("_TB", bound="_BaseEntry") +_TP = TypeVar("_TP", bound="POFile") +_TM = TypeVar("_TM", bound="MOFile") + +default_encoding: str + +# wrapwidth: int +# encoding: str +# check_for_duplicates: bool +@overload +def pofile(pofile: Text, *, klass: Type[_TP], **kwargs: Any) -> _TP: ... +@overload +def pofile(pofile: Text, **kwargs: Any) -> POFile: ... +@overload +def mofile(mofile: Text, *, klass: Type[_TM], **kwargs: Any) -> _TM: ... +@overload +def mofile(mofile: Text, **kwargs: Any) -> MOFile: ... +def detect_encoding(file: Union[bytes, Text], binary_mode: bool = ...) -> str: ... +def escape(st: Text) -> Text: ... +def unescape(st: Text) -> Text: ... + +class _BaseFile(List[_TB]): + fpath: Text + wrapwidth: int + encoding: Text + check_for_duplicates: bool + header: Text + metadata: Dict[Text, Text] + metadata_is_fuzzy: bool + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __unicode__(self) -> Text: ... + def __contains__(self, entry: _TB) -> bool: ... # type: ignore # AttributeError otherwise + def __eq__(self, other: object) -> bool: ... + def append(self, entry: _TB) -> None: ... + def insert(self, index: int, entry: _TB) -> None: ... + def metadata_as_entry(self) -> POEntry: ... + def save(self, fpath: Optional[Text] = ..., repr_method: str = ...) -> None: ... + def find(self, st: Text, by: str = ..., include_obsolete_entries: bool = ..., msgctxt: bool = ...) -> Optional[_TB]: ... + def ordered_metadata(self) -> List[Text]: ... + def to_binary(self) -> bytes: ... + +class POFile(_BaseFile): + def __unicode__(self) -> Text: ... + def save_as_mofile(self, fpath: Text) -> None: ... + def percent_translated(self) -> int: ... + def translated_entries(self) -> List[POEntry]: ... + def untranslated_entries(self) -> List[POEntry]: ... + def fuzzy_entries(self) -> List[POEntry]: ... + def obsolete_entries(self) -> List[POEntry]: ... + def merge(self, refpot: POFile) -> None: ... + +class MOFile(_BaseFile): + MAGIC: int + MAGIC_SWAPPED: int + magic_number: Optional[int] + version: int + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def save_as_pofile(self, fpath: str) -> None: ... + def save(self, fpath: Optional[Text] = ...) -> None: ... # type: ignore # binary file does not allow argument repr_method + def percent_translated(self) -> int: ... + def translated_entries(self) -> List[MOEntry]: ... + def untranslated_entries(self) -> List[MOEntry]: ... + def fuzzy_entries(self) -> List[MOEntry]: ... + def obsolete_entries(self) -> List[MOEntry]: ... 
+ +class _BaseEntry(object): + msgid: Text + msgstr: Text + msgid_plural: List[Text] + msgstr_plural: List[Text] + msgctxt: Text + obsolete: bool + encoding: str + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __unicode__(self, wrapwidth: int = ...) -> Text: ... + def __eq__(self, other: object) -> bool: ... + +class POEntry(_BaseEntry): + comment: Text + tcomment: Text + occurrences: List[Tuple[str, int]] + flags: List[Text] + previous_msgctxt: Optional[Text] + previous_msgid: Optional[Text] + previous_msgid_plural: Optional[Text] + linenum: Optional[int] + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __unicode__(self, wrapwidth: int = ...) -> Text: ... + def __cmp__(self, other: POEntry) -> int: ... + def __gt__(self, other: POEntry) -> bool: ... + def __lt__(self, other: POEntry) -> bool: ... + def __ge__(self, other: POEntry) -> bool: ... + def __le__(self, other: POEntry) -> bool: ... + def __eq__(self, other: Any) -> bool: ... + def __ne__(self, other: Any) -> bool: ... + def translated(self) -> bool: ... + def merge(self, other: POEntry) -> None: ... + @property + def fuzzy(self) -> bool: ... + @property + def msgid_with_context(self) -> Text: ... + def __hash__(self) -> int: ... + +class MOEntry(_BaseEntry): + comment: Text + tcomment: Text + occurrences: List[Tuple[str, int]] + flags: List[Text] + previous_msgctxt: Optional[Text] + previous_msgid: Optional[Text] + previous_msgid_plural: Optional[Text] + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + def __hash__(self) -> int: ... + +class _POFileParser(Generic[_TP]): + fhandle: IO[Text] + instance: _TP + transitions: Dict[Tuple[str, str], Tuple[Callable[[], bool], str]] + current_line: int + current_entry: POEntry + current_state: str + current_token: Optional[str] + msgstr_index: int + entry_obsolete: int + def __init__(self, pofile: Text, *args: Any, **kwargs: Any) -> None: ... + def parse(self) -> _TP: ... + def add(self, symbol: str, states: List[str], next_state: str) -> None: ... + def process(self, symbol: str) -> None: ... + def handle_he(self) -> bool: ... + def handle_tc(self) -> bool: ... + def handle_gc(self) -> bool: ... + def handle_oc(self) -> bool: ... + def handle_fl(self) -> bool: ... + def handle_pp(self) -> bool: ... + def handle_pm(self) -> bool: ... + def handle_pc(self) -> bool: ... + def handle_ct(self) -> bool: ... + def handle_mi(self) -> bool: ... + def handle_mp(self) -> bool: ... + def handle_ms(self) -> bool: ... + def handle_mx(self) -> bool: ... + def handle_mc(self) -> bool: ... + +class _MOFileParser(Generic[_TM]): + fhandle: IO[bytes] + instance: _TM + def __init__(self, mofile: Text, *args: Any, **kwargs: Any) -> None: ... + def __del__(self) -> None: ... + def parse(self) -> _TM: ... + +class TextWrapper(textwrap.TextWrapper): + drop_whitespace: bool + def __init__(self, *args: Any, **kwargs: Any) -> None: ... 
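That closes the polib stub. Its API is catalog-file oriented; a minimal usage sketch using only names declared above (the .po/.mo paths are hypothetical):

import polib

po = polib.pofile("locale/de/messages.po")   # hypothetical path -> POFile
print(po.percent_translated())
for entry in po.untranslated_entries():
    print(entry.msgid)
po.save_as_mofile("locale/de/messages.mo")   # compile to the binary catalog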
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pycurl.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pycurl.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ca6538d3274fded11c3a32b0412083dbf0aa7d44 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pycurl.pyi @@ -0,0 +1,644 @@ +# TODO(MichalPokorny): more precise types + +from typing import Any, List, Text, Tuple + +GLOBAL_ACK_EINTR: int +GLOBAL_ALL: int +GLOBAL_DEFAULT: int +GLOBAL_NOTHING: int +GLOBAL_SSL: int +GLOBAL_WIN32: int + +def global_init(option: int) -> None: ... +def global_cleanup() -> None: ... + +version: str + +def version_info() -> Tuple[int, str, int, str, int, str, int, str, Tuple[str, ...], Any, int, Any]: ... + +class error(Exception): ... + +class Curl(object): + def close(self) -> None: ... + def setopt(self, option: int, value: Any) -> None: ... + def setopt_string(self, option: int, value: str) -> None: ... + def perform(self) -> None: ... + def perform_rb(self) -> bytes: ... + def perform_rs(self) -> Text: ... + def getinfo(self, info: Any) -> Any: ... + def getinfo_raw(self, info: Any) -> Any: ... + def reset(self) -> None: ... + def unsetopt(self, option: int) -> Any: ... + def pause(self, bitmask: Any) -> Any: ... + def errstr(self) -> str: ... + # TODO(MichalPokorny): wat? + USERPWD: int + +class CurlMulti(object): + def close(self) -> None: ... + def add_handle(self, obj: Curl) -> None: ... + def remove_handle(self, obj: Curl) -> None: ... + def perform(self) -> Tuple[Any, int]: ... + def fdset(self) -> Tuple[List[Any], List[Any], List[Any]]: ... + def select(self, timeout: float = ...) -> int: ... + def info_read(self, max_objects: int = ...) -> Tuple[int, List[Any], List[Any]]: ... + def socket_action(self, sockfd: int, ev_bitmask: int) -> Tuple[int, int]: ... + +class CurlShare(object): + def close(self) -> None: ... + def setopt(self, option: int, value: Any) -> Any: ... 
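Everything that follows in this stub is module-level int constants that feed Curl.setopt and Curl.getinfo. A minimal transfer sketch, using only options declared further down (URL, WRITEDATA, RESPONSE_CODE):

from io import BytesIO
import pycurl

buf = BytesIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, "https://example.org")
c.setopt(pycurl.WRITEDATA, buf)              # any object with a write() method
c.perform()
status = c.getinfo(pycurl.RESPONSE_CODE)     # e.g. 200
c.close()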
+ +ACCEPTTIMEOUT_MS: int +ACCEPT_ENCODING: int +ADDRESS_SCOPE: int +APPCONNECT_TIME: int +APPEND: int +AUTOREFERER: int +BUFFERSIZE: int +CAINFO: int +CAPATH: int +CLOSESOCKETFUNCTION: int +COMPILE_LIBCURL_VERSION_NUM: int +COMPILE_PY_VERSION_HEX: int +CONDITION_UNMET: int +CONNECTTIMEOUT: int +CONNECTTIMEOUT_MS: int +CONNECT_ONLY: int +CONNECT_TIME: int +CONNECT_TO: int +CONTENT_LENGTH_DOWNLOAD: int +CONTENT_LENGTH_UPLOAD: int +CONTENT_TYPE: int +COOKIE: int +COOKIEFILE: int +COOKIEJAR: int +COOKIELIST: int +COOKIESESSION: int +COPYPOSTFIELDS: int +CRLF: int +CRLFILE: int +CSELECT_ERR: int +CSELECT_IN: int +CSELECT_OUT: int +CURL_HTTP_VERSION_1_0: int +CURL_HTTP_VERSION_1_1: int +CURL_HTTP_VERSION_2: int +CURL_HTTP_VERSION_2_0: int +CURL_HTTP_VERSION_LAST: int +CURL_HTTP_VERSION_NONE: int +CUSTOMREQUEST: int +DEBUGFUNCTION: int +DEFAULT_PROTOCOL: int +DIRLISTONLY: int +DNS_CACHE_TIMEOUT: int +DNS_SERVERS: int +DNS_USE_GLOBAL_CACHE: int +EFFECTIVE_URL: int +EGDSOCKET: int +ENCODING: int +EXPECT_100_TIMEOUT_MS: int +FAILONERROR: int +FILE: int +FOLLOWLOCATION: int +FORBID_REUSE: int +FORM_BUFFER: int +FORM_BUFFERPTR: int +FORM_CONTENTS: int +FORM_CONTENTTYPE: int +FORM_FILE: int +FORM_FILENAME: int +FRESH_CONNECT: int +FTPAPPEND: int +FTPAUTH_DEFAULT: int +FTPAUTH_SSL: int +FTPAUTH_TLS: int +FTPLISTONLY: int +FTPMETHOD_DEFAULT: int +FTPMETHOD_MULTICWD: int +FTPMETHOD_NOCWD: int +FTPMETHOD_SINGLECWD: int +FTPPORT: int +FTPSSLAUTH: int +FTPSSL_ALL: int +FTPSSL_CONTROL: int +FTPSSL_NONE: int +FTPSSL_TRY: int +FTP_ACCOUNT: int +FTP_ALTERNATIVE_TO_USER: int +FTP_CREATE_MISSING_DIRS: int +FTP_ENTRY_PATH: int +FTP_FILEMETHOD: int +FTP_RESPONSE_TIMEOUT: int +FTP_SKIP_PASV_IP: int +FTP_SSL: int +FTP_SSL_CCC: int +FTP_USE_EPRT: int +FTP_USE_EPSV: int +FTP_USE_PRET: int +GSSAPI_DELEGATION: int +GSSAPI_DELEGATION_FLAG: int +GSSAPI_DELEGATION_NONE: int +GSSAPI_DELEGATION_POLICY_FLAG: int +HEADER: int +HEADERFUNCTION: int +HEADEROPT: int +HEADER_SEPARATE: int +HEADER_SIZE: int +HEADER_UNIFIED: int +HTTP200ALIASES: int +HTTPAUTH: int +HTTPAUTH_ANY: int +HTTPAUTH_ANYSAFE: int +HTTPAUTH_AVAIL: int +HTTPAUTH_BASIC: int +HTTPAUTH_DIGEST: int +HTTPAUTH_DIGEST_IE: int +HTTPAUTH_GSSNEGOTIATE: int +HTTPAUTH_NEGOTIATE: int +HTTPAUTH_NONE: int +HTTPAUTH_NTLM: int +HTTPAUTH_NTLM_WB: int +HTTPAUTH_ONLY: int +HTTPGET: int +HTTPHEADER: int +HTTPPOST: int +HTTPPROXYTUNNEL: int +HTTP_CODE: int +HTTP_CONNECTCODE: int +HTTP_CONTENT_DECODING: int +HTTP_TRANSFER_DECODING: int +HTTP_VERSION: int +IGNORE_CONTENT_LENGTH: int +INFILE: int +INFILESIZE: int +INFILESIZE_LARGE: int +INFOTYPE_DATA_IN: int +INFOTYPE_DATA_OUT: int +INFOTYPE_HEADER_IN: int +INFOTYPE_HEADER_OUT: int +INFOTYPE_SSL_DATA_IN: int +INFOTYPE_SSL_DATA_OUT: int +INFOTYPE_TEXT: int +INFO_CERTINFO: int +INFO_COOKIELIST: int +INFO_FILETIME: int +INFO_HTTP_VERSION: int +INFO_RTSP_CLIENT_CSEQ: int +INFO_RTSP_CSEQ_RECV: int +INFO_RTSP_SERVER_CSEQ: int +INFO_RTSP_SESSION_ID: int +INTERFACE: int +IOCMD_NOP: int +IOCMD_RESTARTREAD: int +IOCTLFUNCTION: int +IOE_FAILRESTART: int +IOE_OK: int +IOE_UNKNOWNCMD: int +IPRESOLVE: int +IPRESOLVE_V4: int +IPRESOLVE_V6: int +IPRESOLVE_WHATEVER: int +ISSUERCERT: int +KEYPASSWD: int +KHMATCH_MISMATCH: int +KHMATCH_MISSING: int +KHMATCH_OK: int +KHSTAT_DEFER: int +KHSTAT_FINE: int +KHSTAT_FINE_ADD_TO_FILE: int +KHSTAT_REJECT: int +KHTYPE_DSS: int +KHTYPE_RSA: int +KHTYPE_RSA1: int +KHTYPE_UNKNOWN: int +KRB4LEVEL: int +KRBLEVEL: int +LASTSOCKET: int +LOCALPORT: int +LOCALPORTRANGE: int +LOCAL_IP: int +LOCAL_PORT: int +LOCK_DATA_COOKIE: 
int +LOCK_DATA_DNS: int +LOCK_DATA_SSL_SESSION: int +LOGIN_OPTIONS: int +LOW_SPEED_LIMIT: int +LOW_SPEED_TIME: int +MAIL_AUTH: int +MAIL_FROM: int +MAIL_RCPT: int +MAXCONNECTS: int +MAXFILESIZE: int +MAXFILESIZE_LARGE: int +MAXREDIRS: int +MAX_RECV_SPEED_LARGE: int +MAX_SEND_SPEED_LARGE: int +M_CHUNK_LENGTH_PENALTY_SIZE: int +M_CONTENT_LENGTH_PENALTY_SIZE: int +M_MAXCONNECTS: int +M_MAX_HOST_CONNECTIONS: int +M_MAX_PIPELINE_LENGTH: int +M_MAX_TOTAL_CONNECTIONS: int +M_PIPELINING: int +M_PIPELINING_SERVER_BL: int +M_PIPELINING_SITE_BL: int +M_SOCKETFUNCTION: int +M_TIMERFUNCTION: int +NAMELOOKUP_TIME: int +NETRC: int +NETRC_FILE: int +NETRC_IGNORED: int +NETRC_OPTIONAL: int +NETRC_REQUIRED: int +NEW_DIRECTORY_PERMS: int +NEW_FILE_PERMS: int +NOBODY: int +NOPROGRESS: int +NOPROXY: int +NOSIGNAL: int +NUM_CONNECTS: int +OPENSOCKETFUNCTION: int +OPT_CERTINFO: int +OPT_COOKIELIST: int +OPT_FILETIME: int +OPT_RTSP_CLIENT_CSEQ: int +OPT_RTSP_REQUEST: int +OPT_RTSP_SERVER_CSEQ: int +OPT_RTSP_SESSION_ID: int +OPT_RTSP_STREAM_URI: int +OPT_RTSP_TRANSPORT: int +OS_ERRNO: int +PASSWORD: int +PATH_AS_IS: int +PAUSE_ALL: int +PAUSE_CONT: int +PAUSE_RECV: int +PAUSE_SEND: int +PINNEDPUBLICKEY: int +PIPEWAIT: int +PIPE_MULTIPLEX: int +PIPE_NOTHING: int +POLL_IN: int +POLL_INOUT: int +POLL_NONE: int +POLL_OUT: int +POLL_REMOVE: int +PORT: int +POST: int +POST301: int +POSTFIELDS: int +POSTFIELDSIZE: int +POSTFIELDSIZE_LARGE: int +POSTQUOTE: int +POSTREDIR: int +PREQUOTE: int +PRETRANSFER_TIME: int +PRE_PROXY: int +PRIMARY_IP: int +PRIMARY_PORT: int +PROGRESSFUNCTION: int +PROTOCOLS: int +PROTO_ALL: int +PROTO_DICT: int +PROTO_FILE: int +PROTO_FTP: int +PROTO_FTPS: int +PROTO_GOPHER: int +PROTO_HTTP: int +PROTO_HTTPS: int +PROTO_IMAP: int +PROTO_IMAPS: int +PROTO_LDAP: int +PROTO_LDAPS: int +PROTO_POP3: int +PROTO_POP3S: int +PROTO_RTMP: int +PROTO_RTMPE: int +PROTO_RTMPS: int +PROTO_RTMPT: int +PROTO_RTMPTE: int +PROTO_RTMPTS: int +PROTO_RTSP: int +PROTO_SCP: int +PROTO_SFTP: int +PROTO_SMB: int +PROTO_SMBS: int +PROTO_SMTP: int +PROTO_SMTPS: int +PROTO_TELNET: int +PROTO_TFTP: int +PROXY: int +PROXYAUTH: int +PROXYAUTH_AVAIL: int +PROXYHEADER: int +PROXYPASSWORD: int +PROXYPORT: int +PROXYTYPE: int +PROXYTYPE_HTTP: int +PROXYTYPE_HTTP_1_0: int +PROXYTYPE_SOCKS4: int +PROXYTYPE_SOCKS4A: int +PROXYTYPE_SOCKS5: int +PROXYTYPE_SOCKS5_HOSTNAME: int +PROXYUSERNAME: int +PROXYUSERPWD: int +PROXY_CAINFO: int +PROXY_CAPATH: int +PROXY_SERVICE_NAME: int +PROXY_SSLCERT: int +PROXY_SSLCERTTYPE: int +PROXY_SSLKEY: int +PROXY_SSLKEYTYPE: int +PROXY_SSL_VERIFYHOST: int +PROXY_SSL_VERIFYPEER: int +PROXY_TRANSFER_MODE: int +PUT: int +QUOTE: int +RANDOM_FILE: int +RANGE: int +READDATA: int +READFUNCTION: int +READFUNC_ABORT: int +READFUNC_PAUSE: int +REDIRECT_COUNT: int +REDIRECT_TIME: int +REDIRECT_URL: int +REDIR_POST_301: int +REDIR_POST_302: int +REDIR_POST_303: int +REDIR_POST_ALL: int +REDIR_PROTOCOLS: int +REFERER: int +REQUEST_SIZE: int +RESOLVE: int +RESPONSE_CODE: int +RESUME_FROM: int +RESUME_FROM_LARGE: int +RTSPREQ_ANNOUNCE: int +RTSPREQ_DESCRIBE: int +RTSPREQ_GET_PARAMETER: int +RTSPREQ_LAST: int +RTSPREQ_NONE: int +RTSPREQ_OPTIONS: int +RTSPREQ_PAUSE: int +RTSPREQ_PLAY: int +RTSPREQ_RECEIVE: int +RTSPREQ_RECORD: int +RTSPREQ_SETUP: int +RTSPREQ_SET_PARAMETER: int +RTSPREQ_TEARDOWN: int +SASL_IR: int +SEEKFUNCTION: int +SEEKFUNC_CANTSEEK: int +SEEKFUNC_FAIL: int +SEEKFUNC_OK: int +SERVICE_NAME: int +SHARE: int +SH_SHARE: int +SH_UNSHARE: int +SIZE_DOWNLOAD: int +SIZE_UPLOAD: int +SOCKET_BAD: int 
+SOCKET_TIMEOUT: int +SOCKOPTFUNCTION: int +SOCKOPT_ALREADY_CONNECTED: int +SOCKOPT_ERROR: int +SOCKOPT_OK: int +SOCKS5_GSSAPI_NEC: int +SOCKS5_GSSAPI_SERVICE: int +SOCKTYPE_ACCEPT: int +SOCKTYPE_IPCXN: int +SPEED_DOWNLOAD: int +SPEED_UPLOAD: int +SSH_AUTH_AGENT: int +SSH_AUTH_ANY: int +SSH_AUTH_DEFAULT: int +SSH_AUTH_HOST: int +SSH_AUTH_KEYBOARD: int +SSH_AUTH_NONE: int +SSH_AUTH_PASSWORD: int +SSH_AUTH_PUBLICKEY: int +SSH_AUTH_TYPES: int +SSH_HOST_PUBLIC_KEY_MD5: int +SSH_KEYFUNCTION: int +SSH_KNOWNHOSTS: int +SSH_PRIVATE_KEYFILE: int +SSH_PUBLIC_KEYFILE: int +SSLCERT: int +SSLCERTPASSWD: int +SSLCERTTYPE: int +SSLENGINE: int +SSLENGINE_DEFAULT: int +SSLKEY: int +SSLKEYPASSWD: int +SSLKEYTYPE: int +SSLOPT_ALLOW_BEAST: int +SSLOPT_NO_REVOKE: int +SSLVERSION: int +SSLVERSION_DEFAULT: int +SSLVERSION_SSLv2: int +SSLVERSION_SSLv3: int +SSLVERSION_TLSv1: int +SSLVERSION_TLSv1_0: int +SSLVERSION_TLSv1_1: int +SSLVERSION_TLSv1_2: int +SSLVERSION_MAX_DEFAULT: int +SSL_CIPHER_LIST: int +SSL_ENABLE_ALPN: int +SSL_ENABLE_NPN: int +SSL_ENGINES: int +SSL_FALSESTART: int +SSL_OPTIONS: int +SSL_SESSIONID_CACHE: int +SSL_VERIFYHOST: int +SSL_VERIFYPEER: int +SSL_VERIFYRESULT: int +SSL_VERIFYSTATUS: int +STARTTRANSFER_TIME: int +STDERR: int +TCP_FASTOPEN: int +TCP_KEEPALIVE: int +TCP_KEEPIDLE: int +TCP_KEEPINTVL: int +TCP_NODELAY: int +TELNETOPTIONS: int +TFTP_BLKSIZE: int +TIMECONDITION: int +TIMECONDITION_IFMODSINCE: int +TIMECONDITION_IFUNMODSINCE: int +TIMECONDITION_LASTMOD: int +TIMECONDITION_NONE: int +TIMEOUT: int +TIMEOUT_MS: int +TIMEVALUE: int +TLSAUTH_PASSWORD: int +TLSAUTH_TYPE: int +TLSAUTH_USERNAME: int +TOTAL_TIME: int +TRANSFERTEXT: int +TRANSFER_ENCODING: int +UNIX_SOCKET_PATH: int +UNRESTRICTED_AUTH: int +UPLOAD: int +URL: int +USERAGENT: int +USERNAME: int +USERPWD: int +USESSL_ALL: int +USESSL_CONTROL: int +USESSL_NONE: int +USESSL_TRY: int +USE_SSL: int +VERBOSE: int +VERSION_ASYNCHDNS: int +VERSION_CONV: int +VERSION_CURLDEBUG: int +VERSION_DEBUG: int +VERSION_FIRST: int +VERSION_GSSAPI: int +VERSION_GSSNEGOTIATE: int +VERSION_HTTP2: int +VERSION_IDN: int +VERSION_IPV6: int +VERSION_KERBEROS4: int +VERSION_KERBEROS5: int +VERSION_LARGEFILE: int +VERSION_LIBZ: int +VERSION_NTLM: int +VERSION_NTLM_WB: int +VERSION_PSL: int +VERSION_SPNEGO: int +VERSION_SSL: int +VERSION_SSPI: int +VERSION_TLSAUTH_SRP: int +VERSION_UNIX_SOCKETS: int +WILDCARDMATCH: int +WRITEDATA: int +WRITEFUNCTION: int +WRITEFUNC_PAUSE: int +WRITEHEADER: int +XFERINFOFUNCTION: int +XOAUTH2_BEARER: int + +E_ABORTED_BY_CALLBACK: int +E_AGAIN: int +E_ALREADY_COMPLETE: int +E_BAD_CALLING_ORDER: int +E_BAD_CONTENT_ENCODING: int +E_BAD_DOWNLOAD_RESUME: int +E_BAD_FUNCTION_ARGUMENT: int +E_BAD_PASSWORD_ENTERED: int +E_CALL_MULTI_PERFORM: int +E_CHUNK_FAILED: int +E_CONV_FAILED: int +E_CONV_REQD: int +E_COULDNT_CONNECT: int +E_COULDNT_RESOLVE_HOST: int +E_COULDNT_RESOLVE_PROXY: int +E_FAILED_INIT: int +E_FILESIZE_EXCEEDED: int +E_FILE_COULDNT_READ_FILE: int +E_FTP_ACCEPT_FAILED: int +E_FTP_ACCEPT_TIMEOUT: int +E_FTP_ACCESS_DENIED: int +E_FTP_BAD_DOWNLOAD_RESUME: int +E_FTP_BAD_FILE_LIST: int +E_FTP_CANT_GET_HOST: int +E_FTP_CANT_RECONNECT: int +E_FTP_COULDNT_GET_SIZE: int +E_FTP_COULDNT_RETR_FILE: int +E_FTP_COULDNT_SET_ASCII: int +E_FTP_COULDNT_SET_BINARY: int +E_FTP_COULDNT_SET_TYPE: int +E_FTP_COULDNT_STOR_FILE: int +E_FTP_COULDNT_USE_REST: int +E_FTP_PARTIAL_FILE: int +E_FTP_PORT_FAILED: int +E_FTP_PRET_FAILED: int +E_FTP_QUOTE_ERROR: int +E_FTP_SSL_FAILED: int +E_FTP_USER_PASSWORD_INCORRECT: int +E_FTP_WEIRD_227_FORMAT: 
int +E_FTP_WEIRD_PASS_REPLY: int +E_FTP_WEIRD_PASV_REPLY: int +E_FTP_WEIRD_SERVER_REPLY: int +E_FTP_WEIRD_USER_REPLY: int +E_FTP_WRITE_ERROR: int +E_FUNCTION_NOT_FOUND: int +E_GOT_NOTHING: int +E_HTTP2: int +E_HTTP_NOT_FOUND: int +E_HTTP_PORT_FAILED: int +E_HTTP_POST_ERROR: int +E_HTTP_RANGE_ERROR: int +E_HTTP_RETURNED_ERROR: int +E_INTERFACE_FAILED: int +E_LDAP_CANNOT_BIND: int +E_LDAP_INVALID_URL: int +E_LDAP_SEARCH_FAILED: int +E_LIBRARY_NOT_FOUND: int +E_LOGIN_DENIED: int +E_MALFORMAT_USER: int +E_MULTI_ADDED_ALREADY: int +E_MULTI_BAD_EASY_HANDLE: int +E_MULTI_BAD_HANDLE: int +E_MULTI_BAD_SOCKET: int +E_MULTI_CALL_MULTI_PERFORM: int +E_MULTI_CALL_MULTI_SOCKET: int +E_MULTI_INTERNAL_ERROR: int +E_MULTI_OK: int +E_MULTI_OUT_OF_MEMORY: int +E_MULTI_UNKNOWN_OPTION: int +E_NOT_BUILT_IN: int +E_NO_CONNECTION_AVAILABLE: int +E_OK: int +E_OPERATION_TIMEDOUT: int +E_OPERATION_TIMEOUTED: int +E_OUT_OF_MEMORY: int +E_PARTIAL_FILE: int +E_PEER_FAILED_VERIFICATION: int +E_QUOTE_ERROR: int +E_RANGE_ERROR: int +E_READ_ERROR: int +E_RECV_ERROR: int +E_REMOTE_ACCESS_DENIED: int +E_REMOTE_DISK_FULL: int +E_REMOTE_FILE_EXISTS: int +E_REMOTE_FILE_NOT_FOUND: int +E_RTSP_CSEQ_ERROR: int +E_RTSP_SESSION_ERROR: int +E_SEND_ERROR: int +E_SEND_FAIL_REWIND: int +E_SHARE_IN_USE: int +E_SSH: int +E_SSL_CACERT: int +E_SSL_CACERT_BADFILE: int +E_SSL_CERTPROBLEM: int +E_SSL_CIPHER: int +E_SSL_CONNECT_ERROR: int +E_SSL_CRL_BADFILE: int +E_SSL_ENGINE_INITFAILED: int +E_SSL_ENGINE_NOTFOUND: int +E_SSL_ENGINE_SETFAILED: int +E_SSL_INVALIDCERTSTATUS: int +E_SSL_ISSUER_ERROR: int +E_SSL_PEER_CERTIFICATE: int +E_SSL_PINNEDPUBKEYNOTMATCH: int +E_SSL_SHUTDOWN_FAILED: int +E_TELNET_OPTION_SYNTAX: int +E_TFTP_DISKFULL: int +E_TFTP_EXISTS: int +E_TFTP_ILLEGAL: int +E_TFTP_NOSUCHUSER: int +E_TFTP_NOTFOUND: int +E_TFTP_PERM: int +E_TFTP_UNKNOWNID: int +E_TOO_MANY_REDIRECTS: int +E_UNKNOWN_OPTION: int +E_UNKNOWN_TELNET_OPTION: int +E_UNSUPPORTED_PROTOCOL: int +E_UPLOAD_FAILED: int +E_URL_MALFORMAT: int +E_URL_MALFORMAT_USER: int +E_USE_SSL_FAILED: int +E_WRITE_ERROR: int diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyre_extensions.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyre_extensions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..742b880132561e7631ab643accc54c07a61924cc --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/pyre_extensions.pyi @@ -0,0 +1,7 @@ +from typing import Any, List, Optional, Type, TypeVar + +_T = TypeVar("_T") + +def none_throws(optional: Optional[_T], message: str = ...) -> _T: ... +def safe_cast(new_type: Type[_T], value: Any) -> _T: ... +def ParameterSpecification(__name: str) -> List[Type[Any]]: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/singledispatch.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/singledispatch.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f264047ac201ae50561b16c10519014636caec1b --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/singledispatch.pyi @@ -0,0 +1,15 @@ +from typing import Any, Callable, Generic, Mapping, TypeVar, overload + +_T = TypeVar("_T") + +class _SingleDispatchCallable(Generic[_T]): + registry: Mapping[Any, Callable[..., _T]] + def dispatch(self, cls: Any) -> Callable[..., _T]: ... 
+ @overload + def register(self, cls: Any) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... + @overload + def register(self, cls: Any, func: Callable[..., _T]) -> Callable[..., _T]: ... + def _clear_cache(self) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> _T: ... + +def singledispatch(func: Callable[..., _T]) -> _SingleDispatchCallable[_T]: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/tabulate.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/tabulate.pyi new file mode 100644 index 0000000000000000000000000000000000000000..dc935400e6c5542bde02ddc2a30d9d9d1592a847 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/tabulate.pyi @@ -0,0 +1,43 @@ +from typing import Any, Callable, Container, Dict, Iterable, List, Mapping, NamedTuple, Optional, Sequence, Union + +PRESERVE_WHITESPACE: bool +WIDE_CHARS_MODE: bool +tabulate_formats: List[str] + +class Line(NamedTuple): + begin: str + hline: str + sep: str + end: str + +class DataRow(NamedTuple): + begin: str + sep: str + end: str + +_TableFormatLine = Union[None, Line, Callable[[List[int], List[str]], str]] +_TableFormatRow = Union[None, DataRow, Callable[[List[Any], List[int], List[str]], str]] + +class TableFormat(NamedTuple): + lineabove: _TableFormatLine + linebelowheader: _TableFormatLine + linebetweenrows: _TableFormatLine + linebelow: _TableFormatLine + headerrow: _TableFormatRow + datarow: _TableFormatRow + padding: int + with_header_hide: Optional[Container[str]] + +def simple_separated_format(separator: str) -> TableFormat: ... +def tabulate( + tabular_data: Union[Mapping[str, Iterable[Any]], Iterable[Iterable[Any]]], + headers: Union[str, Dict[str, str], Sequence[str]] = ..., + tablefmt: Union[str, TableFormat] = ..., + floatfmt: Union[str, Iterable[str]] = ..., + numalign: Optional[str] = ..., + stralign: Optional[str] = ..., + missingval: Union[str, Iterable[str]] = ..., + showindex: Union[str, bool, Iterable[Any]] = ..., + disable_numparse: Union[bool, Iterable[int]] = ..., + colalign: Optional[Iterable[Optional[str]]] = ..., +) -> str: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/termcolor.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/termcolor.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b890dcd1b82c3972fcbc95dbc108c74a501b819a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/termcolor.pyi @@ -0,0 +1,8 @@ +from typing import Any, Iterable, Optional, Text + +def colored( + text: Text, color: Optional[Text] = ..., on_color: Optional[Text] = ..., attrs: Optional[Iterable[Text]] = ... +) -> Text: ... +def cprint( + text: Text, color: Optional[Text] = ..., on_color: Optional[Text] = ..., attrs: Optional[Iterable[Text]] = ..., **kwargs: Any +) -> None: ... 
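The singledispatch stub above mirrors functools.singledispatch from the stdlib; its two register overloads cover the decorator form and direct registration. A minimal sketch, assuming the PyPI backport's import path:

from singledispatch import singledispatch    # PyPI backport of functools.singledispatch

@singledispatch
def describe(value):
    return "object"

@describe.register(int)                      # first overload: register(cls) -> decorator
def _(value):
    return "int"

describe.register(str, lambda value: "str")  # second overload: register(cls, func)
print(describe(1), describe("x"), describe(b""))  # int str object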
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/toml.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/toml.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e19bc775f423e9766e11d353cbbd4e8340da50c0 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/toml.pyi @@ -0,0 +1,19 @@ +import sys +from _typeshed import StrPath, SupportsWrite +from typing import IO, Any, List, Mapping, MutableMapping, Text, Type, Union + +if sys.version_info >= (3, 6): + _PathLike = StrPath +elif sys.version_info >= (3, 4): + import pathlib + + _PathLike = Union[StrPath, pathlib.PurePath] +else: + _PathLike = StrPath + +class TomlDecodeError(Exception): ... + +def load(f: Union[_PathLike, List[Text], IO[str]], _dict: Type[MutableMapping[str, Any]] = ...) -> MutableMapping[str, Any]: ... +def loads(s: Text, _dict: Type[MutableMapping[str, Any]] = ...) -> MutableMapping[str, Any]: ... +def dump(o: Mapping[str, Any], f: SupportsWrite[str]) -> str: ... +def dumps(o: Mapping[str, Any]) -> str: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/typing_extensions.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/typing_extensions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..aec0f9440a8c5a85a62be736a7fc9abbc5d54057 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/typing_extensions.pyi @@ -0,0 +1,114 @@ +import abc +import sys +from typing import ( + TYPE_CHECKING as TYPE_CHECKING, + Any, + Callable, + ClassVar as ClassVar, + ContextManager as ContextManager, + Counter as Counter, + DefaultDict as DefaultDict, + Deque as Deque, + Dict, + ItemsView, + KeysView, + Mapping, + NewType as NewType, + NoReturn as NoReturn, + Optional, + Text as Text, + Tuple, + Type as Type, + TypeVar, + ValuesView, + overload as overload, +) + +_T = TypeVar("_T") +_F = TypeVar("_F", bound=Callable[..., Any]) +_TC = TypeVar("_TC", bound=Type[object]) + +class _SpecialForm: + def __getitem__(self, typeargs: Any) -> Any: ... + +def runtime_checkable(cls: _TC) -> _TC: ... + +# This alias for above is kept here for backwards compatibility. +runtime = runtime_checkable +Protocol: _SpecialForm = ... +Final: _SpecialForm = ... + +def final(f: _F) -> _F: ... + +Literal: _SpecialForm = ... + +def IntVar(__name: str) -> Any: ... # returns a new TypeVar + +# Internal mypy fallback type for all typed dicts (does not exist at runtime) +class _TypedDict(Mapping[str, object], metaclass=abc.ABCMeta): + def copy(self: _T) -> _T: ... + # Using NoReturn so that only calls using mypy plugin hook that specialize the signature + # can go through. + def setdefault(self, k: NoReturn, default: object) -> object: ... + # Mypy plugin hook for 'pop' expects that 'default' has a type variable type. + def pop(self, k: NoReturn, default: _T = ...) -> object: ... + def update(self: _T, __m: _T) -> None: ... + if sys.version_info < (3, 0): + def has_key(self, k: str) -> bool: ... + def viewitems(self) -> ItemsView[str, object]: ... + def viewkeys(self) -> KeysView[str]: ... + def viewvalues(self) -> ValuesView[object]: ... + else: + def items(self) -> ItemsView[str, object]: ... + def keys(self) -> KeysView[str]: ... + def values(self) -> ValuesView[object]: ... + def __delitem__(self, k: NoReturn) -> None: ... + +# TypedDict is a (non-subscriptable) special form. +TypedDict: object = ... 
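Before the version-gated re-exports below: TypedDict is declared as an opaque special form here because type checkers special-case it rather than typing it. A minimal sketch of the class-based syntax it enables:

from typing_extensions import TypedDict

class Movie(TypedDict):
    title: str
    year: int

m: Movie = {"title": "Metropolis", "year": 1927}  # checked structurally; a plain dict at runtime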
+ +if sys.version_info >= (3, 3): + from typing import ChainMap as ChainMap + +if sys.version_info >= (3, 5): + from typing import ( + AsyncContextManager as AsyncContextManager, + AsyncIterable as AsyncIterable, + AsyncIterator as AsyncIterator, + Awaitable as Awaitable, + Coroutine as Coroutine, + ) + +if sys.version_info >= (3, 6): + from typing import AsyncGenerator as AsyncGenerator + +def get_type_hints( + obj: Callable[..., Any], + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + include_extras: bool = ..., +) -> Dict[str, Any]: ... + +if sys.version_info >= (3, 7): + def get_args(tp: Any) -> Tuple[Any, ...]: ... + def get_origin(tp: Any) -> Optional[Any]: ... + +Annotated: _SpecialForm = ... +_AnnotatedAlias: Any = ... # undocumented + +# TypeAlias is a (non-subscriptable) special form. +class TypeAlias: ... + +@runtime_checkable +class SupportsIndex(Protocol, metaclass=abc.ABCMeta): + @abc.abstractmethod + def __index__(self) -> int: ... + +# PEP 612 support for Python < 3.9 +if sys.version_info >= (3, 10): + from typing import Concatenate as Concatenate, ParamSpec as ParamSpec +else: + class ParamSpec: + __name__: str + def __init__(self, name: str) -> None: ... + Concatenate: _SpecialForm = ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/ujson.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/ujson.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f96d4754b9da8296c44effd4d2b60a5c9d81762b --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/2and3/ujson.pyi @@ -0,0 +1,35 @@ +from typing import IO, Any, AnyStr + +__version__: str + +def encode( + obj: Any, + ensure_ascii: bool = ..., + double_precision: int = ..., + encode_html_chars: bool = ..., + escape_forward_slashes: bool = ..., + sort_keys: bool = ..., + indent: int = ..., +) -> str: ... +def dumps( + obj: Any, + ensure_ascii: bool = ..., + double_precision: int = ..., + encode_html_chars: bool = ..., + escape_forward_slashes: bool = ..., + sort_keys: bool = ..., + indent: int = ..., +) -> str: ... +def dump( + obj: Any, + fp: IO[str], + ensure_ascii: bool = ..., + double_precision: int = ..., + encode_html_chars: bool = ..., + escape_forward_slashes: bool = ..., + sort_keys: bool = ..., + indent: int = ..., +) -> None: ... +def decode(s: AnyStr, precise_float: bool = ...) -> Any: ... +def loads(s: AnyStr, precise_float: bool = ...) -> Any: ... +def load(fp: IO[AnyStr], precise_float: bool = ...) -> Any: ... 
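That closes the ujson stub, which mirrors json's dump/dumps/load/loads but with its own keyword set (note escape_forward_slashes and double_precision). A quick round-trip sketch:

import ujson

payload = ujson.dumps({"path": "/tmp/x", "pi": 3.14159}, indent=2, escape_forward_slashes=False)
data = ujson.loads(payload)                  # loads/decode accept str or bytes (AnyStr)
assert data["pi"] == 3.14159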
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2bbbc29a428f2abe79a347256ef0aeaad2ce54d8 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/__init__.pyi @@ -0,0 +1 @@ +from .threadpool import open as open diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/base.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0f5f99a2e467d7f0aea612292709e51989b12383 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/base.pyi @@ -0,0 +1,35 @@ +from types import CodeType, FrameType, TracebackType, coroutine +from typing import Any, Coroutine, Generator, Generic, Iterator, Optional, Type, TypeVar, Union + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_V_co = TypeVar("_V_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) + +class AsyncBase(Generic[_T]): + def __init__(self, file: str, loop: Any, executor: Any) -> None: ... + async def __aiter__(self) -> Iterator[_T]: ... + async def __anext__(self) -> _T: ... + +class AiofilesContextManager(Generic[_T_co, _T_contra, _V_co]): + def __init__(self, coro: Coroutine[_T_co, _T_contra, _V_co]) -> None: ... + def send(self, value: _T_contra) -> _T_co: ... + def throw( + self, typ: Type[BaseException], val: Union[BaseException, object] = ..., tb: Optional[TracebackType] = ... + ) -> _T_co: ... + def close(self) -> None: ... + @property + def gi_frame(self) -> FrameType: ... + @property + def gi_running(self) -> bool: ... + @property + def gi_code(self) -> CodeType: ... + def __next__(self) -> _T_co: ... + @coroutine + def __iter__(self) -> Iterator[Coroutine[_T_co, _T_contra, _V_co]]: ... + def __await__(self) -> Generator[Any, None, _V_co]: ... + async def __anext__(self) -> _V_co: ... + async def __aenter__(self) -> _V_co: ... + async def __aexit__( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType] + ) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/os.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/os.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5144abe98cb42fe6eba2d592e2c23f5de900650f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/os.pyi @@ -0,0 +1,26 @@ +import sys +from _typeshed import AnyPath +from os import stat_result +from typing import Optional, Sequence, Union, overload + +_FdOrAnyPath = Union[int, AnyPath] + +async def stat(path: _FdOrAnyPath, *, dir_fd: Optional[int] = ..., follow_symlinks: bool = ...) -> stat_result: ... +async def rename(src: AnyPath, dst: AnyPath, *, src_dir_fd: Optional[int] = ..., dst_dir_fd: Optional[int] = ...) -> None: ... +async def remove(path: AnyPath, *, dir_fd: Optional[int] = ...) -> None: ... +async def mkdir(path: AnyPath, mode: int = ..., *, dir_fd: Optional[int] = ...) -> None: ... +async def rmdir(path: AnyPath, *, dir_fd: Optional[int] = ...) -> None: ... 
+ +if sys.platform != "win32": + @overload + async def sendfile(__out_fd: int, __in_fd: int, offset: Optional[int], count: int) -> int: ... + @overload + async def sendfile( + __out_fd: int, + __in_fd: int, + offset: int, + count: int, + headers: Sequence[bytes] = ..., + trailers: Sequence[bytes] = ..., + flags: int = ..., + ) -> int: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bde6ad8a36ce16bc0d4cf6aaf70c6d6f388d7ab2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/__init__.pyi @@ -0,0 +1,91 @@ +from _typeshed import AnyPath, OpenBinaryMode, OpenBinaryModeReading, OpenBinaryModeUpdating, OpenBinaryModeWriting, OpenTextMode +from asyncio import AbstractEventLoop +from typing import Any, Callable, Optional, TypeVar, Union, overload +from typing_extensions import Literal + +from ..base import AiofilesContextManager +from .binary import AsyncBufferedIOBase, AsyncBufferedReader, AsyncFileIO, _UnknownAsyncBinaryIO +from .text import AsyncTextIOWrapper + +_OpenFile = TypeVar("_OpenFile", bound=Union[AnyPath, int]) +_Opener = Callable[[str, int], int] + +# Text mode: always returns AsyncTextIOWrapper +@overload +def open( + file: _OpenFile, + mode: OpenTextMode = ..., + buffering: int = ..., + encoding: Optional[str] = ..., + errors: Optional[str] = ..., + newline: Optional[str] = ..., + closefd: bool = ..., + opener: Optional[_Opener] = ..., + *, + loop: Optional[AbstractEventLoop] = ..., + executor: Optional[Any] = ..., +) -> AiofilesContextManager[None, None, AsyncTextIOWrapper]: ... + +# Unbuffered binary: returns a FileIO +@overload +def open( + file: _OpenFile, + mode: OpenBinaryMode, + buffering: Literal[0], + encoding: None = ..., + errors: None = ..., + newline: None = ..., + closefd: bool = ..., + opener: Optional[_Opener] = ..., + *, + loop: Optional[AbstractEventLoop] = ..., + executor: Optional[Any] = ..., +) -> AiofilesContextManager[None, None, AsyncFileIO]: ... + +# Buffered binary reading/updating: AsyncBufferedReader +@overload +def open( + file: _OpenFile, + mode: Union[OpenBinaryModeReading, OpenBinaryModeUpdating], + buffering: Literal[-1, 1] = ..., + encoding: None = ..., + errors: None = ..., + newline: None = ..., + closefd: bool = ..., + opener: Optional[_Opener] = ..., + *, + loop: Optional[AbstractEventLoop] = ..., + executor: Optional[Any] = ..., +) -> AiofilesContextManager[None, None, AsyncBufferedReader]: ... + +# Buffered binary writing: AsyncBufferedIOBase +@overload +def open( + file: _OpenFile, + mode: OpenBinaryModeWriting, + buffering: Literal[-1, 1] = ..., + encoding: None = ..., + errors: None = ..., + newline: None = ..., + closefd: bool = ..., + opener: Optional[_Opener] = ..., + *, + loop: Optional[AbstractEventLoop] = ..., + executor: Optional[Any] = ..., +) -> AiofilesContextManager[None, None, AsyncBufferedIOBase]: ... + +# Buffering cannot be determined: fall back to _UnknownAsyncBinaryIO +@overload +def open( + file: _OpenFile, + mode: OpenBinaryMode, + buffering: int, + encoding: None = ..., + errors: None = ..., + newline: None = ..., + closefd: bool = ..., + opener: Optional[_Opener] = ..., + *, + loop: Optional[AbstractEventLoop] = ..., + executor: Optional[Any] = ..., +) -> AiofilesContextManager[None, None, _UnknownAsyncBinaryIO]: ... 
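Those open() overloads dispatch on mode and buffering the same way builtins.open does. A minimal sketch exercising the text and unbuffered-binary branches (file name hypothetical):

import asyncio
import aiofiles

async def main() -> None:
    async with aiofiles.open("notes.txt", "w") as f:                 # text mode -> AsyncTextIOWrapper
        await f.write("hello\n")
    async with aiofiles.open("notes.txt", "rb", buffering=0) as f:   # unbuffered -> AsyncFileIO
        print(await f.read())

asyncio.run(main())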
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/binary.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/binary.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ecfd4ed12f8c56765603e1e95fecc6360be624d1 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/binary.pyi @@ -0,0 +1,41 @@ +from _typeshed import AnyPath, ReadableBuffer, WriteableBuffer +from io import FileIO +from typing import Iterable, List, Optional, Union + +from ..base import AsyncBase + +class _UnknownAsyncBinaryIO(AsyncBase[bytes]): + async def close(self) -> None: ... + async def flush(self) -> None: ... + async def isatty(self) -> bool: ... + async def read(self, __size: int = ...) -> bytes: ... + async def readinto(self, __buffer: WriteableBuffer) -> Optional[int]: ... + async def readline(self, __size: Optional[int] = ...) -> bytes: ... + async def readlines(self, __hint: int = ...) -> List[bytes]: ... + async def seek(self, __offset: int, __whence: int = ...) -> int: ... + async def seekable(self) -> bool: ... + async def tell(self) -> int: ... + async def truncate(self, __size: Optional[int] = ...) -> int: ... + async def writable(self) -> bool: ... + async def write(self, __b: ReadableBuffer) -> int: ... + async def writelines(self, __lines: Iterable[ReadableBuffer]) -> None: ... + def fileno(self) -> int: ... + def readable(self) -> bool: ... + @property + def closed(self) -> bool: ... + @property + def mode(self) -> str: ... + @property + def name(self) -> Union[AnyPath, int]: ... + +class AsyncBufferedIOBase(_UnknownAsyncBinaryIO): + async def read1(self, __size: int = ...) -> bytes: ... + def detach(self) -> FileIO: ... + @property + def raw(self) -> FileIO: ... + +class AsyncBufferedReader(AsyncBufferedIOBase): + async def peek(self, __size: int = ...) -> bytes: ... + +class AsyncFileIO(_UnknownAsyncBinaryIO): + async def readall(self) -> bytes: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/text.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/text.pyi new file mode 100644 index 0000000000000000000000000000000000000000..39588f8a3695f14280462b1181f0645dc3dfe270 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/aiofiles/threadpool/text.pyi @@ -0,0 +1,38 @@ +from _typeshed import AnyPath +from typing import BinaryIO, Iterable, List, Optional, Tuple, Union + +from ..base import AsyncBase + +class AsyncTextIOWrapper(AsyncBase[str]): + async def close(self) -> None: ... + async def flush(self) -> None: ... + async def isatty(self) -> bool: ... + async def read(self, __size: Optional[int] = ...) -> str: ... + async def readline(self, __size: int = ...) -> str: ... + async def readlines(self, __hint: int = ...) -> List[str]: ... + async def seek(self, __offset: int, __whence: int = ...) -> int: ... + async def seekable(self) -> bool: ... + async def tell(self) -> int: ... + async def truncate(self, __size: Optional[int] = ...) -> int: ... + async def writable(self) -> bool: ... + async def write(self, __b: str) -> int: ... + async def writelines(self, __lines: Iterable[str]) -> None: ... + def detach(self) -> BinaryIO: ... + def fileno(self) -> int: ... + def readable(self) -> bool: ... + @property + def buffer(self) -> BinaryIO: ... + @property + def closed(self) -> bool: ... 
+ @property + def encoding(self) -> str: ... + @property + def errors(self) -> Optional[str]: ... + @property + def line_buffering(self) -> bool: ... + @property + def newlines(self) -> Union[str, Tuple[str, ...], None]: ... + @property + def name(self) -> Union[AnyPath, int]: ... + @property + def mode(self) -> str: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/contextvars.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/contextvars.pyi new file mode 100644 index 0000000000000000000000000000000000000000..429d2037afb2bbbc813696f1941f7fcdce6e34ca --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/contextvars.pyi @@ -0,0 +1,38 @@ +import sys +from typing import Any, Callable, ClassVar, Generic, Iterator, Mapping, TypeVar + +if sys.version_info >= (3, 9): + from types import GenericAlias + +_T = TypeVar("_T") + +class ContextVar(Generic[_T]): + def __init__(self, name: str, *, default: _T = ...) -> None: ... + @property + def name(self) -> str: ... + def get(self, default: _T = ...) -> _T: ... + def set(self, value: _T) -> Token[_T]: ... + def reset(self, token: Token[_T]) -> None: ... + if sys.version_info >= (3, 9): + def __class_getitem__(cls, item: Any) -> GenericAlias: ... + +class Token(Generic[_T]): + @property + def var(self) -> ContextVar[_T]: ... + @property + def old_value(self) -> Any: ... # returns either _T or MISSING, but that's hard to express + MISSING: ClassVar[object] + if sys.version_info >= (3, 9): + def __class_getitem__(cls, item: Any) -> GenericAlias: ... + +def copy_context() -> Context: ... + +# It doesn't make sense to make this generic, because for most Contexts each ContextVar will have +# a different value. +class Context(Mapping[ContextVar[Any], Any]): + def __init__(self) -> None: ... + def run(self, callable: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: ... + def copy(self) -> Context: ... + def __getitem__(self, key: ContextVar[Any]) -> Any: ... + def __iter__(self) -> Iterator[ContextVar[Any]]: ... + def __len__(self) -> int: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/dataclasses.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/dataclasses.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1f5b6b4d35b923e3c1a444573cb26623cc820eb0 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/dataclasses.pyi @@ -0,0 +1,95 @@ +import sys +from typing import Any, Callable, Dict, Generic, Iterable, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload + +if sys.version_info >= (3, 9): + from types import GenericAlias + +_T = TypeVar("_T") + +class _MISSING_TYPE: ... + +MISSING: _MISSING_TYPE +@overload +def asdict(obj: Any) -> Dict[str, Any]: ... +@overload +def asdict(obj: Any, *, dict_factory: Callable[[List[Tuple[str, Any]]], _T]) -> _T: ... +@overload +def astuple(obj: Any) -> Tuple[Any, ...]: ... +@overload +def astuple(obj: Any, *, tuple_factory: Callable[[List[Any]], _T]) -> _T: ... +@overload +def dataclass(_cls: Type[_T]) -> Type[_T]: ... +@overload +def dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]: ... +@overload +def dataclass( + *, init: bool = ..., repr: bool = ..., eq: bool = ..., order: bool = ..., unsafe_hash: bool = ..., frozen: bool = ... +) -> Callable[[Type[_T]], Type[_T]]: ... 
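Stepping back briefly to the contextvars stub above (the dataclasses hunk continues below): ContextVar.set returns a Token that reset consumes, and copy_context snapshots the values current at call time. A minimal sketch:

from contextvars import ContextVar, copy_context

request_id: ContextVar[str] = ContextVar("request_id", default="-")

snapshot = copy_context()                 # captured before any set(); sees the default
token = request_id.set("abc123")
assert request_id.get() == "abc123"
request_id.reset(token)                   # back to the default
assert snapshot.run(request_id.get) == "-"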
+ +class Field(Generic[_T]): + name: str + type: Type[_T] + default: _T + default_factory: Callable[[], _T] + repr: bool + hash: Optional[bool] + init: bool + compare: bool + metadata: Mapping[str, Any] + if sys.version_info >= (3, 9): + def __class_getitem__(cls, item: Any) -> GenericAlias: ... + +# NOTE: Actual return type is 'Field[_T]', but we want to help type checkers +# to understand the magic that happens at runtime. +@overload # `default` and `default_factory` are optional and mutually exclusive. +def field( + *, + default: _T, + init: bool = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + compare: bool = ..., + metadata: Optional[Mapping[str, Any]] = ..., +) -> _T: ... +@overload +def field( + *, + default_factory: Callable[[], _T], + init: bool = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + compare: bool = ..., + metadata: Optional[Mapping[str, Any]] = ..., +) -> _T: ... +@overload +def field( + *, + init: bool = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + compare: bool = ..., + metadata: Optional[Mapping[str, Any]] = ..., +) -> Any: ... +def fields(class_or_instance: Any) -> Tuple[Field[Any], ...]: ... +def is_dataclass(obj: Any) -> bool: ... + +class FrozenInstanceError(AttributeError): ... + +class InitVar(Generic[_T]): + if sys.version_info >= (3, 9): + def __class_getitem__(cls, type: Any) -> GenericAlias: ... + +def make_dataclass( + cls_name: str, + fields: Iterable[Union[str, Tuple[str, type], Tuple[str, type, Field[Any]]]], + *, + bases: Tuple[type, ...] = ..., + namespace: Optional[Dict[str, Any]] = ..., + init: bool = ..., + repr: bool = ..., + eq: bool = ..., + order: bool = ..., + unsafe_hash: bool = ..., + frozen: bool = ..., +) -> type: ... +def replace(obj: _T, **changes: Any) -> _T: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/__init__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/examples.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/examples.pyi new file mode 100644 index 0000000000000000000000000000000000000000..581ebba7e61e4a3d2473342beab51f70c76384c7 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/examples.pyi @@ -0,0 +1,5 @@ +from typing import Any + +html_parts: Any + +def __getattr__(name: str) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/nodes.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/nodes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..11773347ff8759548a696ad7a3d3d4972c9fcb5e --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/nodes.pyi @@ -0,0 +1,6 @@ +from typing import Any, List + +class reference: + def __init__(self, rawsource: str = ..., text: str = ..., *children: List[Any], **attributes: Any) -> None: ... + +def __getattr__(name: str) -> Any: ... 
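Rounding off the dataclasses stub above: field()'s overloads make default and default_factory mutually exclusive, and replace() is the frozen-safe way to derive modified instances. A minimal sketch:

from dataclasses import dataclass, field, replace

@dataclass(frozen=True)
class Job:
    name: str
    tags: tuple = ()
    retries: int = field(default=3, compare=False)   # excluded from __eq__

j2 = replace(Job("sync"), retries=5)                 # new instance; frozen-safe update
assert j2.retries == 5 and j2 == Job("sync")         # retries ignored in comparison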
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/__init__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/__init__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/nodes.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/nodes.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e27843e5338213713e26973127c738c14313ff98 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/nodes.pyi @@ -0,0 +1,3 @@ +from typing import Any + +def __getattr__(name: str) -> Any: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/roles.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/roles.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2c3d65b68c949b9bbbf70904f096a5c4df00287f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/roles.pyi @@ -0,0 +1,12 @@ +from typing import Any, Callable, Dict, List, Tuple + +import docutils.nodes +import docutils.parsers.rst.states + +_RoleFn = Callable[ + [str, str, str, int, docutils.parsers.rst.states.Inliner, Dict[str, Any], List[str]], + Tuple[List[docutils.nodes.reference], List[docutils.nodes.reference]], +] + +def register_local_role(name: str, role_fn: _RoleFn) -> None: ... +def __getattr__(name: str) -> Any: ... # incomplete diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/states.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/states.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ac0872693442b8a85e61cf3f6a1cfcd220316563 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/docutils/parsers/rst/states.pyi @@ -0,0 +1,6 @@ +from typing import Any + +class Inliner: + def __init__(self) -> None: ... + +def __getattr__(name: str) -> Any: ... 
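The roles stub above types a role function as taking the seven arguments docutils passes (role name, raw text, interpreted text, line number, Inliner, options, content) and returning a pair of node and message lists. A hedged sketch of registering one; the role name and tracker URL are invented for illustration:

import docutils.nodes
from docutils.parsers.rst import roles

def ticket_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    # Turn :ticket:`123` into a reference to a (hypothetical) issue tracker.
    uri = "https://tracker.example.org/issues/" + text
    node = docutils.nodes.reference(rawtext, "#" + text, refuri=uri)
    return [node], []

roles.register_local_role("ticket", ticket_role)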
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/filelock/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/filelock/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..84a648f432e58a49474fde7837280068be2b68a8 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/filelock/__init__.pyi @@ -0,0 +1,54 @@ +import sys +from logging import Logger +from types import TracebackType +from typing import Optional, Type, Union + +def logger() -> Logger: ... + +class Timeout(TimeoutError): + def __init__(self, lock_file: str) -> None: ... + def __str__(self) -> str: ... + +class _Acquire_ReturnProxy: + def __init__(self, lock: str) -> None: ... + def __enter__(self) -> str: ... + def __exit__( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], traceback: Optional[TracebackType] + ) -> None: ... + +class BaseFileLock: + def __init__(self, lock_file: str, timeout: Union[float, int, str] = ...) -> None: ... + @property + def lock_file(self) -> str: ... + @property + def timeout(self) -> float: ... + @timeout.setter + def timeout(self, value: Union[int, str, float]) -> None: ... + @property + def is_locked(self) -> bool: ... + def acquire(self, timeout: Optional[float] = ..., poll_intervall: float = ...) -> _Acquire_ReturnProxy: ... + def release(self, force: bool = ...) -> None: ... + def __enter__(self) -> BaseFileLock: ... + def __exit__( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], traceback: Optional[TracebackType] + ) -> None: ... + def __del__(self) -> None: ... + +class WindowsFileLock(BaseFileLock): + def _acquire(self) -> None: ... + def _release(self) -> None: ... + +class UnixFileLock(BaseFileLock): + def _acquire(self) -> None: ... + def _release(self) -> None: ... + +class SoftFileLock(BaseFileLock): + def _acquire(self) -> None: ... + def _release(self) -> None: ... + +if sys.platform == "win32": + FileLock = WindowsFileLock +elif sys.platform == "linux" or sys.platform == "darwin": + FileLock = UnixFileLock +else: + FileLock = SoftFileLock diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1de0916f79df748b8784ee5f4e6a6e561293ef70 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/__init__.pyi @@ -0,0 +1 @@ +from .api import freeze_time as freeze_time diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/api.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/api.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a0d50ed7f49df409cbf19b3874d534f5d178e2c7 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/freezegun/api.pyi @@ -0,0 +1,56 @@ +from datetime import date, datetime, timedelta +from numbers import Real +from typing import Any, Awaitable, Callable, Iterator, Optional, Sequence, Type, TypeVar, Union, overload + +_T = TypeVar("_T") +_Freezable = Union[str, datetime, date, timedelta] + +class TickingDateTimeFactory(object): + def __init__(self, time_to_freeze: datetime, start: datetime) -> None: ... + def __call__(self) -> datetime: ... 
+ +class FrozenDateTimeFactory(object): + def __init__(self, time_to_freeze: datetime) -> None: ... + def __call__(self) -> datetime: ... + def tick(self, delta: Union[float, Real, timedelta] = ...) -> None: ... + def move_to(self, target_datetime: Optional[_Freezable]) -> None: ... + +class StepTickTimeFactory(object): + def __init__(self, time_to_freeze: datetime, step_width: float) -> None: ... + def __call__(self) -> datetime: ... + def tick(self, delta: Optional[timedelta] = ...) -> None: ... + def update_step_width(self, step_width: float) -> None: ... + def move_to(self, target_datetime: Optional[_Freezable]) -> None: ... + +class _freeze_time: + def __init__( + self, + time_to_freeze_str: Optional[_Freezable], + tz_offset: float, + ignore: Sequence[str], + tick: bool, + as_arg: bool, + auto_tick_seconds: float, + ) -> None: ... + @overload + def __call__(self, func: Type[_T]) -> Type[_T]: ... + @overload + def __call__(self, func: Callable[..., Awaitable[_T]]) -> Callable[..., Awaitable[_T]]: ... + @overload + def __call__(self, func: Callable[..., _T]) -> Callable[..., _T]: ... + def __enter__(self) -> Any: ... + def __exit__(self, *args: Any) -> None: ... + def start(self) -> Any: ... + def stop(self) -> None: ... + def decorate_class(self, klass: Type[_T]) -> _T: ... + def decorate_coroutine(self, coroutine: _T) -> _T: ... + def decorate_callable(self, func: Callable[..., _T]) -> Callable[..., _T]: ... + +def freeze_time( + time_to_freeze: Optional[Union[_Freezable, Callable[..., _Freezable], Iterator[_Freezable]]] = ..., + tz_offset: Optional[float] = ..., + ignore: Optional[Sequence[str]] = ..., + tick: Optional[bool] = ..., + as_arg: Optional[bool] = ..., + auto_tick_seconds: Optional[float] = ..., +) -> _freeze_time: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/frozendict.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/frozendict.pyi new file mode 100644 index 0000000000000000000000000000000000000000..704a44c135c8ae6b5226540250de750dc1cd66b2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/frozendict.pyi @@ -0,0 +1,27 @@ +import collections +from typing import Any, Dict, Generic, Iterable, Iterator, Mapping, Tuple, Type, TypeVar, overload + +_S = TypeVar("_S") +_KT = TypeVar("_KT") +_VT = TypeVar("_VT") + +class frozendict(Mapping[_KT, _VT], Generic[_KT, _VT]): + + dict_cls: Type[Dict] = ... + @overload + def __init__(self, **kwargs: _VT) -> None: ... + @overload + def __init__(self, mapping: Mapping[_KT, _VT]) -> None: ... + @overload + def __init__(self, iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... + def __getitem__(self, key: _KT) -> _VT: ... + def __contains__(self, key: object) -> bool: ... + def copy(self: _S, **add_or_replace: _VT) -> _S: ... + def __iter__(self) -> Iterator[_KT]: ... + def __len__(self) -> int: ... + def __repr__(self) -> str: ... + def __hash__(self) -> int: ... + +class FrozenOrderedDict(frozendict): + + dict_cls: Type[collections.OrderedDict] = ... 
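freeze_time, stubbed above with overloads for classes, coroutines, and plain callables, is also usable as a context manager via _freeze_time.__enter__. A short usage sketch, assuming freezegun is installed:

import datetime
from freezegun import freeze_time

# Context-manager form: now() is pinned inside the block.
with freeze_time("2021-01-01 12:00:00"):
    assert datetime.datetime.now() == datetime.datetime(2021, 1, 1, 12, 0)

# Decorator form, matching the Callable[..., _T] overload.
@freeze_time("2021-01-01")
def today() -> datetime.date:
    return datetime.date.today()

assert today() == datetime.date(2021, 1, 1)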
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d2b45a2de6ceafff854a79e7051cffbc94ee1855 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/__init__.pyi @@ -0,0 +1,49 @@ +from typing import Any, Dict, Mapping, Optional, Union + +from cryptography.hazmat.primitives.asymmetric import rsa + +from . import algorithms + +def decode( + jwt: Union[str, bytes], + key: Union[str, bytes, rsa.RSAPublicKey, rsa.RSAPrivateKey] = ..., + verify: bool = ..., + algorithms: Optional[Any] = ..., + options: Optional[Mapping[Any, Any]] = ..., + **kwargs: Any, +) -> Dict[str, Any]: ... +def encode( + payload: Mapping[str, Any], + key: Union[str, bytes, rsa.RSAPublicKey, rsa.RSAPrivateKey], + algorithm: str = ..., + headers: Optional[Mapping[str, Any]] = ..., + json_encoder: Optional[Any] = ..., +) -> bytes: ... +def register_algorithm(alg_id: str, alg_obj: algorithms.Algorithm[Any]) -> None: ... +def unregister_algorithm(alg_id: str) -> None: ... + +class PyJWTError(Exception): ... +class InvalidTokenError(PyJWTError): ... +class DecodeError(InvalidTokenError): ... +class ExpiredSignatureError(InvalidTokenError): ... +class InvalidAudienceError(InvalidTokenError): ... +class InvalidIssuerError(InvalidTokenError): ... +class InvalidIssuedAtError(InvalidTokenError): ... +class ImmatureSignatureError(InvalidTokenError): ... +class InvalidKeyError(PyJWTError): ... +class InvalidAlgorithmError(InvalidTokenError): ... +class MissingRequiredClaimError(InvalidTokenError): ... +class InvalidSignatureError(DecodeError): ... + +# Compatibility aliases (deprecated) +ExpiredSignature = ExpiredSignatureError +InvalidAudience = InvalidAudienceError +InvalidIssuer = InvalidIssuerError + +# These aren't actually documented, but the package +# exports them in __init__.py, so we should at least +# make sure that mypy doesn't raise spurious errors +# if they're used. 
+get_unverified_header: Any +PyJWT: Any +PyJWS: Any diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/algorithms.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/algorithms.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fe83e56b6be6d8605c473d05cfabbd3468697a32 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/algorithms.pyi @@ -0,0 +1,101 @@ +import sys +from hashlib import _Hash +from typing import Any, ClassVar, Dict, Generic, Optional, Set, TypeVar, Union + +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.asymmetric.ec import ( + EllipticCurvePrivateKey, + EllipticCurvePrivateKeyWithSerialization, + EllipticCurvePrivateNumbers, + EllipticCurvePublicKey, + EllipticCurvePublicKeyWithSerialization, + EllipticCurvePublicNumbers, +) +from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey +from cryptography.hazmat.primitives.asymmetric.rsa import ( + RSAPrivateKey, + RSAPrivateKeyWithSerialization, + RSAPrivateNumbers, + RSAPublicKey, + RSAPublicKeyWithSerialization, + RSAPublicNumbers, +) +from cryptography.hazmat.primitives.asymmetric.utils import Prehashed +from cryptography.hazmat.primitives.hashes import HashAlgorithm + +requires_cryptography: Set[str] + +def get_default_algorithms() -> Dict[str, Algorithm[Any]]: ... + +_K = TypeVar("_K") + +class Algorithm(Generic[_K]): + def prepare_key(self, key: _K) -> _K: ... + def sign(self, msg: bytes, key: _K) -> bytes: ... + def verify(self, msg: bytes, key: _K, sig: bytes) -> bool: ... + @staticmethod + def to_jwk(key_obj: _K) -> str: ... + @staticmethod + def from_jwk(jwk: str) -> _K: ... + +class NoneAlgorithm(Algorithm[None]): + def prepare_key(self, key: Optional[str]) -> None: ... + +class _HashAlg: + def __call__(self, arg: Union[bytes, bytearray, memoryview] = ...) -> _Hash: ... + +class HMACAlgorithm(Algorithm[bytes]): + SHA256: ClassVar[_HashAlg] + SHA384: ClassVar[_HashAlg] + SHA512: ClassVar[_HashAlg] + hash_alg: _HashAlg + def __init__(self, hash_alg: _HashAlg) -> None: ... + def prepare_key(self, key: Union[str, bytes]) -> bytes: ... + @staticmethod + def to_jwk(key_obj: Union[str, bytes]) -> str: ... + @staticmethod + def from_jwk(jwk: Union[str, bytes]) -> bytes: ... + +# Only defined if cryptography is installed. +class RSAAlgorithm(Algorithm[Any]): + SHA256: ClassVar[hashes.SHA256] + SHA384: ClassVar[hashes.SHA384] + SHA512: ClassVar[hashes.SHA512] + hash_alg: Union[HashAlgorithm, Prehashed] + def __init__(self, hash_alg: Union[HashAlgorithm, Prehashed]) -> None: ... + def prepare_key(self, key: Union[bytes, str, RSAPrivateKey, RSAPublicKey]) -> Union[RSAPrivateKey, RSAPublicKey]: ... + @staticmethod + def from_jwk(jwk: Union[str, bytes, Dict[str, Any]]) -> Union[RSAPrivateKey, RSAPublicKey]: ... + def sign(self, msg: bytes, key: RSAPrivateKey) -> bytes: ... + def verify(self, msg: bytes, key: RSAPublicKey, sig: bytes) -> bool: ... + +# Only defined if cryptography is installed. +class ECAlgorithm(Algorithm[Any]): + SHA256: ClassVar[hashes.SHA256] + SHA384: ClassVar[hashes.SHA384] + SHA512: ClassVar[hashes.SHA512] + hash_alg: Union[HashAlgorithm, Prehashed] + def __init__(self, hash_alg: Union[HashAlgorithm, Prehashed]) -> None: ... + def prepare_key( + self, key: Union[bytes, str, EllipticCurvePrivateKey, EllipticCurvePublicKey] + ) -> Union[EllipticCurvePrivateKey, EllipticCurvePublicKey]: ...
+ @staticmethod + def to_jwk(key_obj: Union[EllipticCurvePrivateKeyWithSerialization, EllipticCurvePublicKeyWithSerialization]) -> str: ... + @staticmethod + def from_jwk(jwk: Union[str, bytes]) -> Union[EllipticCurvePrivateKey, EllipticCurvePublicKey]: ... + def sign(self, msg: bytes, key: EllipticCurvePrivateKey) -> bytes: ... + def verify(self, msg: bytes, key: EllipticCurvePublicKey, sig: bytes) -> bool: ... + +# Only defined if cryptography is installed. Types should be tightened when +# cryptography gets type hints. +# See https://github.com/python/typeshed/issues/2542 +class RSAPSSAlgorithm(RSAAlgorithm): + def sign(self, msg: bytes, key: Any) -> bytes: ... + def verify(self, msg: bytes, key: Any, sig: bytes) -> bool: ... + +# Only defined if cryptography is installed. +class Ed25519Algorithm(Algorithm[Any]): + def __init__(self, **kwargs: Any) -> None: ... + def prepare_key(self, key: Union[str, bytes, Ed25519PrivateKey, Ed25519PublicKey]) -> Any: ... + def sign(self, msg: Union[str, bytes], key: Ed25519PrivateKey) -> bytes: ... + def verify(self, msg: Union[str, bytes], key: Ed25519PublicKey, sig: Union[str, bytes]) -> bool: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/py_ecdsa.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/py_ecdsa.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0f63de0a6cd0dcc21db8b1a6116260c443cd70a1 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/py_ecdsa.pyi @@ -0,0 +1,10 @@ +import hashlib +from typing import Any + +from jwt.algorithms import Algorithm + +class ECAlgorithm(Algorithm[Any]): + SHA256: hashlib._Hash + SHA384: hashlib._Hash + SHA512: hashlib._Hash + def __init__(self, hash_alg: hashlib._Hash) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/pycrypto.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/pycrypto.pyi new file mode 100644 index 0000000000000000000000000000000000000000..077684c67734f55a2c55bf58d0e3ae36e1f99e8c --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/jwt/contrib/algorithms/pycrypto.pyi @@ -0,0 +1,10 @@ +import hashlib +from typing import Any + +from jwt.algorithms import Algorithm + +class RSAAlgorithm(Algorithm[Any]): + SHA256: hashlib._Hash + SHA384: hashlib._Hash + SHA512: hashlib._Hash + def __init__(self, hash_alg: hashlib._Hash) -> None: ... 
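A minimal round trip through the encode/decode signatures stubbed above, using an HMAC algorithm so the optional cryptography dependency is not needed; the secret is a placeholder. Note the stub targets PyJWT 1.x, where encode returns bytes:

import jwt

secret = "not-a-real-secret"  # placeholder key for illustration
token = jwt.encode({"sub": "user-123"}, secret, algorithm="HS256")
claims = jwt.decode(token, secret, algorithms=["HS256"])
assert claims["sub"] == "user-123"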
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/orjson.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/orjson.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3fcbd7b668ce295ef6fd06369b84a433c0b8020a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/orjson.pyi @@ -0,0 +1,24 @@ +from typing import Any, Callable, Optional, Union + +__version__: str + +def dumps(__obj: Any, default: Optional[Callable[[Any], Any]] = ..., option: Optional[int] = ...) -> bytes: ... +def loads(__obj: Union[bytes, bytearray, str]) -> Any: ... + +class JSONDecodeError(ValueError): ... +class JSONEncodeError(TypeError): ... + +OPT_APPEND_NEWLINE: int +OPT_INDENT_2: int +OPT_NAIVE_UTC: int +OPT_NON_STR_KEYS: int +OPT_OMIT_MICROSECONDS: int +OPT_PASSTHROUGH_DATACLASS: int +OPT_PASSTHROUGH_DATETIME: int +OPT_PASSTHROUGH_SUBCLASS: int +OPT_SERIALIZE_DATACLASS: int +OPT_SERIALIZE_NUMPY: int +OPT_SERIALIZE_UUID: int +OPT_SORT_KEYS: int +OPT_STRICT_INTEGER: int +OPT_UTC_Z: int diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7f827747eac8d7983c0420164cbc42ab9cbd208f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/__init__.pyi @@ -0,0 +1,271 @@ +import importlib.abc +import types +import zipimport +from abc import ABCMeta +from typing import IO, Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Set, Tuple, TypeVar, Union, overload + +_T = TypeVar("_T") +_NestedStr = Union[str, Iterable[Union[str, Iterable[Any]]]] +_InstallerType = Callable[[Requirement], Optional[Distribution]] +_EPDistType = Union[Distribution, Requirement, str] +_MetadataType = Optional[IResourceProvider] +_PkgReqType = Union[str, Requirement] +_DistFinderType = Callable[[_Importer, str, bool], Generator[Distribution, None, None]] +_NSHandlerType = Callable[[_Importer, str, str, types.ModuleType], str] + +def declare_namespace(name: str) -> None: ... +def fixup_namespace_packages(path_item: str) -> None: ... + +class WorkingSet: + entries: List[str] + def __init__(self, entries: Optional[Iterable[str]] = ...) -> None: ... + def require(self, *requirements: _NestedStr) -> Sequence[Distribution]: ... + def run_script(self, requires: str, script_name: str) -> None: ... + def iter_entry_points(self, group: str, name: Optional[str] = ...) -> Generator[EntryPoint, None, None]: ... + def add_entry(self, entry: str) -> None: ... + def __contains__(self, dist: Distribution) -> bool: ... + def __iter__(self) -> Generator[Distribution, None, None]: ... + def find(self, req: Requirement) -> Optional[Distribution]: ... + def resolve( + self, requirements: Iterable[Requirement], env: Optional[Environment] = ..., installer: Optional[_InstallerType] = ... + ) -> List[Distribution]: ... + def add(self, dist: Distribution, entry: Optional[str] = ..., insert: bool = ..., replace: bool = ...) -> None: ... + def subscribe(self, callback: Callable[[Distribution], None]) -> None: ... + def find_plugins( + self, plugin_env: Environment, full_env: Optional[Environment] = ..., fallback: bool = ... + ) -> Tuple[List[Distribution], Dict[Distribution, Exception]]: ... + +working_set: WorkingSet + +def require(*requirements: _NestedStr) -> Sequence[Distribution]: ... 
+def run_script(requires: str, script_name: str) -> None: ... +def iter_entry_points(group: str, name: Optional[str] = ...) -> Generator[EntryPoint, None, None]: ... +def add_activation_listener(callback: Callable[[Distribution], None]) -> None: ... + +class Environment: + def __init__( + self, search_path: Optional[Sequence[str]] = ..., platform: Optional[str] = ..., python: Optional[str] = ... + ) -> None: ... + def __getitem__(self, project_name: str) -> List[Distribution]: ... + def __iter__(self) -> Generator[str, None, None]: ... + def add(self, dist: Distribution) -> None: ... + def remove(self, dist: Distribution) -> None: ... + def can_add(self, dist: Distribution) -> bool: ... + def __add__(self, other: Union[Distribution, Environment]) -> Environment: ... + def __iadd__(self, other: Union[Distribution, Environment]) -> Environment: ... + @overload + def best_match(self, req: Requirement, working_set: WorkingSet) -> Distribution: ... + @overload + def best_match(self, req: Requirement, working_set: WorkingSet, installer: Callable[[Requirement], _T] = ...) -> _T: ... + @overload + def obtain(self, requirement: Requirement) -> None: ... + @overload + def obtain(self, requirement: Requirement, installer: Callable[[Requirement], _T] = ...) -> _T: ... + def scan(self, search_path: Optional[Sequence[str]] = ...) -> None: ... + +def parse_requirements(strs: Union[str, Iterable[str]]) -> Generator[Requirement, None, None]: ... + +class Requirement: + unsafe_name: str + project_name: str + key: str + extras: Tuple[str, ...] + specs: List[Tuple[str, str]] + # TODO: change this to Optional[packaging.markers.Marker] once we can import + # packaging.markers + marker: Optional[Any] + @staticmethod + def parse(s: Union[str, Iterable[str]]) -> Requirement: ... + def __contains__(self, item: Union[Distribution, str, Tuple[str, ...]]) -> bool: ... + def __eq__(self, other_requirement: Any) -> bool: ... + +def load_entry_point(dist: _EPDistType, group: str, name: str) -> Any: ... +def get_entry_info(dist: _EPDistType, group: str, name: str) -> Optional[EntryPoint]: ... +@overload +def get_entry_map(dist: _EPDistType) -> Dict[str, Dict[str, EntryPoint]]: ... +@overload +def get_entry_map(dist: _EPDistType, group: str) -> Dict[str, EntryPoint]: ... + +class EntryPoint: + name: str + module_name: str + attrs: Tuple[str, ...] + extras: Tuple[str, ...] + dist: Optional[Distribution] + def __init__( + self, + name: str, + module_name: str, + attrs: Tuple[str, ...] = ..., + extras: Tuple[str, ...] = ..., + dist: Optional[Distribution] = ..., + ) -> None: ... + @classmethod + def parse(cls, src: str, dist: Optional[Distribution] = ...) -> EntryPoint: ... + @classmethod + def parse_group( + cls, group: str, lines: Union[str, Sequence[str]], dist: Optional[Distribution] = ... + ) -> Dict[str, EntryPoint]: ... + @classmethod + def parse_map( + cls, data: Union[Dict[str, Union[str, Sequence[str]]], str, Sequence[str]], dist: Optional[Distribution] = ... + ) -> Dict[str, EntryPoint]: ... + def load(self, require: bool = ..., env: Optional[Environment] = ..., installer: Optional[_InstallerType] = ...) -> Any: ... + def require(self, env: Optional[Environment] = ..., installer: Optional[_InstallerType] = ...) -> None: ... + def resolve(self) -> Any: ... + +def find_distributions(path_item: str, only: bool = ...) -> Generator[Distribution, None, None]: ... +def get_distribution(dist: Union[Requirement, str, Distribution]) -> Distribution: ... 
+ +class Distribution(IResourceProvider, IMetadataProvider): + PKG_INFO: str + location: str + project_name: str + key: str + extras: List[str] + version: str + parsed_version: Tuple[str, ...] + py_version: str + platform: Optional[str] + precedence: int + def __init__( + self, + location: Optional[str] = ..., + metadata: _MetadataType = ..., + project_name: Optional[str] = ..., + version: Optional[str] = ..., + py_version: str = ..., + platform: Optional[str] = ..., + precedence: int = ..., + ) -> None: ... + @classmethod + def from_location( + cls, location: str, basename: str, metadata: _MetadataType = ..., **kw: Union[str, None, int] + ) -> Distribution: ... + @classmethod + def from_filename(cls, filename: str, metadata: _MetadataType = ..., **kw: Union[str, None, int]) -> Distribution: ... + def activate(self, path: Optional[List[str]] = ...) -> None: ... + def as_requirement(self) -> Requirement: ... + def requires(self, extras: Tuple[str, ...] = ...) -> List[Requirement]: ... + def clone(self, **kw: Union[str, int, None]) -> Distribution: ... + def egg_name(self) -> str: ... + def __cmp__(self, other: Any) -> bool: ... + def get_entry_info(self, group: str, name: str) -> Optional[EntryPoint]: ... + @overload + def get_entry_map(self) -> Dict[str, Dict[str, EntryPoint]]: ... + @overload + def get_entry_map(self, group: str) -> Dict[str, EntryPoint]: ... + def load_entry_point(self, group: str, name: str) -> Any: ... + +EGG_DIST: int +BINARY_DIST: int +SOURCE_DIST: int +CHECKOUT_DIST: int +DEVELOP_DIST: int + +def resource_exists(package_or_requirement: _PkgReqType, resource_name: str) -> bool: ... +def resource_stream(package_or_requirement: _PkgReqType, resource_name: str) -> IO[bytes]: ... +def resource_string(package_or_requirement: _PkgReqType, resource_name: str) -> bytes: ... +def resource_isdir(package_or_requirement: _PkgReqType, resource_name: str) -> bool: ... +def resource_listdir(package_or_requirement: _PkgReqType, resource_name: str) -> List[str]: ... +def resource_filename(package_or_requirement: _PkgReqType, resource_name: str) -> str: ... +def set_extraction_path(path: str) -> None: ... +def cleanup_resources(force: bool = ...) -> List[str]: ... + +class IResourceManager: + def resource_exists(self, package_or_requirement: _PkgReqType, resource_name: str) -> bool: ... + def resource_stream(self, package_or_requirement: _PkgReqType, resource_name: str) -> IO[bytes]: ... + def resource_string(self, package_or_requirement: _PkgReqType, resource_name: str) -> bytes: ... + def resource_isdir(self, package_or_requirement: _PkgReqType, resource_name: str) -> bool: ... + def resource_listdir(self, package_or_requirement: _PkgReqType, resource_name: str) -> List[str]: ... + def resource_filename(self, package_or_requirement: _PkgReqType, resource_name: str) -> str: ... + def set_extraction_path(self, path: str) -> None: ... + def cleanup_resources(self, force: bool = ...) -> List[str]: ... + def get_cache_path(self, archive_name: str, names: Iterable[str] = ...) -> str: ... + def extraction_error(self) -> None: ... + def postprocess(self, tempname: str, filename: str) -> None: ... + +@overload +def get_provider(package_or_requirement: str) -> IResourceProvider: ... +@overload +def get_provider(package_or_requirement: Requirement) -> Distribution: ... + +class IMetadataProvider: + def has_metadata(self, name: str) -> bool: ... + def metadata_isdir(self, name: str) -> bool: ... + def metadata_listdir(self, name: str) -> List[str]: ...
+ def get_metadata(self, name: str) -> str: ... + def get_metadata_lines(self, name: str) -> Generator[str, None, None]: ... + def run_script(self, script_name: str, namespace: Dict[str, Any]) -> None: ... + +class ResolutionError(Exception): ... + +class DistributionNotFound(ResolutionError): + @property + def req(self) -> Requirement: ... + @property + def requirers(self) -> Set[str]: ... + @property + def requirers_str(self) -> str: ... + def report(self) -> str: ... + +class VersionConflict(ResolutionError): + @property + def dist(self) -> Any: ... + @property + def req(self) -> Any: ... + def report(self) -> str: ... + def with_context(self, required_by: Set[Union[Distribution, str]]) -> VersionConflict: ... + +class ContextualVersionConflict(VersionConflict): + @property + def required_by(self) -> Set[Union[Distribution, str]]: ... + +class UnknownExtra(ResolutionError): ... + +class ExtractionError(Exception): + manager: IResourceManager + cache_path: str + original_error: Exception + +class _Importer(importlib.abc.MetaPathFinder, importlib.abc.InspectLoader, metaclass=ABCMeta): ... + +def register_finder(importer_type: type, distribution_finder: _DistFinderType) -> None: ... +def register_loader_type(loader_type: type, provider_factory: Callable[[types.ModuleType], IResourceProvider]) -> None: ... +def register_namespace_handler(importer_type: type, namespace_handler: _NSHandlerType) -> None: ... + +class IResourceProvider(IMetadataProvider): ... +class NullProvider: ... +class EggProvider(NullProvider): ... +class DefaultProvider(EggProvider): ... + +class PathMetadata(DefaultProvider, IResourceProvider): + def __init__(self, path: str, egg_info: str) -> None: ... + +class ZipProvider(EggProvider): ... + +class EggMetadata(ZipProvider, IResourceProvider): + def __init__(self, zipimporter: zipimport.zipimporter) -> None: ... + +class EmptyProvider(NullProvider): ... + +empty_provider: EmptyProvider + +class FileMetadata(EmptyProvider, IResourceProvider): + def __init__(self, path_to_pkg_info: str) -> None: ... + +def parse_version(v: str) -> Tuple[str, ...]: ... +def yield_lines(strs: _NestedStr) -> Generator[str, None, None]: ... +def split_sections(strs: _NestedStr) -> Generator[Tuple[Optional[str], str], None, None]: ... +def safe_name(name: str) -> str: ... +def safe_version(version: str) -> str: ... +def safe_extra(extra: str) -> str: ... +def to_filename(name_or_version: str) -> str: ... +def get_build_platform() -> str: ... +def get_platform() -> str: ... +def get_supported_platform() -> str: ... +def compatible_platforms(provided: Optional[str], required: Optional[str]) -> bool: ... +def get_default_cache() -> str: ... +def get_importer(path_item: str) -> _Importer: ... +def ensure_directory(path: str) -> None: ... +def normalize_path(filename: str) -> str: ... 
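A sketch of the distribution and entry-point lookups stubbed above; the plugin group name is hypothetical:

import pkg_resources

# Find an installed distribution and inspect it.
dist = pkg_resources.get_distribution("setuptools")
print(dist.project_name, dist.version)

# Iterate plugins advertised under a (hypothetical) entry-point group;
# ep.load() imports the module and resolves the named attribute.
for ep in pkg_resources.iter_entry_points("example.plugins"):
    print(ep.name, ep.load())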
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/py31compat.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/py31compat.pyi new file mode 100644 index 0000000000000000000000000000000000000000..162da65e0e67604a9ddff219d27585df9ead3078 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pkg_resources/py31compat.pyi @@ -0,0 +1,5 @@ +import os + +needs_makedirs: bool + +makedirs = os.makedirs diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d8c1ca5e88c7765707a24a40378fc8695c7e6e65 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/__init__.pyi @@ -0,0 +1,2 @@ +from .generator import generate as generate +from .parser import parse as parse diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/generator.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/generator.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9375b5b6110964a30627f3bb55fc3f30da449e64 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/generator.pyi @@ -0,0 +1,3 @@ +from datetime import datetime + +def generate(dt: datetime, utc: bool = ..., accept_naive: bool = ..., microseconds: bool = ...) -> str: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/parser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/parser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..445b542c38cd2e203959fcdf531d338783700880 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/parser.pyi @@ -0,0 +1,3 @@ +from datetime import datetime + +def parse(timestamp: str, utc: bool = ..., produce_naive: bool = ...) -> datetime: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/utils.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f49a8c06850240dcf62890a42f9e40fcedc3857d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/pyrfc3339/utils.pyi @@ -0,0 +1,11 @@ +from datetime import datetime, timedelta, tzinfo +from typing import Optional + +class FixedOffset(tzinfo): + def __init__(self, hours: float, minutes: float) -> None: ... + def dst(self, dt: Optional[datetime]) -> timedelta: ... + def utcoffset(self, dt: Optional[datetime]) -> timedelta: ... + def tzname(self, dt: Optional[datetime]) -> str: ... + +def timedelta_seconds(td: timedelta) -> int: ... +def timezone(utcoffset: float) -> str: ... 
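The pyrfc3339 stubs above expose a two-function surface; a round-trip sketch, assuming the package is installed:

import datetime
import pyrfc3339

ts = pyrfc3339.generate(datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc))
assert ts == "2021-01-01T00:00:00Z"
assert pyrfc3339.parse(ts) == datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)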
diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bd055bbca80a9768bf336a55a945c9f73635950f --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/__init__.pyi @@ -0,0 +1,125 @@ +from __future__ import print_function + +import types +import typing +import unittest +from builtins import next as next +from functools import wraps as wraps +from io import BytesIO as BytesIO, StringIO as StringIO +from typing import ( + Any, + AnyStr, + Callable, + Dict, + ItemsView, + Iterable, + KeysView, + Mapping, + NoReturn, + Optional, + Pattern, + Tuple, + Type, + TypeVar, + Union, + ValuesView, + overload, +) + +from . import moves as moves + +_T = TypeVar("_T") +_K = TypeVar("_K") +_V = TypeVar("_V") + +__version__: str + +# TODO make constant, then move this stub to 2and3 +# https://github.com/python/typeshed/issues/17 +PY2 = False +PY3 = True +PY34: bool + +string_types = (str,) +integer_types = (int,) +class_types = (type,) +text_type = str +binary_type = bytes + +MAXSIZE: int + +def callable(obj: object) -> bool: ... +def get_unbound_function(unbound: types.FunctionType) -> types.FunctionType: ... +def create_bound_method(func: types.FunctionType, obj: object) -> types.MethodType: ... +def create_unbound_method(func: types.FunctionType, cls: type) -> types.FunctionType: ... + +Iterator = object + +def get_method_function(meth: types.MethodType) -> types.FunctionType: ... +def get_method_self(meth: types.MethodType) -> Optional[object]: ... +def get_function_closure(fun: types.FunctionType) -> Optional[Tuple[types._Cell, ...]]: ... +def get_function_code(fun: types.FunctionType) -> types.CodeType: ... +def get_function_defaults(fun: types.FunctionType) -> Optional[Tuple[Any, ...]]: ... +def get_function_globals(fun: types.FunctionType) -> Dict[str, Any]: ... +def iterkeys(d: Mapping[_K, _V]) -> typing.Iterator[_K]: ... +def itervalues(d: Mapping[_K, _V]) -> typing.Iterator[_V]: ... +def iteritems(d: Mapping[_K, _V]) -> typing.Iterator[Tuple[_K, _V]]: ... + +# def iterlists + +def viewkeys(d: Mapping[_K, _V]) -> KeysView[_K]: ... +def viewvalues(d: Mapping[_K, _V]) -> ValuesView[_V]: ... +def viewitems(d: Mapping[_K, _V]) -> ItemsView[_K, _V]: ... +def b(s: str) -> binary_type: ... +def u(s: str) -> text_type: ... + +unichr = chr + +def int2byte(i: int) -> bytes: ... +def byte2int(bs: binary_type) -> int: ... +def indexbytes(buf: binary_type, i: int) -> int: ... +def iterbytes(buf: binary_type) -> typing.Iterator[int]: ... +def assertCountEqual(self: unittest.TestCase, first: Iterable[_T], second: Iterable[_T], msg: Optional[str] = ...) -> None: ... +@overload +def assertRaisesRegex(self: unittest.TestCase, msg: Optional[str] = ...) -> Any: ... +@overload +def assertRaisesRegex(self: unittest.TestCase, callable_obj: Callable[..., Any], *args: Any, **kwargs: Any) -> Any: ... +def assertRegex( + self: unittest.TestCase, text: AnyStr, expected_regex: Union[AnyStr, Pattern[AnyStr]], msg: Optional[str] = ... +) -> None: ... + +exec_ = exec + +def reraise( + tp: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[types.TracebackType] = ... +) -> NoReturn: ... +def raise_from(value: Union[BaseException, Type[BaseException]], from_value: Optional[BaseException]) -> NoReturn: ... 
+ +print_ = print + +def with_metaclass(meta: type, *bases: type) -> type: ... +def add_metaclass(metaclass: type) -> Callable[[_T], _T]: ... +def ensure_binary(s: Union[bytes, str], encoding: str = ..., errors: str = ...) -> bytes: ... +def ensure_str(s: Union[bytes, str], encoding: str = ..., errors: str = ...) -> str: ... +def ensure_text(s: Union[bytes, str], encoding: str = ..., errors: str = ...) -> str: ... +def python_2_unicode_compatible(klass: _T) -> _T: ... + +class _LazyDescriptor: + name: str + def __init__(self, name: str) -> None: ... + def __get__(self, obj: Optional[object], type: Optional[type] = ...) -> Any: ... + +class MovedModule(_LazyDescriptor): + mod: str + def __init__(self, name: str, old: str, new: Optional[str] = ...) -> None: ... + def __getattr__(self, attr: str) -> Any: ... + +class MovedAttribute(_LazyDescriptor): + mod: str + attr: str + def __init__( + self, name: str, old_mod: str, new_mod: str, old_attr: Optional[str] = ..., new_attr: Optional[str] = ... + ) -> None: ... + +def add_move(move: Union[MovedModule, MovedAttribute]) -> None: ... +def remove_move(name: str) -> None: ... diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/BaseHTTPServer.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/BaseHTTPServer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0e1ad713145883b4743b74f9a2732db47fecd4a5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/BaseHTTPServer.pyi @@ -0,0 +1 @@ +from http.server import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/CGIHTTPServer.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/CGIHTTPServer.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0e1ad713145883b4743b74f9a2732db47fecd4a5 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/CGIHTTPServer.pyi @@ -0,0 +1 @@ +from http.server import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..3455676db51ebc5c0b1a461564e404d334f08831 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/__init__.pyi @@ -0,0 +1,65 @@ +# Stubs for six.moves +# +# Note: Commented out items means they weren't implemented at the time. +# Uncomment them when the modules have been added to the typeshed. 
+import importlib +import shlex +from builtins import filter as filter, input as input, map as map, range as range, zip as zip +from collections import UserDict as UserDict, UserList as UserList, UserString as UserString +from functools import reduce as reduce +from io import StringIO as StringIO +from itertools import filterfalse as filterfalse, zip_longest as zip_longest +from os import getcwd as getcwd, getcwdb as getcwdb +from sys import intern as intern + +# import tkinter.font as tkinter_font +# import tkinter.messagebox as tkinter_messagebox +# import tkinter.simpledialog as tkinter_tksimpledialog +# import tkinter.dnd as tkinter_dnd +# import tkinter.colorchooser as tkinter_colorchooser +# import tkinter.scrolledtext as tkinter_scrolledtext +# import tkinter.simpledialog as tkinter_simpledialog +# import tkinter.tix as tkinter_tix +# import copyreg as copyreg +# import dbm.gnu as dbm_gnu +from . import ( + BaseHTTPServer as BaseHTTPServer, + CGIHTTPServer as CGIHTTPServer, + SimpleHTTPServer as SimpleHTTPServer, + _dummy_thread as _dummy_thread, + _thread as _thread, + builtins as builtins, + configparser as configparser, + cPickle as cPickle, + email_mime_base as email_mime_base, + email_mime_multipart as email_mime_multipart, + email_mime_nonmultipart as email_mime_nonmultipart, + email_mime_text as email_mime_text, + html_entities as html_entities, + html_parser as html_parser, + http_client as http_client, + http_cookiejar as http_cookiejar, + http_cookies as http_cookies, + queue as queue, + reprlib as reprlib, + socketserver as socketserver, + tkinter as tkinter, + tkinter_commondialog as tkinter_commondialog, + tkinter_constants as tkinter_constants, + tkinter_dialog as tkinter_dialog, + tkinter_filedialog as tkinter_filedialog, + tkinter_tkfiledialog as tkinter_tkfiledialog, + tkinter_ttk as tkinter_ttk, + urllib as urllib, + urllib_error as urllib_error, + urllib_parse as urllib_parse, + urllib_robotparser as urllib_robotparser, +) + +# import xmlrpc.client as xmlrpc_client +# import xmlrpc.server as xmlrpc_server + +xrange = range +reload_module = importlib.reload +cStringIO = StringIO +shlex_quote = shlex.quote diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/_thread.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/_thread.pyi new file mode 100644 index 0000000000000000000000000000000000000000..25952a61494f5bfd1d8590b0d8c0bc6463b2a557 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/_thread.pyi @@ -0,0 +1 @@ +from _thread import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/builtins.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/builtins.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9596ba032aa04297e0de1b55984edc6644bdcf8e --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/builtins.pyi @@ -0,0 +1 @@ +from builtins import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/cPickle.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/cPickle.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2b944b59d656595cf65b49208c94952ead33a51e --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/cPickle.pyi @@ -0,0 +1 @@ +from pickle import * diff --git 
a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/collections_abc.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/collections_abc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..dba0f1535768ef4cb3b6ccee1a244f15efdf6d7a --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/collections_abc.pyi @@ -0,0 +1 @@ +from collections.abc import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/configparser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/configparser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..044861ce0c38d4b7901e7ac1970747b0628c5928 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/configparser.pyi @@ -0,0 +1 @@ +from configparser import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_base.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_base.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4df155c939d5592c4301cf93099d7163c7f11619 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_base.pyi @@ -0,0 +1 @@ +from email.mime.base import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_multipart.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_multipart.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4f312412bbc0d41f6a645da66df4beefc14f224c --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_multipart.pyi @@ -0,0 +1 @@ +from email.mime.multipart import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_nonmultipart.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_nonmultipart.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c15c8c0440b5e4817384341f18351d5a078ba237 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_nonmultipart.pyi @@ -0,0 +1 @@ +from email.mime.nonmultipart import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_text.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_text.pyi new file mode 100644 index 0000000000000000000000000000000000000000..51e147387fa218e3269958debaae49027f390e43 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/email_mime_text.pyi @@ -0,0 +1 @@ +from email.mime.text import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_entities.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_entities.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c1244ddbee455e79f8eec1f16fb591a33965fd4d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_entities.pyi @@ -0,0 +1 @@ +from html.entities import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_parser.pyi 
b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_parser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..6db6dd83f35ee3d637c2235e7a2a6ec41571a073 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/html_parser.pyi @@ -0,0 +1 @@ +from html.parser import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_client.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_client.pyi new file mode 100644 index 0000000000000000000000000000000000000000..36d29b9551bc7014616d3355f86bd7cc614b68d1 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_client.pyi @@ -0,0 +1 @@ +from http.client import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookiejar.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookiejar.pyi new file mode 100644 index 0000000000000000000000000000000000000000..88a1aed6cc0f32642ea2ec2dff27830ba6d941e7 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookiejar.pyi @@ -0,0 +1 @@ +from http.cookiejar import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookies.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookies.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9c59a53977bcaabfbb84049a1aa82d4bd8aeb4b2 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/http_cookies.pyi @@ -0,0 +1 @@ +from http.cookies import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/queue.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/queue.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fe7be53a37ad9af2008cd02c63efc5ede8887ab3 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/queue.pyi @@ -0,0 +1 @@ +from queue import * diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/__init__.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d08209c51cc1a932021c9e8c81760a800966905d --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/__init__.pyi @@ -0,0 +1,5 @@ +import six.moves.urllib.error as error +import six.moves.urllib.parse as parse +import six.moves.urllib.request as request +import six.moves.urllib.response as response +import six.moves.urllib.robotparser as robotparser diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/error.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/error.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4e10fe2fd42f550d3cd725bf75b327154c91fe61 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/error.pyi @@ -0,0 +1 @@ +from urllib.error import ContentTooShortError as ContentTooShortError, HTTPError as HTTPError, URLError as URLError diff --git 
a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/parse.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/parse.pyi new file mode 100644 index 0000000000000000000000000000000000000000..20adc639d55af19a105d8d0347caad45be6331ad --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/parse.pyi @@ -0,0 +1,30 @@ +# Stubs for six.moves.urllib.parse +# +# Note: Commented out items means they weren't implemented at the time. +# Uncomment them when the modules have been added to the typeshed. +# from urllib.parse import splitquery as splitquery +# from urllib.parse import splittag as splittag +# from urllib.parse import splituser as splituser +from urllib.parse import ( + ParseResult as ParseResult, + SplitResult as SplitResult, + parse_qs as parse_qs, + parse_qsl as parse_qsl, + quote as quote, + quote_plus as quote_plus, + unquote as unquote, + unquote_plus as unquote_plus, + unquote_to_bytes as unquote_to_bytes, + urldefrag as urldefrag, + urlencode as urlencode, + urljoin as urljoin, + urlparse as urlparse, + urlsplit as urlsplit, + urlunparse as urlunparse, + urlunsplit as urlunsplit, + uses_fragment as uses_fragment, + uses_netloc as uses_netloc, + uses_params as uses_params, + uses_query as uses_query, + uses_relative as uses_relative, +) diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/request.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/request.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9b670b4d98b0cd3b587602b9790e3ef77fe204b0 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/request.pyi @@ -0,0 +1,41 @@ +# Stubs for six.moves.urllib.request +# +# Note: Commented out items means they weren't implemented at the time. +# Uncomment them when the modules have been added to the typeshed. 
+# from urllib.request import proxy_bypass as proxy_bypass +from urllib.request import ( + AbstractBasicAuthHandler as AbstractBasicAuthHandler, + AbstractDigestAuthHandler as AbstractDigestAuthHandler, + BaseHandler as BaseHandler, + CacheFTPHandler as CacheFTPHandler, + FancyURLopener as FancyURLopener, + FileHandler as FileHandler, + FTPHandler as FTPHandler, + HTTPBasicAuthHandler as HTTPBasicAuthHandler, + HTTPCookieProcessor as HTTPCookieProcessor, + HTTPDefaultErrorHandler as HTTPDefaultErrorHandler, + HTTPDigestAuthHandler as HTTPDigestAuthHandler, + HTTPErrorProcessor as HTTPErrorProcessor, + HTTPHandler as HTTPHandler, + HTTPPasswordMgr as HTTPPasswordMgr, + HTTPPasswordMgrWithDefaultRealm as HTTPPasswordMgrWithDefaultRealm, + HTTPRedirectHandler as HTTPRedirectHandler, + HTTPSHandler as HTTPSHandler, + OpenerDirector as OpenerDirector, + ProxyBasicAuthHandler as ProxyBasicAuthHandler, + ProxyDigestAuthHandler as ProxyDigestAuthHandler, + ProxyHandler as ProxyHandler, + Request as Request, + UnknownHandler as UnknownHandler, + URLopener as URLopener, + build_opener as build_opener, + getproxies as getproxies, + install_opener as install_opener, + parse_http_list as parse_http_list, + parse_keqv_list as parse_keqv_list, + pathname2url as pathname2url, + url2pathname as url2pathname, + urlcleanup as urlcleanup, + urlopen as urlopen, + urlretrieve as urlretrieve, +) diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/response.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/response.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9f681ea33cad7343e6ab24b0e6f2aa2449cdc81c --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/response.pyi @@ -0,0 +1,8 @@ +# Stubs for six.moves.urllib.response +# +# Note: Commented out items means they weren't implemented at the time. +# Uncomment them when the modules have been added to the typeshed. +# from urllib.response import addbase as addbase +# from urllib.response import addclosehook as addclosehook +# from urllib.response import addinfo as addinfo +from urllib.response import addinfourl as addinfourl diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/robotparser.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/robotparser.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bccda14b4342cce0ba0c9224c034a759ec2a1371 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/six/moves/urllib/robotparser.pyi @@ -0,0 +1 @@ +from urllib.robotparser import RobotFileParser as RobotFileParser diff --git a/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/typed_ast/ast27.pyi b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/typed_ast/ast27.pyi new file mode 100644 index 0000000000000000000000000000000000000000..139e58aac246e19330ef8f623312438eca685329 --- /dev/null +++ b/lib/python3.10/site-packages/jedi/third_party/typeshed/third_party/3/typed_ast/ast27.pyi @@ -0,0 +1,335 @@ +import typing +from typing import Any, Iterator, Optional, Union + +class NodeVisitor: + def visit(self, node: AST) -> Any: ... + def generic_visit(self, node: AST) -> None: ... + +class NodeTransformer(NodeVisitor): + def generic_visit(self, node: AST) -> None: ... 
+ +def parse(source: Union[str, bytes], filename: Union[str, bytes] = ..., mode: str = ...) -> AST: ... +def copy_location(new_node: AST, old_node: AST) -> AST: ... +def dump(node: AST, annotate_fields: bool = ..., include_attributes: bool = ...) -> str: ... +def fix_missing_locations(node: AST) -> AST: ... +def get_docstring(node: AST, clean: bool = ...) -> Optional[bytes]: ... +def increment_lineno(node: AST, n: int = ...) -> AST: ... +def iter_child_nodes(node: AST) -> Iterator[AST]: ... +def iter_fields(node: AST) -> Iterator[typing.Tuple[str, Any]]: ... +def literal_eval(node_or_string: Union[str, AST]) -> Any: ... +def walk(node: AST) -> Iterator[AST]: ... + +PyCF_ONLY_AST: int + +# ast classes + +identifier = str + +class AST: + _attributes: typing.Tuple[str, ...] + _fields: typing.Tuple[str, ...] + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + +class mod(AST): ... + +class Module(mod): + body: typing.List[stmt] + type_ignores: typing.List[TypeIgnore] + +class Interactive(mod): + body: typing.List[stmt] + +class Expression(mod): + body: expr + +class FunctionType(mod): + argtypes: typing.List[expr] + returns: expr + +class Suite(mod): + body: typing.List[stmt] + +class stmt(AST): + lineno: int + col_offset: int + +class FunctionDef(stmt): + name: identifier + args: arguments + body: typing.List[stmt] + decorator_list: typing.List[expr] + type_comment: Optional[str] + +class ClassDef(stmt): + name: identifier + bases: typing.List[expr] + body: typing.List[stmt] + decorator_list: typing.List[expr] + +class Return(stmt): + value: Optional[expr] + +class Delete(stmt): + targets: typing.List[expr] + +class Assign(stmt): + targets: typing.List[expr] + value: expr + type_comment: Optional[str] + +class AugAssign(stmt): + target: expr + op: operator + value: expr + +class Print(stmt): + dest: Optional[expr] + values: typing.List[expr] + nl: bool + +class For(stmt): + target: expr + iter: expr + body: typing.List[stmt] + orelse: typing.List[stmt] + type_comment: Optional[str] + +class While(stmt): + test: expr + body: typing.List[stmt] + orelse: typing.List[stmt] + +class If(stmt): + test: expr + body: typing.List[stmt] + orelse: typing.List[stmt] + +class With(stmt): + context_expr: expr + optional_vars: Optional[expr] + body: typing.List[stmt] + type_comment: Optional[str] + +class Raise(stmt): + type: Optional[expr] + inst: Optional[expr] + tback: Optional[expr] + +class TryExcept(stmt): + body: typing.List[stmt] + handlers: typing.List[ExceptHandler] + orelse: typing.List[stmt] + +class TryFinally(stmt): + body: typing.List[stmt] + finalbody: typing.List[stmt] + +class Assert(stmt): + test: expr + msg: Optional[expr] + +class Import(stmt): + names: typing.List[alias] + +class ImportFrom(stmt): + module: Optional[identifier] + names: typing.List[alias] + level: Optional[int] + +class Exec(stmt): + body: expr + globals: Optional[expr] + locals: Optional[expr] + +class Global(stmt): + names: typing.List[identifier] + +class Expr(stmt): + value: expr + +class Pass(stmt): ... +class Break(stmt): ... +class Continue(stmt): ... +class slice(AST): ... + +_slice = slice # this lets us type the variable named 'slice' below + +class Slice(slice): + lower: Optional[expr] + upper: Optional[expr] + step: Optional[expr] + +class ExtSlice(slice): + dims: typing.List[slice] + +class Index(slice): + value: expr + +class Ellipsis(slice): ... 
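The stub above types the `typed_ast.ast27` surface: module-level helpers (`parse`, `walk`, `dump`, ...) plus one class per Python 2.7 AST node. A minimal sketch of how that API is driven, assuming the `typed_ast` package itself is installed (the stub only describes it, and the package is deprecated in favour of the stdlib `ast` on current interpreters); this example is illustrative and not part of the vendored diff:

```
from typed_ast import ast27

# Parse Python 2.7 source (note the print statement) and walk the tree
# using the node classes declared in this stub.
tree = ast27.parse("print 'hi'\nx = 1 + 2\n")
for node in ast27.walk(tree):
    if isinstance(node, ast27.Print):
        print("Print statement at line", node.lineno)
```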
+ +class expr(AST): + lineno: int + col_offset: int + +class BoolOp(expr): + op: boolop + values: typing.List[expr] + +class BinOp(expr): + left: expr + op: operator + right: expr + +class UnaryOp(expr): + op: unaryop + operand: expr + +class Lambda(expr): + args: arguments + body: expr + +class IfExp(expr): + test: expr + body: expr + orelse: expr + +class Dict(expr): + keys: typing.List[expr] + values: typing.List[expr] + +class Set(expr): + elts: typing.List[expr] + +class ListComp(expr): + elt: expr + generators: typing.List[comprehension] + +class SetComp(expr): + elt: expr + generators: typing.List[comprehension] + +class DictComp(expr): + key: expr + value: expr + generators: typing.List[comprehension] + +class GeneratorExp(expr): + elt: expr + generators: typing.List[comprehension] + +class Yield(expr): + value: Optional[expr] + +class Compare(expr): + left: expr + ops: typing.List[cmpop] + comparators: typing.List[expr] + +class Call(expr): + func: expr + args: typing.List[expr] + keywords: typing.List[keyword] + starargs: Optional[expr] + kwargs: Optional[expr] + +class Repr(expr): + value: expr + +class Num(expr): + n: Union[int, float, complex] + +class Str(expr): + s: Union[str, bytes] + kind: str + +class Attribute(expr): + value: expr + attr: identifier + ctx: expr_context + +class Subscript(expr): + value: expr + slice: _slice + ctx: expr_context + +class Name(expr): + id: identifier + ctx: expr_context + +class List(expr): + elts: typing.List[expr] + ctx: expr_context + +class Tuple(expr): + elts: typing.List[expr] + ctx: expr_context + +class expr_context(AST): ... +class AugLoad(expr_context): ... +class AugStore(expr_context): ... +class Del(expr_context): ... +class Load(expr_context): ... +class Param(expr_context): ... +class Store(expr_context): ... +class boolop(AST): ... +class And(boolop): ... +class Or(boolop): ... +class operator(AST): ... +class Add(operator): ... +class BitAnd(operator): ... +class BitOr(operator): ... +class BitXor(operator): ... +class Div(operator): ... +class FloorDiv(operator): ... +class LShift(operator): ... +class Mod(operator): ... +class Mult(operator): ... +class Pow(operator): ... +class RShift(operator): ... +class Sub(operator): ... +class unaryop(AST): ... +class Invert(unaryop): ... +class Not(unaryop): ... +class UAdd(unaryop): ... +class USub(unaryop): ... +class cmpop(AST): ... +class Eq(cmpop): ... +class Gt(cmpop): ... +class GtE(cmpop): ... +class In(cmpop): ... +class Is(cmpop): ... +class IsNot(cmpop): ... +class Lt(cmpop): ... +class LtE(cmpop): ... +class NotEq(cmpop): ... +class NotIn(cmpop): ... 
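The visitor classes declared at the top of this stub follow the stdlib `ast` protocol: `visit` dispatches to `visit_<ClassName>` methods, and a `NodeTransformer` may return replacement nodes. A hypothetical constant-folding pass over the expression nodes typed above, again assuming `typed_ast` is installed and not part of the vendored diff:

```
from typed_ast import ast27

class FoldAdd(ast27.NodeTransformer):
    # Replace `Num + Num` with a single Num node; generic_visit runs first
    # so that nested additions are folded bottom-up.
    def visit_BinOp(self, node):
        self.generic_visit(node)
        if (isinstance(node.op, ast27.Add)
                and isinstance(node.left, ast27.Num)
                and isinstance(node.right, ast27.Num)):
            return ast27.copy_location(ast27.Num(n=node.left.n + node.right.n), node)
        return node

tree = FoldAdd().visit(ast27.parse("x = 1 + 2"))
print(ast27.dump(tree))  # the BinOp has been folded into Num(n=3)
```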
+ +class comprehension(AST): + target: expr + iter: expr + ifs: typing.List[expr] + +class ExceptHandler(AST): + type: Optional[expr] + name: Optional[expr] + body: typing.List[stmt] + lineno: int + col_offset: int + +class arguments(AST): + args: typing.List[expr] + vararg: Optional[identifier] + kwarg: Optional[identifier] + defaults: typing.List[expr] + type_comments: typing.List[Optional[str]] + +class keyword(AST): + arg: identifier + value: expr + +class alias(AST): + name: identifier + asname: Optional[identifier] + +class TypeIgnore(AST): + lineno: int diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93516c5adf832a008b7381f58607137a3c34eb8b Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/_identifier.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/_identifier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334205c7bf35d9efa10d2b9d0f900fe815ae7277 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/_identifier.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/async_utils.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/async_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46adfbb973c1d255e57270a8c2f235591eedabc3 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/async_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/bccache.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/bccache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da2c27100b3a2996fdb3d0224067459c29e1ce54 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/bccache.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/compiler.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/compiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..726251c1df3d03b64d784132af68ccf638184cd5 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/compiler.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/constants.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e4b71c06f87b0e578097789de2ae7222eafe511 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/constants.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/debug.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fdaa67efc3c34c279fe5259726f968e4b5cf624 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/debug.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/defaults.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/defaults.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5639b9d385468e26c08eddcb35af589a356dd85e Binary files /dev/null and 
b/lib/python3.10/site-packages/jinja2/__pycache__/defaults.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/environment.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/environment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36fd26c0414abbb0feac6fcc0f8c1936fc263e00 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/environment.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/exceptions.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59724c2b4af26763eaa4e7a7c3d059dacaa201b1 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/exceptions.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/ext.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/ext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3e248a98a9e9e55801fb4056c10522873cb1b7c Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/ext.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/filters.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/filters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7917963aae2ef65a33a4070bc4a4a310b8340934 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/filters.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/idtracking.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/idtracking.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f98bd71e92f56f71cb0e01a03abb280fa9754652 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/idtracking.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/lexer.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/lexer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18e79f48fc4adfdcbeacc622670c11ba6e44d49c Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/lexer.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/loaders.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/loaders.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12e829d932e1c5b926617e91339742474d4b9e88 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/loaders.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/meta.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/meta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..581684b381771209abe3947dc7cb813c95dd91b1 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/meta.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b22f56deae5511ce86867f4976de76025dd9d1d Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/nativetypes.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jinja2/__pycache__/nodes.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96ca0c6cbf880d8c4be30cebb576617d5233f9b1 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/nodes.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/optimizer.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b58eab582f55c472f79e6076c5d3d7d65be2c8d Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/optimizer.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/parser.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7adb2d2403097e60bca4cd57a1d3f7085616953e Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/parser.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/runtime.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/runtime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdb1c2433eed3cd372ac23a76fb7ffb471f83609 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/runtime.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/sandbox.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/sandbox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93f9f05106ee15c8259b51824b2a936c3851af75 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/sandbox.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/tests.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/tests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4630be9be5648d9cd11b95d01eedfa9bcd1b0298 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/tests.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/utils.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0426123938bd276253e270365b3634e2dda9b1c7 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jinja2/__pycache__/visitor.cpython-310.pyc b/lib/python3.10/site-packages/jinja2/__pycache__/visitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b83eb93b8d2164ea5b4e4b3f7bad6d8f0c2f2d37 Binary files /dev/null and b/lib/python3.10/site-packages/jinja2/__pycache__/visitor.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib-1.5.1.dist-info/licenses/LICENSE.txt b/lib/python3.10/site-packages/joblib-1.5.1.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..910537bd33412dd9b70c4d07cedd41b519be7fb5 --- /dev/null +++ b/lib/python3.10/site-packages/joblib-1.5.1.dist-info/licenses/LICENSE.txt @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2008-2021, The joblib developers. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f514e8d8df1efeaa92268c59f40553b07cbd997a Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d50fdf151aab16d38305bafde10c9c1a6399be26 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3918f25cb8146733092c5b17797194a463b6da10 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_dask.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa3e78185b0e32fa913bf8069a63ffd18daafc1b Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_memmapping_reducer.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53f957986b4079ea822268e16fa6050792dd665c Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_multiprocessing_helpers.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8eb20d77aeb98f644dd24c53e97e6d6716fa203 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_parallel_backends.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e576fc342d55a0a56db1d013221559ba582c3c2 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_store_backends.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7362f4f6536925a54141f5e6a6f2df087de06451 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49cf725f3495e7a59622c5f63d124d362a2f1928 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/backports.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f32ab2a2066a57398fecf6e37f1a89930daa4793 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/compressor.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ef2f44e3adfacaa57ea2866e39f9846a3b61c15 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/disk.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a31845e5cf1d0040ed6938c8898379ead8d2b8ab Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/executor.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75803f0feb2acdb6f7e878b45d0773a6d99bba80 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/func_inspect.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..270fd22805cd264796522316b6d8dcf9085e170f Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/hashing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc 
b/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1356d06f641b067f7ea78b8b59bf48833892aeb0 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/logger.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..182b5f212b2736b67af1810cfcc91d1b8e3ddeed Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/memory.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a38b163eb945f216543612c8e92bc6761c3df340 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98dfb062d9bb8ca5d15ce79568a19535a9347ba7 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_compat.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0984f9debde074442469082682a34f6789e1bbd4 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/numpy_pickle_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77688965e4463f63eafaa18471d4bf2d95b0f028 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/parallel.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c38816861308654b369f6ffa674ed6d3c46433d Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/pool.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc b/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..799fd2cddbd9651474b8db01a11c26f8d3f41150 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/__pycache__/testing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/__init__.py b/lib/python3.10/site-packages/joblib/externals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a89f74441bb28a710b61f0c001f63cecc293454 Binary files 
/dev/null and b/lib/python3.10/site-packages/joblib/externals/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3768a936efd38d3799af95d4420457525c119a42 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__init__.py @@ -0,0 +1,18 @@ +from . import cloudpickle +from .cloudpickle import * # noqa + +__doc__ = cloudpickle.__doc__ + +__version__ = "3.1.1" + +__all__ = [ # noqa + "__version__", + "Pickler", + "CloudPickler", + "dumps", + "loads", + "dump", + "load", + "register_pickle_by_value", + "unregister_pickle_by_value", +] diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98f4e3444a0d89186b0e960ae95c71b90a06a3e2 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2df723c262b28515dac7c78e8ff3be6d2b7fc3c Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb92cbaa3d30239f88e33175732798e3b3b637a2 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/cloudpickle/__pycache__/cloudpickle_fast.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py b/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py new file mode 100644 index 0000000000000000000000000000000000000000..4d532e5de9f2cd4fc82bb7cd9e982d47c26dc5e5 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle.py @@ -0,0 +1,1545 @@ +"""Pickler class to extend the standard pickle.Pickler functionality + +The main objective is to make it natural to perform distributed computing on +clusters (such as PySpark, Dask, Ray...) with interactively defined code +(functions, classes, ...) written in notebooks or console. + +In particular this pickler adds the following features: +- serialize interactively-defined or locally-defined functions, classes, + enums, typevars, lambdas and nested functions to compiled byte code; +- deal with some other non-serializable objects in an ad-hoc manner where + applicable. + +This pickler is therefore meant to be used for the communication between short +lived Python processes running the same version of Python and libraries. In +particular, it is not meant to be used for long term storage of Python objects. + +It does not include an unpickler, as standard Python unpickling suffices. + +This module was extracted from the `cloud` package, developed by `PiCloud, Inc. +`_. 
+ +Copyright (c) 2012-now, CloudPickle developers and contributors. +Copyright (c) 2012, Regents of the University of California. +Copyright (c) 2009 `PiCloud, Inc. `_. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the University of California, Berkeley nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import _collections_abc +from collections import ChainMap, OrderedDict +import abc +import builtins +import copyreg +import dataclasses +import dis +from enum import Enum +import io +import itertools +import logging +import opcode +import pickle +from pickle import _getattribute as _pickle_getattribute +import platform +import struct +import sys +import threading +import types +import typing +import uuid +import warnings +import weakref + +# The following import is required to be imported in the cloudpickle +# namespace to be able to load pickle files generated with older versions of +# cloudpickle. See: tests/test_backward_compat.py +from types import CellType # noqa: F401 + + +# cloudpickle is meant for inter process communication: we expect all +# communicating processes to run the same Python version hence we favor +# communication speed over compatibility: +DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + +# Names of modules whose resources should be treated as dynamic. +_PICKLE_BY_VALUE_MODULES = set() + +# Track the provenance of reconstructed dynamic classes to make it possible to +# reconstruct instances from the matching singleton class definition when +# appropriate and preserve the usual "isinstance" semantics of Python objects. 
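The weak-reference maps defined next implement the provenance tracking that the preceding comment describes. A small sketch of the observable behaviour, assuming `cloudpickle` is the installed top-level package that re-exports this module (illustrative, not part of the vendored file):

```
import pickle
import cloudpickle

class Dynamic:  # defined in __main__, not importable from any module
    pass

# The class is pickled by value, but the tracker maps its id back to the
# existing class object when loading in the same process, so the usual
# isinstance semantics survive the round-trip.
blob = cloudpickle.dumps(Dynamic())
restored = pickle.loads(blob)  # plain pickle can load cloudpickle payloads
assert isinstance(restored, Dynamic)
```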
+_DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary() +_DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary() +_DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock() + +PYPY = platform.python_implementation() == "PyPy" + +builtin_code_type = None +if PYPY: + # builtin-code objects only exist in pypy + builtin_code_type = type(float.__new__.__code__) + +_extract_code_globals_cache = weakref.WeakKeyDictionary() + + +def _get_or_create_tracker_id(class_def): + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def) + if class_tracker_id is None: + class_tracker_id = uuid.uuid4().hex + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def + return class_tracker_id + + +def _lookup_class_or_track(class_tracker_id, class_def): + if class_tracker_id is not None: + with _DYNAMIC_CLASS_TRACKER_LOCK: + class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault( + class_tracker_id, class_def + ) + _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id + return class_def + + +def register_pickle_by_value(module): + """Register a module to make its functions and classes picklable by value. + + By default, functions and classes that are attributes of an importable + module are to be pickled by reference, that is relying on re-importing + the attribute from the module at load time. + + If `register_pickle_by_value(module)` is called, all its functions and + classes are subsequently to be pickled by value, meaning that they can + be loaded in Python processes where the module is not importable. + + This is especially useful when developing a module in a distributed + execution environment: restarting the client Python process with the new + source code is enough: there is no need to re-install the new version + of the module on all the worker nodes nor to restart the workers. + + Note: this feature is considered experimental. See the cloudpickle + README.md file for more details and limitations. + """ + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + # In the future, cloudpickle may need a way to access any module registered + # for pickling by value in order to introspect relative imports inside + # functions pickled by value. (see + # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633). + # This access can be ensured by checking that module is present in + # sys.modules at registering time and assuming that it will still be in + # there when accessed during pickling. Another alternative would be to + # store a weakref to the module. Even though cloudpickle does not implement + # this introspection yet, in order to avoid a possible breaking change + # later, we still enforce the presence of module inside sys.modules. + if module.__name__ not in sys.modules: + raise ValueError( + f"{module} was not imported correctly, have you used an " + "`import` statement to access it?" 
+ ) + _PICKLE_BY_VALUE_MODULES.add(module.__name__) + + +def unregister_pickle_by_value(module): + """Unregister a module previously registered for pickling by value.""" + if not isinstance(module, types.ModuleType): + raise ValueError(f"Input should be a module object, got {str(module)} instead") + if module.__name__ not in _PICKLE_BY_VALUE_MODULES: + raise ValueError(f"{module} is not registered for pickle by value") + else: + _PICKLE_BY_VALUE_MODULES.remove(module.__name__) + + +def list_registry_pickle_by_value(): + return _PICKLE_BY_VALUE_MODULES.copy() + + +def _is_registered_pickle_by_value(module): + module_name = module.__name__ + if module_name in _PICKLE_BY_VALUE_MODULES: + return True + while True: + parent_name = module_name.rsplit(".", 1)[0] + if parent_name == module_name: + break + if parent_name in _PICKLE_BY_VALUE_MODULES: + return True + module_name = parent_name + return False + + +if sys.version_info >= (3, 14): + def _getattribute(obj, name): + return _pickle_getattribute(obj, name.split('.')) +else: + def _getattribute(obj, name): + return _pickle_getattribute(obj, name)[0] + + +def _whichmodule(obj, name): + """Find the module an object belongs to. + + This function differs from ``pickle.whichmodule`` in two ways: + - it does not mangle the cases where obj's module is __main__ and obj was + not found in any module. + - Errors arising during module introspection are ignored, as those errors + are considered unwanted side effects. + """ + module_name = getattr(obj, "__module__", None) + + if module_name is not None: + return module_name + # Protect the iteration by using a copy of sys.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr or + # other threads importing at the same time. + for module_name, module in sys.modules.copy().items(): + # Some modules such as coverage can inject non-module objects inside + # sys.modules + if ( + module_name == "__main__" + or module_name == "__mp_main__" + or module is None + or not isinstance(module, types.ModuleType) + ): + continue + try: + if _getattribute(module, name) is obj: + return module_name + except Exception: + pass + return None + + +def _should_pickle_by_reference(obj, name=None): + """Test whether a function or a class should be pickled by reference + + Pickling by reference means that the object (typically a function or a + class) is an attribute of a module that is assumed to be importable in the + target Python environment. Loading will therefore rely on importing the + module and then calling `getattr` on it to access the function or class. + + Pickling by reference is the only option to pickle functions and classes + in the standard library. In cloudpickle the alternative option is to + pickle by value (for instance for interactively or locally defined + functions and classes or for attributes of modules that have been + explicitly registered to be pickled by value). + """ + if isinstance(obj, types.FunctionType) or issubclass(type(obj), type): + module_and_name = _lookup_module_and_qualname(obj, name=name) + if module_and_name is None: + return False + module, name = module_and_name + return not _is_registered_pickle_by_value(module) + + elif isinstance(obj, types.ModuleType): + # We assume that sys.modules is primarily used as a cache mechanism for + # the Python import machinery.
Checking if a module has been added to + # sys.modules is therefore a cheap and simple heuristic to tell us + # whether we can assume that a given module could be imported by name + # in another Python process. + if _is_registered_pickle_by_value(obj): + return False + return obj.__name__ in sys.modules + else: + raise TypeError( + "cannot check importability of {} instances".format(type(obj).__name__) + ) + + +def _lookup_module_and_qualname(obj, name=None): + if name is None: + name = getattr(obj, "__qualname__", None) + if name is None: # pragma: no cover + # This used to be needed for Python 2.7 support but is probably not + # needed anymore. However we keep the __name__ introspection in case + # users of cloudpickle rely on this old behavior for unknown reasons. + name = getattr(obj, "__name__", None) + + module_name = _whichmodule(obj, name) + + if module_name is None: + # In this case, obj.__module__ is None AND obj was not found in any + # imported module. obj is thus treated as dynamic. + return None + + if module_name == "__main__": + return None + + # Note: if module_name is in sys.modules, the corresponding module is + # assumed importable at unpickling time. See #357 + module = sys.modules.get(module_name, None) + if module is None: + # The main reason why obj's module would not be imported is that this + # module has been dynamically created, using for example + # types.ModuleType. The other possibility is that module was removed + # from sys.modules after obj was created/imported. But this case is not + # supported, as the standard pickle does not support it either. + return None + + try: + obj2 = _getattribute(module, name) + except AttributeError: + # obj was not found inside the module it points to + return None + if obj2 is not obj: + return None + return module, name + + +def _extract_code_globals(co): + """Find all global names read or written to by code block co.""" + out_names = _extract_code_globals_cache.get(co) + if out_names is None: + # We use a dict with None values instead of a set to get a + # deterministic order and avoid introducing non-deterministic pickle + # bytes as a result. + out_names = {name: None for name in _walk_global_ops(co)} + + # Declaring a function inside another one using the "def ..." syntax + # generates a constant code object corresponding to that of the + # nested function. As the nested function may itself need global + # variables, we need to introspect its code, extract its globals (look + # for code objects in its co_consts attribute) and add the result to + # code_globals + if co.co_consts: + for const in co.co_consts: + if isinstance(const, types.CodeType): + out_names.update(_extract_code_globals(const)) + + _extract_code_globals_cache[co] = out_names + + return out_names + + +def _find_imported_submodules(code, top_level_dependencies): + """Find currently imported submodules used by a function. + + Submodules used by a function need to be detected and referenced for the + function to work correctly at depickling time. Because submodules can be + referenced as attributes of their parent package (``package.submodule``), we + need a special introspection technique that does not rely on GLOBAL-related + opcodes to find references of them in a code object.
+ + Example: + ``` + import concurrent.futures + import cloudpickle + def func(): + x = concurrent.futures.ThreadPoolExecutor + if __name__ == '__main__': + cloudpickle.dumps(func) + ``` + The globals extracted by cloudpickle in the function's state include the + concurrent package, but not its submodule (here, concurrent.futures), which + is the module used by func. _find_imported_submodules will detect the usage + of concurrent.futures. Saving this module alongside func will ensure + that calling func once depickled does not fail due to concurrent.futures + not being imported. + """ + + subimports = [] + # check if any known dependency is an imported package + for x in top_level_dependencies: + if ( + isinstance(x, types.ModuleType) + and hasattr(x, "__package__") + and x.__package__ + ): + # check if the package has any currently loaded sub-imports + prefix = x.__name__ + "." + # A concurrent thread could mutate sys.modules, + # make sure we iterate over a copy to avoid exceptions + for name in list(sys.modules): + # Older versions of pytest will add a "None" module to + # sys.modules. + if name is not None and name.startswith(prefix): + # check whether the function can address the sub-module + tokens = set(name[len(prefix) :].split(".")) + if not tokens - set(code.co_names): + subimports.append(sys.modules[name]) + return subimports + + +# relevant opcodes +STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"] +DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"] +LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"] +GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL) +HAVE_ARGUMENT = dis.HAVE_ARGUMENT +EXTENDED_ARG = dis.EXTENDED_ARG + + +_BUILTIN_TYPE_NAMES = {} +for k, v in types.__dict__.items(): + if type(v) is type: + _BUILTIN_TYPE_NAMES[v] = k + + +def _builtin_type(name): + if name == "ClassType": # pragma: no cover + # Backward compat to load pickle files generated with cloudpickle + # < 1.3 even if loading pickle files from older versions is not + # officially supported. + return type + return getattr(types, name) + + +def _walk_global_ops(code): + """Yield referenced names for global-referencing instructions in code.""" + for instr in dis.get_instructions(code): + op = instr.opcode + if op in GLOBAL_OPS: + yield instr.argval + + +def _extract_class_dict(cls): + """Retrieve a copy of the dict of a class without the inherited methods.""" + # Hack to circumvent non-predictable memoization caused by string interning. + # See the inline comment in _class_setstate for details. + clsdict = {"".join(k): cls.__dict__[k] for k in sorted(cls.__dict__)} + + if len(cls.__bases__) == 1: + inherited_dict = cls.__bases__[0].__dict__ + else: + inherited_dict = {} + for base in reversed(cls.__bases__): + inherited_dict.update(base.__dict__) + to_remove = [] + for name, value in clsdict.items(): + try: + base_value = inherited_dict[name] + if value is base_value: + to_remove.append(name) + except KeyError: + pass + for name in to_remove: + clsdict.pop(name) + return clsdict + + +def is_tornado_coroutine(func): + """Return whether `func` is a Tornado coroutine function. + + Running coroutines are not supported. + """ + warnings.warn( + "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
Use tornado.gen.is_coroutine_function " + "directly instead.", + category=DeprecationWarning, + ) + if "tornado.gen" not in sys.modules: + return False + gen = sys.modules["tornado.gen"] + if not hasattr(gen, "is_coroutine_function"): + # Tornado version is too old + return False + return gen.is_coroutine_function(func) + + +def subimport(name): + # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is + # the name of a submodule, __import__ will return the top-level root module + # of this submodule. For instance, __import__('os.path') returns the `os` + # module. + __import__(name) + return sys.modules[name] + + +def dynamic_subimport(name, vars): + mod = types.ModuleType(name) + mod.__dict__.update(vars) + mod.__dict__["__builtins__"] = builtins.__dict__ + return mod + + +def _get_cell_contents(cell): + try: + return cell.cell_contents + except ValueError: + # Handle empty cells explicitly with a sentinel value. + return _empty_cell_value + + +def instance(cls): + """Create a new instance of a class. + + Parameters + ---------- + cls : type + The class to create an instance of. + + Returns + ------- + instance : cls + A new instance of ``cls``. + """ + return cls() + + +@instance +class _empty_cell_value: + """Sentinel for empty closures.""" + + @classmethod + def __reduce__(cls): + return cls.__name__ + + +def _make_function(code, globals, name, argdefs, closure): + # Setting __builtins__ in globals is needed for nogil CPython. + globals["__builtins__"] = __builtins__ + return types.FunctionType(code, globals, name, argdefs, closure) + + +def _make_empty_cell(): + if False: + # trick the compiler into creating an empty cell in our lambda + cell = None + raise AssertionError("this route should not be executed") + + return (lambda: cell).__closure__[0] + + +def _make_cell(value=_empty_cell_value): + cell = _make_empty_cell() + if value is not _empty_cell_value: + cell.cell_contents = value + return cell + + +def _make_skeleton_class( + type_constructor, name, bases, type_kwargs, class_tracker_id, extra +): + """Build dynamic class with an empty __dict__ to be filled once memoized + + If class_tracker_id is not None, try to lookup an existing class definition + matching that id. If none is found, track a newly reconstructed class + definition under that id so that other instances stemming from the same + class id will also reuse this class definition. + + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + # We need to intern the keys of the type_kwargs dict to avoid having + # different pickles for the same dynamic class depending on whether it was + # dynamically created or reconstructed from a pickled stream. + type_kwargs = {sys.intern(k): v for k, v in type_kwargs.items()} + + skeleton_class = types.new_class( + name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs) + ) + + return _lookup_class_or_track(class_tracker_id, skeleton_class) + + +def _make_skeleton_enum( + bases, name, qualname, members, module, class_tracker_id, extra +): + """Build dynamic enum with an empty __dict__ to be filled once memoized + + The creation of the enum class is inspired by the code of + EnumMeta._create_. + + If class_tracker_id is not None, try to lookup an existing enum definition + matching that id. If none is found, track a newly reconstructed enum + definition under that id so that other instances stemming from the same + class id will also reuse this enum definition. 
+ + The "extra" variable is meant to be a dict (or None) that can be used for + forward compatibility shall the need arise. + """ + # enums always inherit from their base Enum class at the last position in + # the list of base classes: + enum_base = bases[-1] + metacls = enum_base.__class__ + classdict = metacls.__prepare__(name, bases) + + for member_name, member_value in members.items(): + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, name, bases, classdict) + enum_class.__module__ = module + enum_class.__qualname__ = qualname + + return _lookup_class_or_track(class_tracker_id, enum_class) + + +def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id): + tv = typing.TypeVar( + name, + *constraints, + bound=bound, + covariant=covariant, + contravariant=contravariant, + ) + return _lookup_class_or_track(class_tracker_id, tv) + + +def _decompose_typevar(obj): + return ( + obj.__name__, + obj.__bound__, + obj.__constraints__, + obj.__covariant__, + obj.__contravariant__, + _get_or_create_tracker_id(obj), + ) + + +def _typevar_reduce(obj): + # TypeVar instances require the module information hence why we + # are not using the _should_pickle_by_reference directly + module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__) + + if module_and_name is None: + return (_make_typevar, _decompose_typevar(obj)) + elif _is_registered_pickle_by_value(module_and_name[0]): + return (_make_typevar, _decompose_typevar(obj)) + + return (getattr, module_and_name) + + +def _get_bases(typ): + if "__orig_bases__" in getattr(typ, "__dict__", {}): + # For generic types (see PEP 560) + # Note that simply checking `hasattr(typ, '__orig_bases__')` is not + # correct. Subclasses of a fully-parameterized generic class does not + # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')` + # will return True because it's defined in the base class. 
+ bases_attr = "__orig_bases__" + else: + # For regular class objects + bases_attr = "__bases__" + return getattr(typ, bases_attr) + + +def _make_dict_keys(obj, is_ordered=False): + if is_ordered: + return OrderedDict.fromkeys(obj).keys() + else: + return dict.fromkeys(obj).keys() + + +def _make_dict_values(obj, is_ordered=False): + if is_ordered: + return OrderedDict((i, _) for i, _ in enumerate(obj)).values() + else: + return {i: _ for i, _ in enumerate(obj)}.values() + + +def _make_dict_items(obj, is_ordered=False): + if is_ordered: + return OrderedDict(obj).items() + else: + return obj.items() + + +# COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS +# ------------------------------------------------- + + +def _class_getnewargs(obj): + type_kwargs = {} + if "__module__" in obj.__dict__: + type_kwargs["__module__"] = obj.__module__ + + __dict__ = obj.__dict__.get("__dict__", None) + if isinstance(__dict__, property): + type_kwargs["__dict__"] = __dict__ + + return ( + type(obj), + obj.__name__, + _get_bases(obj), + type_kwargs, + _get_or_create_tracker_id(obj), + None, + ) + + +def _enum_getnewargs(obj): + members = {e.name: e.value for e in obj} + return ( + obj.__bases__, + obj.__name__, + obj.__qualname__, + members, + obj.__module__, + _get_or_create_tracker_id(obj), + None, + ) + + +# COLLECTION OF OBJECTS RECONSTRUCTORS +# ------------------------------------ +def _file_reconstructor(retval): + return retval + + +# COLLECTION OF OBJECTS STATE GETTERS +# ----------------------------------- + + +def _function_getstate(func): + # - Put func's dynamic attributes (stored in func.__dict__) in state. These + # attributes will be restored at unpickling time using + # f.__dict__.update(state) + # - Put func's members into slotstate. Such attributes will be restored at + # unpickling time by iterating over slotstate and calling setattr(func, + # slotname, slotvalue) + slotstate = { + # Hack to circumvent non-predictable memoization caused by string interning. + # See the inline comment in _class_setstate for details. + "__name__": "".join(func.__name__), + "__qualname__": "".join(func.__qualname__), + "__annotations__": func.__annotations__, + "__kwdefaults__": func.__kwdefaults__, + "__defaults__": func.__defaults__, + "__module__": func.__module__, + "__doc__": func.__doc__, + "__closure__": func.__closure__, + } + + f_globals_ref = _extract_code_globals(func.__code__) + f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__} + + if func.__closure__ is not None: + closure_values = list(map(_get_cell_contents, func.__closure__)) + else: + closure_values = () + + # Extract currently-imported submodules used by func. Storing these modules + # in a smoke _cloudpickle_subimports attribute of the object's state will + # trigger the side effect of importing these modules at unpickling time + # (which is necessary for func to work correctly once depickled) + slotstate["_cloudpickle_submodules"] = _find_imported_submodules( + func.__code__, itertools.chain(f_globals.values(), closure_values) + ) + slotstate["__globals__"] = f_globals + + # Hack to circumvent non-predictable memoization caused by string interning. + # See the inline comment in _class_setstate for details. 
+ state = {"".join(k): v for k, v in func.__dict__.items()} + return state, slotstate + + +def _class_getstate(obj): + clsdict = _extract_class_dict(obj) + clsdict.pop("__weakref__", None) + + if issubclass(type(obj), abc.ABCMeta): + # If obj is an instance of an ABCMeta subclass, don't pickle the + # cache/negative caches populated during isinstance/issubclass + # checks, but pickle the list of registered subclasses of obj. + clsdict.pop("_abc_cache", None) + clsdict.pop("_abc_negative_cache", None) + clsdict.pop("_abc_negative_cache_version", None) + registry = clsdict.pop("_abc_registry", None) + if registry is None: + # The abc caches and registered subclasses of a + # class are bundled into the single _abc_impl attribute + clsdict.pop("_abc_impl", None) + (registry, _, _, _) = abc._get_dump(obj) + + clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry] + else: + # In the above if clause, registry is a set of weakrefs -- in + # this case, registry is a WeakSet + clsdict["_abc_impl"] = [type_ for type_ in registry] + + if "__slots__" in clsdict: + # pickle string length optimization: member descriptors of obj are + # created automatically from obj's __slots__ attribute, no need to + # save them in obj's state + if isinstance(obj.__slots__, str): + clsdict.pop(obj.__slots__) + else: + for k in obj.__slots__: + clsdict.pop(k, None) + + clsdict.pop("__dict__", None) # unpicklable property object + + return (clsdict, {}) + + +def _enum_getstate(obj): + clsdict, slotstate = _class_getstate(obj) + + members = {e.name: e.value for e in obj} + # Cleanup the clsdict that will be passed to _make_skeleton_enum: + # Those attributes are already handled by the metaclass. + for attrname in [ + "_generate_next_value_", + "_member_names_", + "_member_map_", + "_member_type_", + "_value2member_map_", + ]: + clsdict.pop(attrname, None) + for member in members: + clsdict.pop(member) + # Special handling of Enum subclasses + return clsdict, slotstate + + +# COLLECTIONS OF OBJECTS REDUCERS +# ------------------------------- +# A reducer is a function taking a single argument (obj), and that returns a +# tuple with all the necessary data to re-construct obj. Apart from a few +# exceptions (list, dict, bytes, int, etc.), a reducer is necessary to +# correctly pickle an object. +# While many built-in objects (Exceptions objects, instances of the "object" +# class, etc), are shipped with their own built-in reducer (invoked using +# obj.__reduce__), some do not. The following methods were created to "fill +# these holes". + + +def _code_reduce(obj): + """code object reducer.""" + # If you are not sure about the order of arguments, take a look at help + # of the specific type from types, for example: + # >>> from types import CodeType + # >>> help(CodeType) + + # Hack to circumvent non-predictable memoization caused by string interning. + # See the inline comment in _class_setstate for details. + co_name = "".join(obj.co_name) + + # Create shallow copies of these tuple to make cloudpickle payload deterministic. + # When creating a code object during load, copies of these four tuples are + # created, while in the main process, these tuples can be shared. + # By always creating copies, we make sure the resulting payload is deterministic. 
+ co_names = tuple(name for name in obj.co_names) + co_varnames = tuple(name for name in obj.co_varnames) + co_freevars = tuple(name for name in obj.co_freevars) + co_cellvars = tuple(name for name in obj.co_cellvars) + if hasattr(obj, "co_exceptiontable"): + # Python 3.11 and later: there are some new attributes + # related to the enhanced exceptions. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + co_names, + co_varnames, + obj.co_filename, + co_name, + obj.co_qualname, + obj.co_firstlineno, + obj.co_linetable, + obj.co_exceptiontable, + co_freevars, + co_cellvars, + ) + elif hasattr(obj, "co_linetable"): + # Python 3.10 and later: obj.co_lnotab is deprecated and constructor + # expects obj.co_linetable instead. + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + co_names, + co_varnames, + obj.co_filename, + co_name, + obj.co_firstlineno, + obj.co_linetable, + co_freevars, + co_cellvars, + ) + elif hasattr(obj, "co_nmeta"): # pragma: no cover + # "nogil" Python: modified attributes from 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_framesize, + obj.co_ndefaultargs, + obj.co_nmeta, + obj.co_flags, + obj.co_code, + obj.co_consts, + co_varnames, + obj.co_filename, + co_name, + obj.co_firstlineno, + obj.co_lnotab, + obj.co_exc_handlers, + obj.co_jump_table, + co_freevars, + co_cellvars, + obj.co_free2reg, + obj.co_cell2reg, + ) + else: + # Backward compat for 3.8 and 3.9 + args = ( + obj.co_argcount, + obj.co_posonlyargcount, + obj.co_kwonlyargcount, + obj.co_nlocals, + obj.co_stacksize, + obj.co_flags, + obj.co_code, + obj.co_consts, + co_names, + co_varnames, + obj.co_filename, + co_name, + obj.co_firstlineno, + obj.co_lnotab, + co_freevars, + co_cellvars, + ) + return types.CodeType, args + + +def _cell_reduce(obj): + """Cell (containing values of a function's free variables) reducer.""" + try: + obj.cell_contents + except ValueError: # cell is empty + return _make_empty_cell, () + else: + return _make_cell, (obj.cell_contents,) + + +def _classmethod_reduce(obj): + orig_func = obj.__func__ + return type(obj), (orig_func,) + + +def _file_reduce(obj): + """Save a file.""" + import io + + if not hasattr(obj, "name") or not hasattr(obj, "mode"): + raise pickle.PicklingError( + "Cannot pickle files that do not map to an actual file" + ) + if obj is sys.stdout: + return getattr, (sys, "stdout") + if obj is sys.stderr: + return getattr, (sys, "stderr") + if obj is sys.stdin: + raise pickle.PicklingError("Cannot pickle standard input") + if obj.closed: + raise pickle.PicklingError("Cannot pickle closed files") + if hasattr(obj, "isatty") and obj.isatty(): + raise pickle.PicklingError("Cannot pickle files that map to tty objects") + if "r" not in obj.mode and "+" not in obj.mode: + raise pickle.PicklingError( + "Cannot pickle files that are not opened for reading: %s" % obj.mode + ) + + name = obj.name + + retval = io.StringIO() + + try: + # Read the whole file + curloc = obj.tell() + obj.seek(0) + contents = obj.read() + obj.seek(curloc) + except OSError as e: + raise pickle.PicklingError( + "Cannot pickle file %s as it cannot be read" % name + ) from e + retval.write(contents) + retval.seek(curloc) + + retval.name = name + return _file_reconstructor, (retval,) + + +def 
_getset_descriptor_reduce(obj):
+    return getattr, (obj.__objclass__, obj.__name__)
+
+
+def _mappingproxy_reduce(obj):
+    return types.MappingProxyType, (dict(obj),)
+
+
+def _memoryview_reduce(obj):
+    return bytes, (obj.tobytes(),)
+
+
+def _module_reduce(obj):
+    if _should_pickle_by_reference(obj):
+        return subimport, (obj.__name__,)
+    else:
+        # Some external libraries can populate the "__builtins__" entry of a
+        # module's `__dict__` with unpicklable objects (see #316). For that
+        # reason, we do not attempt to pickle the "__builtins__" entry, and
+        # restore a default value for it at unpickling time.
+        state = obj.__dict__.copy()
+        state.pop("__builtins__", None)
+        return dynamic_subimport, (obj.__name__, state)
+
+
+def _method_reduce(obj):
+    return (types.MethodType, (obj.__func__, obj.__self__))
+
+
+def _logger_reduce(obj):
+    return logging.getLogger, (obj.name,)
+
+
+def _root_logger_reduce(obj):
+    return logging.getLogger, ()
+
+
+def _property_reduce(obj):
+    return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
+
+
+def _weakset_reduce(obj):
+    return weakref.WeakSet, (list(obj),)
+
+
+def _dynamic_class_reduce(obj):
+    """Save a class that can't be referenced as a module attribute.
+
+    This method is used to serialize classes that are defined inside
+    functions, or that otherwise can't be serialized as attribute lookups
+    from importable modules.
+    """
+    if Enum is not None and issubclass(obj, Enum):
+        return (
+            _make_skeleton_enum,
+            _enum_getnewargs(obj),
+            _enum_getstate(obj),
+            None,
+            None,
+            _class_setstate,
+        )
+    else:
+        return (
+            _make_skeleton_class,
+            _class_getnewargs(obj),
+            _class_getstate(obj),
+            None,
+            None,
+            _class_setstate,
+        )
+
+
+def _class_reduce(obj):
+    """Select the reducer depending on the dynamic nature of the class obj."""
+    if obj is type(None):  # noqa
+        return type, (None,)
+    elif obj is type(Ellipsis):
+        return type, (Ellipsis,)
+    elif obj is type(NotImplemented):
+        return type, (NotImplemented,)
+    elif obj in _BUILTIN_TYPE_NAMES:
+        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
+    elif not _should_pickle_by_reference(obj):
+        return _dynamic_class_reduce(obj)
+    return NotImplemented
+
+
+def _dict_keys_reduce(obj):
+    # Safer not to ship the full dict as sending the rest might
+    # be unintended and could potentially cause leaking of
+    # sensitive information
+    return _make_dict_keys, (list(obj),)
+
+
+def _dict_values_reduce(obj):
+    # Safer not to ship the full dict as sending the rest might
+    # be unintended and could potentially cause leaking of
+    # sensitive information
+    return _make_dict_values, (list(obj),)
+
+
+def _dict_items_reduce(obj):
+    return _make_dict_items, (dict(obj),)
+
+
+def _odict_keys_reduce(obj):
+    # Safer not to ship the full dict as sending the rest might
+    # be unintended and could potentially cause leaking of
+    # sensitive information
+    return _make_dict_keys, (list(obj), True)
+
+
+def _odict_values_reduce(obj):
+    # Safer not to ship the full dict as sending the rest might
+    # be unintended and could potentially cause leaking of
+    # sensitive information
+    return _make_dict_values, (list(obj), True)
+
+
+def _odict_items_reduce(obj):
+    return _make_dict_items, (dict(obj), True)
+
+
+def _dataclass_field_base_reduce(obj):
+    return _get_dataclass_field_type_sentinel, (obj.name,)
+
+
+# COLLECTIONS OF OBJECTS STATE SETTERS
+# ------------------------------------
+# state setters are called at unpickling time, once the object is created and
+# it has to be updated to how it was at pickling time.
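+# For example, the reduce tuple built by Pickler._dynamic_function_reduce is
+# (_make_function, newargs, state, None, None, _function_setstate): at load
+# time the unpickler first calls obj = _make_function(*newargs), then
+# _function_setstate(obj, state) to restore the function in place.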
+
+
+def _function_setstate(obj, state):
+    """Update the state of a dynamic function.
+
+    As __closure__ and __globals__ are readonly attributes of a function, we
+    cannot rely on the native setstate routine of pickle.load_build, which
+    calls setattr on items of the slotstate. Instead, we have to modify them
+    in place.
+    """
+    state, slotstate = state
+    obj.__dict__.update(state)
+
+    obj_globals = slotstate.pop("__globals__")
+    obj_closure = slotstate.pop("__closure__")
+    # _cloudpickle_submodules is a set of submodules that must be loaded for
+    # the pickled function to work correctly at unpickling time. Now that these
+    # submodules are depickled (hence imported), they can be removed from the
+    # object's state (the object state only served as a reference holder to
+    # these submodules)
+    slotstate.pop("_cloudpickle_submodules")
+
+    obj.__globals__.update(obj_globals)
+    obj.__globals__["__builtins__"] = __builtins__
+
+    if obj_closure is not None:
+        for i, cell in enumerate(obj_closure):
+            try:
+                value = cell.cell_contents
+            except ValueError:  # cell is empty
+                continue
+            obj.__closure__[i].cell_contents = value
+
+    for k, v in slotstate.items():
+        setattr(obj, k, v)
+
+
+def _class_setstate(obj, state):
+    state, slotstate = state
+    registry = None
+    for attrname, attr in state.items():
+        if attrname == "_abc_impl":
+            registry = attr
+        else:
+            # Note: setting attribute names on a class automatically triggers their
+            # interning in CPython:
+            # https://github.com/python/cpython/blob/v3.12.0/Objects/object.c#L957
+            #
+            # This means that to get deterministic pickling for a dynamic class that
+            # was initially defined in a different Python process, the pickler
+            # needs to ensure that dynamic class and function attribute names are
+            # systematically copied into a non-interned version to avoid
+            # unpredictable pickle payloads.
+            #
+            # Indeed the Pickler's memoizer relies on physical object identity to break
+            # cycles in the reference graph of the object being serialized.
+            setattr(obj, attrname, attr)
+
+    if sys.version_info >= (3, 13) and "__firstlineno__" in state:
+        # Set the Python 3.13+ only __firstlineno__ attribute one more time, as it
+        # will be automatically deleted by the `setattr(obj, attrname, attr)` call
+        # above when `attrname` is "__firstlineno__". We assume that preserving this
+        # information might be important for some users and that it is not stale in
+        # the context of cloudpickle usage, hence legitimate to propagate. Furthermore
+        # it is necessary to do so to keep deterministic chained pickling as tested in
+        # test_deterministic_str_interning_for_chained_dynamic_class_pickling.
+        obj.__firstlineno__ = state["__firstlineno__"]
+
+    if registry is not None:
+        for subclass in registry:
+            obj.register(subclass)
+
+    return obj
+
+
+# COLLECTION OF DATACLASS UTILITIES
+# ---------------------------------
+# There are some internal sentinel values whose identity must be preserved when
+# unpickling dataclass fields. Each sentinel value has a unique name that we can
+# use to retrieve its identity at unpickling time.
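+# For example, dataclasses.fields() keeps only the fields whose
+# ``_field_type is dataclasses._FIELD``; a recreated sentinel would fail this
+# identity check, hence the name-based lookup used below.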
+ + +_DATACLASSE_FIELD_TYPE_SENTINELS = { + dataclasses._FIELD.name: dataclasses._FIELD, + dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR, + dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR, +} + + +def _get_dataclass_field_type_sentinel(name): + return _DATACLASSE_FIELD_TYPE_SENTINELS[name] + + +class Pickler(pickle.Pickler): + # set of reducers defined and used by cloudpickle (private) + _dispatch_table = {} + _dispatch_table[classmethod] = _classmethod_reduce + _dispatch_table[io.TextIOWrapper] = _file_reduce + _dispatch_table[logging.Logger] = _logger_reduce + _dispatch_table[logging.RootLogger] = _root_logger_reduce + _dispatch_table[memoryview] = _memoryview_reduce + _dispatch_table[property] = _property_reduce + _dispatch_table[staticmethod] = _classmethod_reduce + _dispatch_table[CellType] = _cell_reduce + _dispatch_table[types.CodeType] = _code_reduce + _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce + _dispatch_table[types.ModuleType] = _module_reduce + _dispatch_table[types.MethodType] = _method_reduce + _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce + _dispatch_table[weakref.WeakSet] = _weakset_reduce + _dispatch_table[typing.TypeVar] = _typevar_reduce + _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce + _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce + _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce + _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce + _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce + _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce + _dispatch_table[abc.abstractmethod] = _classmethod_reduce + _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce + _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce + _dispatch_table[abc.abstractproperty] = _property_reduce + _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce + + dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table) + + # function reducers are defined as instance methods of cloudpickle.Pickler + # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref) + def _dynamic_function_reduce(self, func): + """Reduce a function that is not pickleable via attribute lookup.""" + newargs = self._function_getnewargs(func) + state = _function_getstate(func) + return (_make_function, newargs, state, None, None, _function_setstate) + + def _function_reduce(self, obj): + """Reducer for function objects. + + If obj is a top-level attribute of a file-backed module, this reducer + returns NotImplemented, making the cloudpickle.Pickler fall back to + traditional pickle.Pickler routines to save obj. Otherwise, it reduces + obj using a custom cloudpickle reducer designed specifically to handle + dynamic functions. + """ + if _should_pickle_by_reference(obj): + return NotImplemented + else: + return self._dynamic_function_reduce(obj) + + def _function_getnewargs(self, func): + code = func.__code__ + + # base_globals represents the future global namespace of func at + # unpickling time. 
Looking it up and storing it in + # cloudpickle.Pickler.globals_ref allow functions sharing the same + # globals at pickling time to also share them once unpickled, at one + # condition: since globals_ref is an attribute of a cloudpickle.Pickler + # instance, and that a new cloudpickle.Pickler is created each time + # cloudpickle.dump or cloudpickle.dumps is called, functions also need + # to be saved within the same invocation of + # cloudpickle.dump/cloudpickle.dumps (for example: + # cloudpickle.dumps([f1, f2])). There is no such limitation when using + # cloudpickle.Pickler.dump, as long as the multiple invocations are + # bound to the same cloudpickle.Pickler instance. + base_globals = self.globals_ref.setdefault(id(func.__globals__), {}) + + if base_globals == {}: + # Add module attributes used to resolve relative imports + # instructions inside func. + for k in ["__package__", "__name__", "__path__", "__file__"]: + if k in func.__globals__: + base_globals[k] = func.__globals__[k] + + # Do not bind the free variables before the function is created to + # avoid infinite recursion. + if func.__closure__ is None: + closure = None + else: + closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars))) + + return code, base_globals, None, None, closure + + def dump(self, obj): + try: + return super().dump(obj) + except RuntimeError as e: + if len(e.args) > 0 and "recursion" in e.args[0]: + msg = "Could not pickle object as excessively deep recursion required." + raise pickle.PicklingError(msg) from e + else: + raise + + def __init__(self, file, protocol=None, buffer_callback=None): + if protocol is None: + protocol = DEFAULT_PROTOCOL + super().__init__(file, protocol=protocol, buffer_callback=buffer_callback) + # map functions __globals__ attribute ids, to ensure that functions + # sharing the same global namespace at pickling time also share + # their global namespace at unpickling time. + self.globals_ref = {} + self.proto = int(protocol) + + if not PYPY: + # pickle.Pickler is the C implementation of the CPython pickler and + # therefore we rely on reduce_override method to customize the pickler + # behavior. + + # `cloudpickle.Pickler.dispatch` is only left for backward + # compatibility - note that when using protocol 5, + # `cloudpickle.Pickler.dispatch` is not an extension of + # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler` + # subclasses the C-implemented `pickle.Pickler`, which does not expose + # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler` + # used `cloudpickle.Pickler.dispatch` as a class-level attribute + # storing all reducers implemented by cloudpickle, but the attribute + # name was not a great choice given because it would collide with a + # similarly named attribute in the pure-Python `pickle._Pickler` + # implementation in the standard library. + dispatch = dispatch_table + + # Implementation of the reducer_override callback, in order to + # efficiently serialize dynamic functions and classes by subclassing + # the C-implemented `pickle.Pickler`. + # TODO: decorrelate reducer_override (which is tied to CPython's + # implementation - would it make sense to backport it to pypy? - and + # pickle's protocol 5 which is implementation agnostic. Currently, the + # availability of both notions coincide on CPython's pickle, but it may + # not be the case anymore when pypy implements protocol 5. + + def reducer_override(self, obj): + """Type-agnostic reducing callback for function and classes. 
+ + For performance reasons, subclasses of the C `pickle.Pickler` class + cannot register custom reducers for functions and classes in the + dispatch_table attribute. Reducers for such types must instead + implemented via the special `reducer_override` method. + + Note that this method will be called for any object except a few + builtin-types (int, lists, dicts etc.), which differs from reducers + in the Pickler's dispatch_table, each of them being invoked for + objects of a specific type only. + + This property comes in handy for classes: although most classes are + instances of the ``type`` metaclass, some of them can be instances + of other custom metaclasses (such as enum.EnumMeta for example). In + particular, the metaclass will likely not be known in advance, and + thus cannot be special-cased using an entry in the dispatch_table. + reducer_override, among other things, allows us to register a + reducer that will be called for any class, independently of its + type. + + Notes: + + * reducer_override has the priority over dispatch_table-registered + reducers. + * reducer_override can be used to fix other limitations of + cloudpickle for other types that suffered from type-specific + reducers, such as Exceptions. See + https://github.com/cloudpipe/cloudpickle/issues/248 + """ + t = type(obj) + try: + is_anyclass = issubclass(t, type) + except TypeError: # t is not a class (old Boost; see SF #502085) + is_anyclass = False + + if is_anyclass: + return _class_reduce(obj) + elif isinstance(obj, types.FunctionType): + return self._function_reduce(obj) + else: + # fallback to save_global, including the Pickler's + # dispatch_table + return NotImplemented + + else: + # When reducer_override is not available, hack the pure-Python + # Pickler's types.FunctionType and type savers. Note: the type saver + # must override Pickler.save_global, because pickle.py contains a + # hard-coded call to save_global when pickling meta-classes. + dispatch = pickle.Pickler.dispatch.copy() + + def _save_reduce_pickle5( + self, + func, + args, + state=None, + listitems=None, + dictitems=None, + state_setter=None, + obj=None, + ): + save = self.save + write = self.write + self.save_reduce( + func, + args, + state=None, + listitems=listitems, + dictitems=dictitems, + obj=obj, + ) + # backport of the Python 3.8 state_setter pickle operations + save(state_setter) + save(obj) # simple BINGET opcode as obj is already memoized. + save(state) + write(pickle.TUPLE2) + # Trigger a state_setter(obj, state) function call. + write(pickle.REDUCE) + # The purpose of state_setter is to carry-out an + # inplace modification of obj. We do not care about what the + # method might return, so its output is eventually removed from + # the stack. + write(pickle.POP) + + def save_global(self, obj, name=None, pack=struct.pack): + """Main dispatch method. + + The name of this method is somewhat misleading: all types get + dispatched here. 
+ """ + if obj is type(None): # noqa + return self.save_reduce(type, (None,), obj=obj) + elif obj is type(Ellipsis): + return self.save_reduce(type, (Ellipsis,), obj=obj) + elif obj is type(NotImplemented): + return self.save_reduce(type, (NotImplemented,), obj=obj) + elif obj in _BUILTIN_TYPE_NAMES: + return self.save_reduce( + _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj + ) + + if name is not None: + super().save_global(obj, name=name) + elif not _should_pickle_by_reference(obj, name=name): + self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj) + else: + super().save_global(obj, name=name) + + dispatch[type] = save_global + + def save_function(self, obj, name=None): + """Registered with the dispatch to handle all function types. + + Determines what kind of function obj is (e.g. lambda, defined at + interactive prompt, etc) and handles the pickling appropriately. + """ + if _should_pickle_by_reference(obj, name=name): + return super().save_global(obj, name=name) + elif PYPY and isinstance(obj.__code__, builtin_code_type): + return self.save_pypy_builtin_func(obj) + else: + return self._save_reduce_pickle5( + *self._dynamic_function_reduce(obj), obj=obj + ) + + def save_pypy_builtin_func(self, obj): + """Save pypy equivalent of builtin functions. + + PyPy does not have the concept of builtin-functions. Instead, + builtin-functions are simple function instances, but with a + builtin-code attribute. + Most of the time, builtin functions should be pickled by attribute. + But PyPy has flaky support for __qualname__, so some builtin + functions such as float.__new__ will be classified as dynamic. For + this reason only, we created this special routine. Because + builtin-functions are not expected to have closure or globals, + there is no additional hack (compared the one already implemented + in pickle) to protect ourselves from reference cycles. A simple + (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note + also that PyPy improved their support for __qualname__ in v3.6, so + this routing should be removed when cloudpickle supports only PyPy + 3.6 and later. + """ + rv = ( + types.FunctionType, + (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__), + obj.__dict__, + ) + self.save_reduce(*rv, obj=obj) + + dispatch[types.FunctionType] = save_function + + +# Shorthands similar to pickle.dump/pickle.dumps + + +def dump(obj, file, protocol=None, buffer_callback=None): + """Serialize obj as bytes streamed into file + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. + + Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure + compatibility with older versions of Python (although this is not always + guaranteed to work because cloudpickle relies on some internal + implementation details that can change from one Python version to the + next). + """ + Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj) + + +def dumps(obj, protocol=None, buffer_callback=None): + """Serialize obj as a string of bytes allocated in memory + + protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to + pickle.HIGHEST_PROTOCOL. This setting favors maximum communication + speed between processes running the same Python version. 
+
+    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
+    compatibility with older versions of Python (although this is not always
+    guaranteed to work because cloudpickle relies on some internal
+    implementation details that can change from one Python version to the
+    next).
+    """
+    with io.BytesIO() as file:
+        cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
+        cp.dump(obj)
+        return file.getvalue()
+
+
+# Include pickle's unpickling functions in this namespace for convenience.
load, loads = pickle.load, pickle.loads
+
+# Backward compat alias.
+CloudPickler = Pickler
diff --git a/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py b/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..20280f0ca354a691861ab6f17821bbeb04632003
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/externals/cloudpickle/cloudpickle_fast.py
@@ -0,0 +1,14 @@
+"""Compatibility module.
+
+It can be necessary to load files generated by previous versions of cloudpickle
+that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
+namespace.
+
+See: tests/test_backward_compat.py
+"""
+
+from . import cloudpickle
+
+
+def __getattr__(name):
+    return getattr(cloudpickle, name)
diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__init__.py b/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..af28a24069408b1fcc77a19bc3c0521c40522f8d
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/externals/loky/__init__.py
@@ -0,0 +1,45 @@
+r"""The :mod:`loky` module manages a pool of workers that can be re-used across time.
+It provides a robust and dynamic implementation of the
+:class:`ProcessPoolExecutor` and a function :func:`get_reusable_executor` which
+hide the pool management under the hood.
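+
+A minimal usage sketch (the executor follows the ``concurrent.futures``
+API; ``get_reusable_executor`` is re-exported below)::
+
+    from joblib.externals.loky import get_reusable_executor
+
+    executor = get_reusable_executor(max_workers=2)
+    results = list(executor.map(abs, range(-4, 4)))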
+""" + +from concurrent.futures import ( + ALL_COMPLETED, + FIRST_COMPLETED, + FIRST_EXCEPTION, + CancelledError, + Executor, + TimeoutError, + as_completed, + wait, +) + +from ._base import Future +from .backend.context import cpu_count +from .backend.reduction import set_loky_pickler +from .reusable_executor import get_reusable_executor +from .cloudpickle_wrapper import wrap_non_picklable_objects +from .process_executor import BrokenProcessPool, ProcessPoolExecutor + + +__all__ = [ + "get_reusable_executor", + "cpu_count", + "wait", + "as_completed", + "Future", + "Executor", + "ProcessPoolExecutor", + "BrokenProcessPool", + "CancelledError", + "TimeoutError", + "FIRST_COMPLETED", + "FIRST_EXCEPTION", + "ALL_COMPLETED", + "wrap_non_picklable_objects", + "set_loky_pickler", +] + + +__version__ = "3.5.5" diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..573ebb1e6d5571e9b9b584e359c738e0a4f1c2ad Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eddb7adf01be8cc5f514488ee47dec0a8699a7bd Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/_base.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..713abe465a825aae61d22ca49803c3b326bc274b Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/cloudpickle_wrapper.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1004a8bacb1d1f3276380a92aae2c2c1e1a243c Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/initializers.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0e94de8c72fe06968b8406ea9e2236c1e016d52 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/process_executor.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf23fb60e1ce8d8c252f96be64e5a432c639fd45 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/__pycache__/reusable_executor.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/_base.py 
b/lib/python3.10/site-packages/joblib/externals/loky/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..da0abc1e7fa18363e6342a3b67410f1429e6fa10 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/_base.py @@ -0,0 +1,28 @@ +############################################################################### +# Modification of concurrent.futures.Future +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/_base.py (17/02/2017) +# * Do not use yield from +# * Use old super syntax +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +from concurrent.futures import Future as _BaseFuture +from concurrent.futures._base import LOGGER + + +# To make loky._base.Future instances awaitable by concurrent.futures.wait, +# derive our custom Future class from _BaseFuture. _invoke_callback is the only +# modification made to this class in loky. +# TODO investigate why using `concurrent.futures.Future` directly does not +# always work in our test suite. +class Future(_BaseFuture): + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except BaseException: + LOGGER.exception(f"exception calling callback for {self!r}") diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d339aa644599cf5728394200abdfa19a1256aa02 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/__init__.py @@ -0,0 +1,14 @@ +import os +from multiprocessing import synchronize + +from .context import get_context + + +def _make_name(): + return f"/loky-{os.getpid()}-{next(synchronize.SemLock._rand)}" + + +# monkey patch the name creation for multiprocessing +synchronize.SemLock._make_name = staticmethod(_make_name) + +__all__ = ["get_context"] diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3eba4531836bc1624381452c8ddcb50b7cd58a3 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fed994ce78089836ded18d26c44771428a339af8 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_posix_reduction.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ae3b0946d98ac9d45f38f69e575eea0ea0e90e0 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/_win_reduction.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc 
b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4101e4d46ec86aa5f36b8a2cdc654edb1d205806 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/context.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e7d0bfc501c7615072aaa8934852d4189d6b503 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/fork_exec.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..588ae74917fa39a27ba0e10d84459415cd72efe2 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_posix.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac573289b8b5b8119775e4d25b423cfb43f4bac9 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/popen_loky_win32.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..602b0744151e1a273ecbf809580035f444967dcf Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/process.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71674dd9dda2d9ab7467c09a6dc371149d4dbf25 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/queues.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..356c71e57ab158b37f579684252178a286a679f1 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/reduction.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a4d1ed4bca32961d10b6d84fb03d574d9510165 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/resource_tracker.cpython-310.pyc differ diff 
--git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df89e7e1358df0ecc14e76b981a879f85488fe22 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/spawn.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3eff69ffea733683d596212444a44ac72cccbbb4 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/synchronize.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d80df112fc83387591ef299aa6efd8a80927cc3 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/externals/loky/backend/__pycache__/utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..4b800ec07ff26af38174097a194e24413bf6fc2d --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/_posix_reduction.py @@ -0,0 +1,67 @@ +############################################################################### +# Extra reducers for Unix based system and connections objects +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Add adapted reduction for LokyProcesses and socket/Connection +# +import os +import socket +import _socket +from multiprocessing.connection import Connection +from multiprocessing.context import get_spawning_popen + +from .reduction import register + +HAVE_SEND_HANDLE = ( + hasattr(socket, "CMSG_LEN") + and hasattr(socket, "SCM_RIGHTS") + and hasattr(socket.socket, "sendmsg") +) + + +def _mk_inheritable(fd): + os.set_inheritable(fd, True) + return fd + + +def DupFd(fd): + """Return a wrapper for an fd.""" + popen_obj = get_spawning_popen() + if popen_obj is not None: + return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) + elif HAVE_SEND_HANDLE: + from multiprocessing import resource_sharer + + return resource_sharer.DupFd(fd) + else: + raise TypeError( + "Cannot pickle connection object. 
This object can only be "
+            "passed when spawning a new process"
+        )
+
+
+def _reduce_socket(s):
+    df = DupFd(s.fileno())
+    return _rebuild_socket, (df, s.family, s.type, s.proto)
+
+
+def _rebuild_socket(df, family, type, proto):
+    fd = df.detach()
+    return socket.fromfd(fd, family, type, proto)
+
+
+def rebuild_connection(df, readable, writable):
+    fd = df.detach()
+    return Connection(fd, readable, writable)
+
+
+def reduce_connection(conn):
+    df = DupFd(conn.fileno())
+    return rebuild_connection, (df, conn.readable, conn.writable)
+
+
+register(socket.socket, _reduce_socket)
+register(_socket.socket, _reduce_socket)
+register(Connection, reduce_connection)
diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..506d0ecba7c8951ddeaa05b48eb1bdadc8d5ff46
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/_win_reduction.py
@@ -0,0 +1,18 @@
+###############################################################################
+# Extra reducers for Windows systems and connection objects
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/reduction.py (17/02/2017)
+#  * Add adapted reduction for LokyProcesses and socket/PipeConnection
+#
+import socket
+from multiprocessing import connection
+from multiprocessing.reduction import _reduce_socket
+
+from .reduction import register
+
+# register reduction for win32 communication objects
+register(socket.socket, _reduce_socket)
+register(connection.Connection, connection.reduce_connection)
+register(connection.PipeConnection, connection.reduce_pipe_connection)
diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..efd98bf3106d38447dbb16fcfcfc292cc37dd4d0
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/context.py
@@ -0,0 +1,405 @@
+###############################################################################
+# Basic context management with LokyContext
+#
+# author: Thomas Moreau and Olivier Grisel
+#
+# adapted from multiprocessing/context.py
+#  * Create a context ensuring loky uses only objects that are compatible
+#  * Add LokyContext to the list of contexts of multiprocessing so loky can be
+#    used with multiprocessing.set_start_method
+#  * Implement a CFS-aware and physical-core aware cpu_count function.
+#
+import os
+import sys
+import math
+import subprocess
+import traceback
+import warnings
+import multiprocessing as mp
+from multiprocessing import get_context as mp_get_context
+from multiprocessing.context import BaseContext
+from concurrent.futures.process import _MAX_WINDOWS_WORKERS
+
+
+from .process import LokyProcess, LokyInitMainProcess
+
+# Apparently, on older Python versions, loky cannot handle 61 workers on
+# Windows, only 60: ¯\_(ツ)_/¯
+if sys.version_info < (3, 10):
+    _MAX_WINDOWS_WORKERS = _MAX_WINDOWS_WORKERS - 1
+
+START_METHODS = ["loky", "loky_init_main", "spawn"]
+if sys.platform != "win32":
+    START_METHODS += ["fork", "forkserver"]
+
+_DEFAULT_START_METHOD = None
+
+# Cache for the number of physical cores to avoid repeating subprocess calls.
+# It should not change during the lifetime of the program.
+physical_cores_cache = None + + +def get_context(method=None): + # Try to overload the default context + method = method or _DEFAULT_START_METHOD or "loky" + if method == "fork": + # If 'fork' is explicitly requested, warn user about potential issues. + warnings.warn( + "`fork` start method should not be used with " + "`loky` as it does not respect POSIX. Try using " + "`spawn` or `loky` instead.", + UserWarning, + ) + try: + return mp_get_context(method) + except ValueError: + raise ValueError( + f"Unknown context '{method}'. Value should be in " + f"{START_METHODS}." + ) + + +def set_start_method(method, force=False): + global _DEFAULT_START_METHOD + if _DEFAULT_START_METHOD is not None and not force: + raise RuntimeError("context has already been set") + assert method is None or method in START_METHODS, ( + f"'{method}' is not a valid start_method. It should be in " + f"{START_METHODS}" + ) + + _DEFAULT_START_METHOD = method + + +def get_start_method(): + return _DEFAULT_START_METHOD + + +def cpu_count(only_physical_cores=False): + """Return the number of CPUs the current process can use. + + The returned number of CPUs accounts for: + * the number of CPUs in the system, as given by + ``multiprocessing.cpu_count``; + * the CPU affinity settings of the current process + (available on some Unix systems); + * Cgroup CPU bandwidth limit (available on Linux only, typically + set by docker and similar container orchestration systems); + * the value of the LOKY_MAX_CPU_COUNT environment variable if defined. + and is given as the minimum of these constraints. + + If ``only_physical_cores`` is True, return the number of physical cores + instead of the number of logical cores (hyperthreading / SMT). Note that + this option is not enforced if the number of usable cores is controlled in + any other way such as: process affinity, Cgroup restricted CPU bandwidth + or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical + cores is not found, return the number of logical cores. + + Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for + Python < 3.10), see: + https://bugs.python.org/issue26903. + + It is also always larger or equal to 1. + """ + # Note: os.cpu_count() is allowed to return None in its docstring + os_cpu_count = os.cpu_count() or 1 + if sys.platform == "win32": + # On Windows, attempting to use more than 61 CPUs would result in a + # OS-level error. See https://bugs.python.org/issue26903. According to + # https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups + # it might be possible to go beyond with a lot of extra work but this + # does not look easy. + os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS) + + cpu_count_user = _cpu_count_user(os_cpu_count) + aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1) + + if not only_physical_cores: + return aggregate_cpu_count + + if cpu_count_user < os_cpu_count: + # Respect user setting + return max(cpu_count_user, 1) + + cpu_count_physical, exception = _count_physical_cores() + if cpu_count_physical != "not found": + return cpu_count_physical + + # Fallback to default behavior + if exception is not None: + # warns only the first time + warnings.warn( + "Could not find the number of physical cores for the " + f"following reason:\n{exception}\n" + "Returning the number of logical cores instead. You can " + "silence this warning by setting LOKY_MAX_CPU_COUNT to " + "the number of cores you want to use." 
+        )
+        traceback.print_tb(exception.__traceback__)
+
+    return aggregate_cpu_count
+
+
+def _cpu_count_cgroup(os_cpu_count):
+    # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
+    cpu_max_fname = "/sys/fs/cgroup/cpu.max"
+    cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
+    cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
+    if os.path.exists(cpu_max_fname):
+        # cgroup v2
+        # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
+        with open(cpu_max_fname) as fh:
+            cpu_quota_us, cpu_period_us = fh.read().strip().split()
+    elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
+        # cgroup v1
+        # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
+        with open(cfs_quota_fname) as fh:
+            cpu_quota_us = fh.read().strip()
+        with open(cfs_period_fname) as fh:
+            cpu_period_us = fh.read().strip()
+    else:
+        # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
+        cpu_quota_us = "max"
+        cpu_period_us = 100_000  # unused, for consistency with default values
+
+    if cpu_quota_us == "max":
+        # No active Cgroup quota on a Cgroup-capable platform
+        return os_cpu_count
+    else:
+        cpu_quota_us = int(cpu_quota_us)
+        cpu_period_us = int(cpu_period_us)
+        if cpu_quota_us > 0 and cpu_period_us > 0:
+            return math.ceil(cpu_quota_us / cpu_period_us)
+        else:  # pragma: no cover
+            # Setting a negative cpu_quota_us value is a valid way to disable
+            # cgroup CPU bandwidth limits
+            return os_cpu_count
+
+
+def _cpu_count_affinity(os_cpu_count):
+    # Number of available CPUs given affinity settings
+    if hasattr(os, "sched_getaffinity"):
+        try:
+            return len(os.sched_getaffinity(0))
+        except NotImplementedError:
+            pass
+
+    # On some platforms, os.sched_getaffinity does not exist or raises
+    # NotImplementedError, let's try with psutil if installed.
+    try:
+        import psutil
+
+        p = psutil.Process()
+        if hasattr(p, "cpu_affinity"):
+            return len(p.cpu_affinity())
+
+    except ImportError:  # pragma: no cover
+        if (
+            sys.platform == "linux"
+            and os.environ.get("LOKY_MAX_CPU_COUNT") is None
+        ):
+            # Some platforms don't implement os.sched_getaffinity on Linux,
+            # which can cause severe oversubscription problems. Better warn the
+            # user in this particularly pathological case, which can wreak
+            # havoc, typically on CI workers.
+            warnings.warn(
+                "Failed to inspect CPU affinity constraints on this system. "
+                "Please install psutil or explicitly set LOKY_MAX_CPU_COUNT."
+            )
+
+    # This can happen on platforms that do not implement any kind of CPU
+    # affinity, such as macOS-based platforms.
+    return os_cpu_count
+
+
+def _cpu_count_user(os_cpu_count):
+    """Number of user defined available CPUs"""
+    cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
+
+    cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
+
+    # User defined soft-limit passed as a loky specific environment variable.
+    cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
+
+    return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
+
+
+def _count_physical_cores():
+    """Return a tuple (number of physical cores, exception)
+
+    If the number of physical cores is found, exception is set to None.
+    If it has not been found, return ("not found", exception).
+
+    The number of physical cores is cached to avoid repeating subprocess calls.
+ """ + exception = None + + # First check if the value is cached + global physical_cores_cache + if physical_cores_cache is not None: + return physical_cores_cache, exception + + # Not cached yet, find it + try: + if sys.platform == "linux": + cpu_count_physical = _count_physical_cores_linux() + elif sys.platform == "win32": + cpu_count_physical = _count_physical_cores_win32() + elif sys.platform == "darwin": + cpu_count_physical = _count_physical_cores_darwin() + else: + raise NotImplementedError(f"unsupported platform: {sys.platform}") + + # if cpu_count_physical < 1, we did not find a valid value + if cpu_count_physical < 1: + raise ValueError(f"found {cpu_count_physical} physical cores < 1") + + except Exception as e: + exception = e + cpu_count_physical = "not found" + + # Put the result in cache + physical_cores_cache = cpu_count_physical + + return cpu_count_physical, exception + + +def _count_physical_cores_linux(): + try: + cpu_info = subprocess.run( + "lscpu --parse=core".split(), capture_output=True, text=True + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = {line for line in cpu_info if not line.startswith("#")} + return len(cpu_info) + except: + pass # fallback to /proc/cpuinfo + + cpu_info = subprocess.run( + "cat /proc/cpuinfo".split(), capture_output=True, text=True + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = {line for line in cpu_info if line.startswith("core id")} + return len(cpu_info) + + +def _count_physical_cores_win32(): + try: + cmd = "-Command (Get-CimInstance -ClassName Win32_Processor).NumberOfCores" + cpu_info = subprocess.run( + f"powershell.exe {cmd}".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout.splitlines() + return int(cpu_info[0]) + except: + pass # fallback to wmic (older Windows versions; deprecated now) + + cpu_info = subprocess.run( + "wmic CPU Get NumberOfCores /Format:csv".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout.splitlines() + cpu_info = [ + l.split(",")[1] for l in cpu_info if (l and l != "Node,NumberOfCores") + ] + return sum(map(int, cpu_info)) + + +def _count_physical_cores_darwin(): + cpu_info = subprocess.run( + "sysctl -n hw.physicalcpu".split(), + capture_output=True, + text=True, + ) + cpu_info = cpu_info.stdout + return int(cpu_info) + + +class LokyContext(BaseContext): + """Context relying on the LokyProcess.""" + + _name = "loky" + Process = LokyProcess + cpu_count = staticmethod(cpu_count) + + def Queue(self, maxsize=0, reducers=None): + """Returns a queue object""" + from .queues import Queue + + return Queue(maxsize, reducers=reducers, ctx=self.get_context()) + + def SimpleQueue(self, reducers=None): + """Returns a queue object""" + from .queues import SimpleQueue + + return SimpleQueue(reducers=reducers, ctx=self.get_context()) + + if sys.platform != "win32": + """For Unix platform, use our custom implementation of synchronize + ensuring that we use the loky.backend.resource_tracker to clean-up + the semaphores in case of a worker crash. 
+ """ + + def Semaphore(self, value=1): + """Returns a semaphore object""" + from .synchronize import Semaphore + + return Semaphore(value=value) + + def BoundedSemaphore(self, value): + """Returns a bounded semaphore object""" + from .synchronize import BoundedSemaphore + + return BoundedSemaphore(value) + + def Lock(self): + """Returns a lock object""" + from .synchronize import Lock + + return Lock() + + def RLock(self): + """Returns a recurrent lock object""" + from .synchronize import RLock + + return RLock() + + def Condition(self, lock=None): + """Returns a condition object""" + from .synchronize import Condition + + return Condition(lock) + + def Event(self): + """Returns an event object""" + from .synchronize import Event + + return Event() + + +class LokyInitMainContext(LokyContext): + """Extra context with LokyProcess, which does load the main module + + This context is used for compatibility in the case ``cloudpickle`` is not + present on the running system. This permits to load functions defined in + the ``main`` module, using proper safeguards. The declaration of the + ``executor`` should be protected by ``if __name__ == "__main__":`` and the + functions and variable used from main should be out of this block. + + This mimics the default behavior of multiprocessing under Windows and the + behavior of the ``spawn`` start method on a posix system. + For more details, see the end of the following section of python doc + https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming + """ + + _name = "loky_init_main" + Process = LokyInitMainProcess + + +# Register loky context so it works with multiprocessing.get_context +ctx_loky = LokyContext() +mp.context._concrete_contexts["loky"] = ctx_loky +mp.context._concrete_contexts["loky_init_main"] = LokyInitMainContext() diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py new file mode 100644 index 0000000000000000000000000000000000000000..f5b7ca6918c882b007e47cfd8258d559de834a6f --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/fork_exec.py @@ -0,0 +1,73 @@ +############################################################################### +# Launch a subprocess using forkexec and make sure only the needed fd are +# shared in the two process. +# +# author: Thomas Moreau and Olivier Grisel +# +import sys +import os +import subprocess + + +def fork_exec(cmd, keep_fds, env=None): + import _posixsubprocess + + # Encoded command args as bytes: + cmd = [os.fsencode(arg) for arg in cmd] + + # Copy the environment variables to set in the child process (also encoded + # as bytes). + env = env or {} + env = {**os.environ, **env} + encoded_env = [] + for key, value in env.items(): + encoded_env.append(os.fsencode(f"{key}={value}")) + + # Fds with fileno larger than 3 (stdin=0, stdout=1, stderr=2) are be closed + # in the child process, except for those passed in keep_fds. 
+ keep_fds = tuple(sorted(map(int, keep_fds))) + errpipe_read, errpipe_write = os.pipe() + + if sys.version_info >= (3, 14): + # Python >= 3.14 removed allow_vfork from _posixsubprocess.fork_exec, + # see https://github.com/python/cpython/pull/121383 + pgid_to_set = [-1] + allow_vfork = [] + elif sys.version_info >= (3, 11): + # Python 3.11 - 3.13 has allow_vfork in _posixsubprocess.fork_exec + pgid_to_set = [-1] + allow_vfork = [subprocess._USE_VFORK] + else: + # Python < 3.11 + pgid_to_set = [] + allow_vfork = [] + + try: + return _posixsubprocess.fork_exec( + cmd, # args + cmd[0:1], # executable_list + True, # close_fds + keep_fds, # pass_fds + None, # cwd + encoded_env, # env + -1, # p2cread + -1, # p2cwrite + -1, # c2pread + -1, # c2pwrite + -1, # errread + -1, # errwrite + errpipe_read, # errpipe_read + errpipe_write, # errpipe_write + False, # restore_signal + False, # call_setsid + *pgid_to_set, # pgid_to_set + None, # gid + None, # extra_groups + None, # uid + -1, # child_umask + None, # preexec_fn + *allow_vfork, # extra flag if vfork is available + ) + finally: + os.close(errpipe_read) + os.close(errpipe_write) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py new file mode 100644 index 0000000000000000000000000000000000000000..58753036cd1096ca5ee5f503c064e404109c3b9c --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_posix.py @@ -0,0 +1,193 @@ +############################################################################### +# Popen for LokyProcess. +# +# author: Thomas Moreau and Olivier Grisel +# +import os +import sys +import signal +import pickle +from io import BytesIO +from multiprocessing import util, process +from multiprocessing.connection import wait +from multiprocessing.context import set_spawning_popen + +from . import reduction, resource_tracker, spawn + + +__all__ = ["Popen"] + + +# +# Wrapper for an fd used while launching a process +# + + +class _DupFd: + def __init__(self, fd): + self.fd = reduction._mk_inheritable(fd) + + def detach(self): + return self.fd + + +# +# Start child process using subprocess.Popen +# + + +class Popen: + method = "loky" + DupFd = _DupFd + + def __init__(self, process_obj): + sys.stdout.flush() + sys.stderr.flush() + self.returncode = None + self._fds = [] + self._launch(process_obj) + + def duplicate_for_child(self, fd): + self._fds.append(fd) + return reduction._mk_inheritable(fd) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + while True: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + else: + break + if pid == self.pid: + if os.WIFSIGNALED(sts): + self.returncode = -os.WTERMSIG(sts) + else: + assert os.WIFEXITED(sts) + self.returncode = os.WEXITSTATUS(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. 
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def terminate(self): + if self.returncode is None: + try: + os.kill(self.pid, signal.SIGTERM) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def _launch(self, process_obj): + + tracker_fd = resource_tracker._resource_tracker.getfd() + + fp = BytesIO() + set_spawning_popen(self) + try: + prep_data = spawn.get_preparation_data( + process_obj._name, + getattr(process_obj, "init_main_module", True), + ) + reduction.dump(prep_data, fp) + reduction.dump(process_obj, fp) + + finally: + set_spawning_popen(None) + + try: + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + # for fd in self._fds: + # _mk_inheritable(fd) + + cmd_python = [sys.executable] + cmd_python += ["-m", self.__module__] + cmd_python += ["--process-name", str(process_obj.name)] + cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))] + reduction._mk_inheritable(child_w) + reduction._mk_inheritable(tracker_fd) + self._fds += [child_r, child_w, tracker_fd] + if os.name == "posix": + mp_tracker_fd = prep_data["mp_tracker_fd"] + self.duplicate_for_child(mp_tracker_fd) + + from .fork_exec import fork_exec + + pid = fork_exec(cmd_python, self._fds, env=process_obj.env) + util.debug( + f"launched python with pid {pid} and cmd:\n{cmd_python}" + ) + self.sentinel = parent_r + + method = "getbuffer" + if not hasattr(fp, method): + method = "getvalue" + with os.fdopen(parent_w, "wb") as f: + f.write(getattr(fp, method)()) + self.pid = pid + finally: + if parent_r is not None: + util.Finalize(self, os.close, (parent_r,)) + for fd in (child_r, child_w): + if fd is not None: + os.close(fd) + + @staticmethod + def thread_is_spawning(): + return True + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser("Command line parser") + parser.add_argument( + "--pipe", type=int, required=True, help="File handle for the pipe" + ) + parser.add_argument( + "--process-name", + type=str, + default=None, + help="Identifier for debugging purpose", + ) + + args = parser.parse_args() + + info = {} + exitcode = 1 + try: + with os.fdopen(args.pipe, "rb") as from_parent: + process.current_process()._inheriting = True + try: + prep_data = pickle.load(from_parent) + spawn.prepare(prep_data) + process_obj = pickle.load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = process_obj._bootstrap() + except Exception: + print("\n\n" + "-" * 80) + print(f"{args.process_name} failed with traceback: ") + print("-" * 80) + import traceback + + print(traceback.format_exc()) + print("\n" + "-" * 80) + finally: + if from_parent is not None: + from_parent.close() + + sys.exit(exitcode) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..4f85f65df5e22bc2342f44c4a59b5e2ece63a81f --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/popen_loky_win32.py @@ -0,0 +1,173 @@ +import os +import sys +import msvcrt +import _winapi +from pickle import load +from multiprocessing import process, util +from multiprocessing.context import set_spawning_popen +from multiprocessing.popen_spawn_win32 import Popen as _Popen + +from . 
import reduction, spawn
+
+
+__all__ = ["Popen"]
+
+#
+#
+#
+
+
+def _path_eq(p1, p2):
+    return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
+
+
+WINENV = hasattr(sys, "_base_executable") and not _path_eq(
+    sys.executable, sys._base_executable
+)
+
+
+def _close_handles(*handles):
+    for handle in handles:
+        _winapi.CloseHandle(handle)
+
+
+#
+# We define a Popen class similar to the one from subprocess, but
+# whose constructor takes a process object as its argument.
+#
+
+
+class Popen(_Popen):
+    """
+    Start a subprocess to run the code of a process object.
+
+    We differ from the CPython implementation in the way we handle environment
+    variables: they can be modified in the child process before any library is
+    imported, in order to control the number of threads in C-level
+    threadpools.
+
+    We also use the loky preparation data, in particular to handle the main
+    module initialization and the loky resource tracker.
+    """
+
+    method = "loky"
+
+    def __init__(self, process_obj):
+        prep_data = spawn.get_preparation_data(
+            process_obj._name, getattr(process_obj, "init_main_module", True)
+        )
+
+        # The read end of the pipe will be duplicated by the child process
+        # -- see spawn_main() in spawn.py.
+        #
+        # bpo-33929: Previously, the read end of the pipe was "stolen" by the
+        # child process, but it leaked a handle if the child process had been
+        # terminated before it could steal the handle from the parent process.
+        rhandle, whandle = _winapi.CreatePipe(None, 0)
+        wfd = msvcrt.open_osfhandle(whandle, 0)
+        cmd = get_command_line(parent_pid=os.getpid(), pipe_handle=rhandle)
+
+        python_exe = spawn.get_executable()
+
+        # copy the environment variables to set in the child process
+        child_env = {**os.environ, **process_obj.env}
+
+        # bpo-35797: When running in a venv, we bypass the redirect
+        # executor and launch our base Python.
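+        # Swap in the base interpreter for the venv redirector, and use the
+        # __PYVENV_LAUNCHER__ environment variable to point the child back at
+        # the venv interpreter.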
+ if WINENV and _path_eq(python_exe, sys.executable): + cmd[0] = python_exe = sys._base_executable + child_env["__PYVENV_LAUNCHER__"] = sys.executable + + cmd = " ".join(f'"{x}"' for x in cmd) + + with open(wfd, "wb") as to_child: + # start process + try: + hp, ht, pid, _ = _winapi.CreateProcess( + python_exe, + cmd, + None, + None, + False, + 0, + child_env, + None, + None, + ) + _winapi.CloseHandle(ht) + except BaseException: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize( + self, _close_handles, (self.sentinel, int(rhandle)) + ) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + +def get_command_line(pipe_handle, parent_pid, **kwds): + """Returns prefix of command line used for spawning a child process.""" + if getattr(sys, "frozen", False): + return [sys.executable, "--multiprocessing-fork", pipe_handle] + else: + prog = ( + "from joblib.externals.loky.backend.popen_loky_win32 import main; " + f"main(pipe_handle={pipe_handle}, parent_pid={parent_pid})" + ) + opts = util._args_from_interpreter_flags() + return [ + spawn.get_executable(), + *opts, + "-c", + prog, + "--multiprocessing-fork", + ] + + +def is_forking(argv): + """Return whether commandline indicates we are forking.""" + if len(argv) >= 2 and argv[1] == "--multiprocessing-fork": + return True + else: + return False + + +def main(pipe_handle, parent_pid=None): + """Run code specified by data received over pipe.""" + assert is_forking(sys.argv), "Not forking" + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, False, parent_pid + ) + else: + source_process = None + new_handle = reduction.duplicate( + pipe_handle, source_process=source_process + ) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + + with os.fdopen(fd, "rb", closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = load(from_parent) + spawn.prepare(preparation_data, parent_sentinel) + self = load(from_parent) + finally: + del process.current_process()._inheriting + + exitcode = self._bootstrap(parent_sentinel) + sys.exit(exitcode) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py new file mode 100644 index 0000000000000000000000000000000000000000..356255094b7647be8de6998a8752dd7807b25e10 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/process.py @@ -0,0 +1,85 @@ +############################################################################### +# LokyProcess implementation +# +# authors: Thomas Moreau and Olivier Grisel +# +# based on multiprocessing/process.py (17/02/2017) +# +import sys +from multiprocessing.context import assert_spawning +from multiprocessing.process import BaseProcess + + +class LokyProcess(BaseProcess): + _start_method = "loky" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + init_main_module=False, + env=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + ) + self.env = {} if env is None else env + self.authkey = self.authkey + self.init_main_module = 
init_main_module + + @staticmethod + def _Popen(process_obj): + if sys.platform == "win32": + from .popen_loky_win32 import Popen + else: + from .popen_loky_posix import Popen + return Popen(process_obj) + + +class LokyInitMainProcess(LokyProcess): + _start_method = "loky_init_main" + + def __init__( + self, + group=None, + target=None, + name=None, + args=(), + kwargs={}, + daemon=None, + ): + super().__init__( + group=group, + target=target, + name=name, + args=args, + kwargs=kwargs, + daemon=daemon, + init_main_module=True, + ) + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + + +class AuthenticationKey(bytes): + def __reduce__(self): + try: + assert_spawning(self) + except RuntimeError: + raise TypeError( + "Pickling an AuthenticationKey object is " + "disallowed for security reasons" + ) + return AuthenticationKey, (bytes(self),) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..5afd99b420fbc480ed5eb743333a687110a90e49 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/queues.py @@ -0,0 +1,236 @@ +############################################################################### +# Queue and SimpleQueue implementation for loky +# +# authors: Thomas Moreau, Olivier Grisel +# +# based on multiprocessing/queues.py (16/02/2017) +# * Add some custom reducers for the Queues/SimpleQueue to tweak the +# pickling process. (overload Queue._feed/SimpleQueue.put) +# +import os +import sys +import errno +import weakref +import threading +from multiprocessing import util +from multiprocessing.queues import ( + Full, + Queue as mp_Queue, + SimpleQueue as mp_SimpleQueue, + _sentinel, +) +from multiprocessing.context import assert_spawning + +from .reduction import dumps + + +__all__ = ["Queue", "SimpleQueue", "Full"] + + +class Queue(mp_Queue): + def __init__(self, maxsize=0, reducers=None, ctx=None): + super().__init__(maxsize=maxsize, ctx=ctx) + self._reducers = reducers + + # Use custom queue set/get state to be able to reduce the custom reducers + def __getstate__(self): + assert_spawning(self) + return ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) + + def __setstate__(self, state): + ( + self._ignore_epipe, + self._maxsize, + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + self._sem, + self._opid, + ) = state + if sys.version_info >= (3, 9): + self._reset() + else: + self._after_fork() + + # Overload _start_thread to correctly call our custom _feed + def _start_thread(self): + util.debug("Queue._start_thread()") + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=( + self._buffer, + self._notempty, + self._send_bytes, + self._wlock, + self._writer.close, + self._reducers, + self._ignore_epipe, + self._on_queue_feeder_error, + self._sem, + ), + name="QueueFeederThread", + ) + self._thread.daemon = True + + util.debug("doing self._thread.start()") + self._thread.start() + util.debug("... done self._thread.start()") + + # On process exit we will wait for data to be flushed to pipe. + # + # However, if this process created the queue then all + # processes which use the queue will be descendants of this + # process. 
Therefore waiting for the queue to be flushed + # is pointless once all the child processes have been joined. + created_by_this_process = self._opid == os.getpid() + if not self._joincancelled and not created_by_this_process: + self._jointhread = util.Finalize( + self._thread, + Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5, + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = util.Finalize( + self, + Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10, + ) + + # Overload the _feed methods to use our custom pickling strategy. + @staticmethod + def _feed( + buffer, + notempty, + send_bytes, + writelock, + close, + reducers, + ignore_epipe, + onerror, + queue_sem, + ): + util.debug("starting thread to feed data to pipe") + nacquire = notempty.acquire + nrelease = notempty.release + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != "win32": + wacquire = writelock.acquire + wrelease = writelock.release + else: + wacquire = None + + while True: + try: + nacquire() + try: + if not buffer: + nwait() + finally: + nrelease() + try: + while True: + obj = bpopleft() + if obj is sentinel: + util.debug("feeder thread got sentinel -- exiting") + close() + return + + # serialize the data before acquiring the lock + obj_ = dumps(obj, reducers=reducers) + if wacquire is None: + send_bytes(obj_) + else: + wacquire() + try: + send_bytes(obj_) + finally: + wrelease() + # Remove references early to avoid leaking memory + del obj, obj_ + except IndexError: + pass + except BaseException as e: + if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. + if util.is_exiting(): + util.info(f"error in queue thread: {e}") + return + else: + queue_sem.release() + onerror(e, obj) + + def _on_queue_feeder_error(self, e, obj): + """ + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. 
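+
+        A minimal override sketch (hypothetical subclass, for illustration):
+
+            class LoggingQueue(Queue):
+                def _on_queue_feeder_error(self, e, obj):
+                    # log and drop the item that could not be serialized
+                    import logging
+
+                    logging.getLogger(__name__).warning(
+                        "dropping unsendable item %r: %r", obj, e
+                    )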
+ """ + import traceback + + traceback.print_exc() + + +class SimpleQueue(mp_SimpleQueue): + def __init__(self, reducers=None, ctx=None): + super().__init__(ctx=ctx) + + # Add possiblity to use custom reducers + self._reducers = reducers + + def close(self): + self._reader.close() + self._writer.close() + + # Use custom queue set/get state to be able to reduce the custom reducers + def __getstate__(self): + assert_spawning(self) + return ( + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + ) + + def __setstate__(self, state): + ( + self._reader, + self._writer, + self._reducers, + self._rlock, + self._wlock, + ) = state + + # Overload put to use our customizable reducer + def put(self, obj): + # serialize the data before acquiring the lock + obj = dumps(obj, reducers=self._reducers) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..c6f9297c5b605314626b8f4dcbc90f22a2a5e628 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/reduction.py @@ -0,0 +1,223 @@ +############################################################################### +# Customizable Pickler with some basic reducers +# +# author: Thomas Moreau +# +# adapted from multiprocessing/reduction.py (17/02/2017) +# * Replace the ForkingPickler with a similar _LokyPickler, +# * Add CustomizableLokyPickler to allow customizing pickling process +# on the fly. +# +import copyreg +import io +import functools +import types +import sys +import os + +from multiprocessing import util +from pickle import loads, HIGHEST_PROTOCOL + +############################################################################### +# Enable custom pickling in Loky. + +_dispatch_table = {} + + +def register(type_, reduce_function): + _dispatch_table[type_] = reduce_function + + +############################################################################### +# Registers extra pickling routines to improve picklization for loky + + +# make methods picklable +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) + + +class _C: + def f(self): + pass + + @classmethod + def h(cls): + pass + + +register(type(_C().f), _reduce_method) +register(type(_C.h), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) + + +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +# Make partial func pickable +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) + + +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) + + +register(functools.partial, _reduce_partial) + +if sys.platform != "win32": + from ._posix_reduction import _mk_inheritable # noqa: F401 +else: + from . 
import _win_reduction # noqa: F401 + +# global variable to change the pickler behavior +try: + from joblib.externals import cloudpickle # noqa: F401 + + DEFAULT_ENV = "cloudpickle" +except ImportError: + # If cloudpickle is not present, fallback to pickle + DEFAULT_ENV = "pickle" + +ENV_LOKY_PICKLER = os.environ.get("LOKY_PICKLER", DEFAULT_ENV) +_LokyPickler = None +_loky_pickler_name = None + + +def set_loky_pickler(loky_pickler=None): + global _LokyPickler, _loky_pickler_name + + if loky_pickler is None: + loky_pickler = ENV_LOKY_PICKLER + + loky_pickler_cls = None + + # The default loky_pickler is cloudpickle + if loky_pickler in ["", None]: + loky_pickler = "cloudpickle" + + if loky_pickler == _loky_pickler_name: + return + + if loky_pickler == "cloudpickle": + from joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls + else: + try: + from importlib import import_module + + module_pickle = import_module(loky_pickler) + loky_pickler_cls = module_pickle.Pickler + except (ImportError, AttributeError) as e: + extra_info = ( + "\nThis error occurred while setting loky_pickler to" + f" '{loky_pickler}', as required by the env variable " + "LOKY_PICKLER or the function set_loky_pickler." + ) + e.args = (e.args[0] + extra_info,) + e.args[1:] + e.msg = e.args[0] + raise e + + util.debug( + f"Using '{loky_pickler if loky_pickler else 'cloudpickle'}' for " + "serialization." + ) + + class CustomizablePickler(loky_pickler_cls): + _loky_pickler_cls = loky_pickler_cls + + def _set_dispatch_table(self, dispatch_table): + for ancestor_class in self._loky_pickler_cls.mro(): + dt_attribute = getattr(ancestor_class, "dispatch_table", None) + if isinstance(dt_attribute, types.MemberDescriptorType): + # Ancestor class (typically _pickle.Pickler) has a + # member_descriptor for its "dispatch_table" attribute. Use + # it to set the dispatch_table as a member instead of a + # dynamic attribute in the __dict__ of the instance, + # otherwise it will not be taken into account by the C + # implementation of the dump method if a subclass defines a + # class-level dispatch_table attribute as was done in + # cloudpickle 1.6.0: + # https://github.com/joblib/loky/pull/260 + dt_attribute.__set__(self, dispatch_table) + break + + # On top of member descriptor set, also use setattr such that code + # that directly access self.dispatch_table gets a consistent view + # of the same table. + self.dispatch_table = dispatch_table + + def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL): + loky_pickler_cls.__init__(self, writer, protocol=protocol) + if reducers is None: + reducers = {} + + if hasattr(self, "dispatch_table"): + # Force a copy that we will update without mutating the + # any class level defined dispatch_table. + loky_dt = dict(self.dispatch_table) + else: + # Use standard reducers as bases + loky_dt = copyreg.dispatch_table.copy() + + # Register loky specific reducers + loky_dt.update(_dispatch_table) + + # Set the new dispatch table, taking care of the fact that we + # need to use the member_descriptor when we inherit from a + # subclass of the C implementation of the Pickler base class + # with an class level dispatch_table attribute. 
+ self._set_dispatch_table(loky_dt) + + # Register the reducers + for type, reduce_func in reducers.items(): + self.register(type, reduce_func) + + def register(self, type, reduce_func): + """Attach a reducer function to a given type in the dispatch table.""" + self.dispatch_table[type] = reduce_func + + _LokyPickler = CustomizablePickler + _loky_pickler_name = loky_pickler + + +def get_loky_pickler_name(): + global _loky_pickler_name + return _loky_pickler_name + + +def get_loky_pickler(): + global _LokyPickler + return _LokyPickler + + +# Set it to its default value +set_loky_pickler() + + +def dump(obj, file, reducers=None, protocol=None): + """Replacement for pickle.dump() using _LokyPickler.""" + global _LokyPickler + _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj) + + +def dumps(obj, reducers=None, protocol=None): + global _LokyPickler + + buf = io.BytesIO() + dump(obj, buf, reducers=reducers, protocol=protocol) + return buf.getbuffer() + + +__all__ = ["dump", "dumps", "loads", "register", "set_loky_pickler"] + +if sys.platform == "win32": + from multiprocessing.reduction import duplicate + + __all__ += ["duplicate"] diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1aea1ded0d1ad77e1353d39da6e575f7d05292 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/resource_tracker.py @@ -0,0 +1,351 @@ +############################################################################### +# Server process to keep track of unlinked resources, like folders and +# semaphores and clean them. +# +# author: Thomas Moreau +# +# Adapted from multiprocessing/resource_tracker.py +# * add some VERBOSE logging, +# * add support to track folders, +# * add Windows support, +# * refcounting scheme to avoid unlinking resources still in use. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. The resource_tracker implements a reference counting scheme: each time +# a Python process anticipates the shared usage of a resource by another +# process, it signals the resource_tracker of this shared usage, and in return, +# the resource_tracker increments the resource's reference count by 1. +# Similarly, when access to a resource is closed by a Python process, the +# process notifies the resource_tracker by asking it to decrement the +# resource's reference count by 1. When the reference count drops to 0, the +# resource_tracker attempts to clean up the underlying resource. + +# Finally, every other process connected to the resource tracker has a copy of +# the writable end of the pipe used to communicate with it, so the resource +# tracker gets EOF when all other processes have exited. Then the +# resource_tracker process unlinks any remaining leaked resources (with +# reference count above 0) + +# For semaphores, this is important because the system only supports a limited +# number of named semaphores, and they will not be automatically removed till +# the next reboot. Without this resource tracker process, "killall python" +# would probably leave unlinked semaphores. + +# Note that this behavior differs from CPython's resource_tracker, which only +# implements list of shared resources, and not a proper refcounting scheme. 
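+# (On the wire, each request is one newline-terminated ASCII line of the form
+# "<CMD>:<name>:<rtype>", e.g. "MAYBE_UNLINK:/loky-123-abc:semlock"; see the
+# parsing loop in main() below. The example name is illustrative.)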
+# Also, CPython's resource tracker will only attempt to clean up those shared
+# resources once all processes connected to the resource tracker have exited.
+
+
+import os
+import shutil
+import sys
+import signal
+import warnings
+from _multiprocessing import sem_unlink
+from multiprocessing import util
+from multiprocessing.resource_tracker import (
+    ResourceTracker as _ResourceTracker,
+)
+
+from . import spawn
+
+if sys.platform == "win32":
+    import _winapi
+    import msvcrt
+    from multiprocessing.reduction import duplicate
+
+
+__all__ = ["ensure_running", "register", "unregister"]
+
+_HAVE_SIGMASK = hasattr(signal, "pthread_sigmask")
+_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
+
+_CLEANUP_FUNCS = {"folder": shutil.rmtree, "file": os.unlink}
+
+if os.name == "posix":
+    _CLEANUP_FUNCS["semlock"] = sem_unlink
+
+
+VERBOSE = False
+
+
+class ResourceTracker(_ResourceTracker):
+    """Resource tracker with a refcounting scheme.
+
+    This class is an extension of the multiprocessing ResourceTracker class
+    which implements a reference counting scheme to avoid unlinking shared
+    resources still in use in other processes.
+
+    This feature is notably used by `joblib.Parallel` to share temporary
+    folders and memory-mapped files between the main process and the worker
+    processes.
+
+    The actual implementation of the refcounting scheme is in the main
+    function, which is run in a dedicated process.
+    """
+
+    def maybe_unlink(self, name, rtype):
+        """Decrement the refcount of a resource, and delete it if it hits 0"""
+        self.ensure_running()
+        self._send("MAYBE_UNLINK", name, rtype)
+
+    def ensure_running(self):
+        """Make sure that the resource tracker process is running.
+
+        This can be run from any process. Usually a child process will use
+        the resource created by its parent."""
+        with self._lock:
+            if self._fd is not None:
+                # resource tracker was launched before, is it still running?
+                if self._check_alive():
+                    # => still alive
+                    return
+                # => dead, launch it again
+                os.close(self._fd)
+                if os.name == "posix":
+                    try:
+                        # At this point, the resource_tracker process has been
+                        # killed or crashed. Let's remove the process entry
+                        # from the process table to avoid zombie processes.
+                        os.waitpid(self._pid, 0)
+                    except OSError:
+                        # The process was terminated or is a child of an
+                        # ancestor of the current process.
+                        pass
+                self._fd = None
+                self._pid = None
+
+                warnings.warn(
+                    "resource_tracker: process died unexpectedly, "
+                    "relaunching. Some folders/semaphores might "
+                    "leak."
+                )
+
+            fds_to_pass = []
+            try:
+                fds_to_pass.append(sys.stderr.fileno())
+            except Exception:
+                pass
+
+            r, w = os.pipe()
+            if sys.platform == "win32":
+                _r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
+                os.close(r)
+                r = _r
+
+            cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
+            try:
+                fds_to_pass.append(r)
+                # The child process will outlive us, so there is no need to
+                # wait on its pid.
+                exe = spawn.get_executable()
+                args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
+                util.debug(f"launching resource tracker: {args}")
+                # bpo-33613: Register a signal mask that will block the
+                # signals. This signal mask will be inherited by the child
+                # that is going to be spawned and will protect the child from a
+                # race condition that can make the child die before it
+                # registers signal handlers for SIGINT and SIGTERM. The mask is
+                # unregistered after spawning the child.
+ try: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_BLOCK, _IGNORED_SIGNALS + ) + pid = spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask( + signal.SIG_UNBLOCK, _IGNORED_SIGNALS + ) + except BaseException: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + if sys.platform == "win32": + _winapi.CloseHandle(r) + else: + os.close(r) + + def __del__(self): + # ignore error due to trying to clean up child process which has already been + # shutdown on windows See https://github.com/joblib/loky/pull/450 + # This is only required if __del__ is defined + if not hasattr(_ResourceTracker, "__del__"): + return + try: + super().__del__() + except ChildProcessError: + pass + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +maybe_unlink = _resource_tracker.maybe_unlink +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + + +def main(fd, verbose=0): + """Run resource tracker.""" + # protect the process from ^C and "killall python" etc + if verbose: + util.log_to_stderr(level=util.DEBUG) + + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + if verbose: + util.debug("Main resource tracker is running") + + registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + if sys.platform == "win32": + fd = msvcrt.open_osfhandle(fd, os.O_RDONLY) + with open(fd, "rb") as f: + while True: + line = f.readline() + if line == b"": # EOF + break + try: + splitted = line.strip().decode("ascii").split(":") + # name can potentially contain separator symbols (for + # instance folders on Windows) + cmd, name, rtype = ( + splitted[0], + ":".join(splitted[1:-1]), + splitted[-1], + ) + + if cmd == "PROBE": + continue + + if rtype not in _CLEANUP_FUNCS: + raise ValueError( + f"Cannot register {name} for automatic cleanup: " + f"unknown resource type ({rtype}). 
Resource type " + "should be one of the following: " + f"{list(_CLEANUP_FUNCS.keys())}" + ) + + if cmd == "REGISTER": + if name not in registry[rtype]: + registry[rtype][name] = 1 + else: + registry[rtype][name] += 1 + + if verbose: + util.debug( + "[ResourceTracker] incremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + elif cmd == "UNREGISTER": + del registry[rtype][name] + if verbose: + util.debug( + f"[ResourceTracker] unregister {name} {rtype}: " + f"registry({len(registry)})" + ) + elif cmd == "MAYBE_UNLINK": + registry[rtype][name] -= 1 + if verbose: + util.debug( + "[ResourceTracker] decremented refcount of " + f"{rtype} {name} " + f"(current {registry[rtype][name]})" + ) + + if registry[rtype][name] == 0: + del registry[rtype][name] + try: + if verbose: + util.debug( + f"[ResourceTracker] unlink {name}" + ) + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn( + f"resource_tracker: {name}: {e!r}" + ) + + else: + raise RuntimeError(f"unrecognized command {cmd!r}") + except BaseException: + try: + sys.excepthook(*sys.exc_info()) + except BaseException: + pass + finally: + # all processes have terminated; cleanup any remaining resources + def _unlink_resources(rtype_registry, rtype): + if rtype_registry: + try: + warnings.warn( + "resource_tracker: There appear to be " + f"{len(rtype_registry)} leaked {rtype} objects to " + "clean up at shutdown" + ) + except Exception: + pass + for name in rtype_registry: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore clean it up. + try: + _CLEANUP_FUNCS[rtype](name) + if verbose: + util.debug(f"[ResourceTracker] unlink {name}") + except Exception as e: + warnings.warn(f"resource_tracker: {name}: {e!r}") + + for rtype, rtype_registry in registry.items(): + if rtype == "folder": + continue + else: + _unlink_resources(rtype_registry, rtype) + + # The default cleanup routine for folders deletes everything inside + # those folders recursively, which can include other resources tracked + # by the resource tracker). To limit the risk of the resource tracker + # attempting to delete twice a resource (once as part of a tracked + # folder, and once as a resource), we delete the folders after all + # other resource types. 
+ if "folder" in registry: + _unlink_resources(registry["folder"], "folder") + + if verbose: + util.debug("resource tracker shut down") + + +def spawnv_passfds(path, args, passfds): + if sys.platform != "win32": + args = [arg.encode("utf-8") for arg in args] + path = path.encode("utf-8") + return util.spawnv_passfds(path, args, passfds) + else: + passfds = sorted(passfds) + cmd = " ".join(f'"{x}"' for x in args) + try: + _, ht, pid, _ = _winapi.CreateProcess( + path, cmd, None, None, True, 0, None, None, None + ) + _winapi.CloseHandle(ht) + except BaseException: + pass + return pid diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..9a6ef9d9770ee651bf4a0112da98257cb739836a --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/spawn.py @@ -0,0 +1,244 @@ +############################################################################### +# Prepares and processes the data to setup the new process environment +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from multiprocessing/spawn.py (17/02/2017) +# * Improve logging data +# +import os +import sys +import runpy +import textwrap +import types +from multiprocessing import process, util + + +if sys.platform != "win32": + WINEXE = False + WINSERVICE = False +else: + import msvcrt + from multiprocessing.reduction import duplicate + + WINEXE = sys.platform == "win32" and getattr(sys, "frozen", False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, "python.exe") +else: + _python_exe = sys.executable + + +def get_executable(): + return _python_exe + + +def _check_not_importing_main(): + if getattr(process.current_process(), "_inheriting", False): + raise RuntimeError( + textwrap.dedent( + """\ + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.""" + ) + ) + + +def get_preparation_data(name, init_main_module=True): + """Return info about parent needed by child to unpickle process object.""" + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=bytes(process.current_process().authkey), + name=name, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + ) + + # Send sys_path and make sure the current directory will not be changed + d["sys_path"] = [p if p != "" else process.ORIGINAL_DIR for p in sys.path] + + # Make sure to pass the information if the multiprocessing logger is active + if util._logger is not None: + d["log_level"] = util._logger.getEffectiveLevel() + if util._logger.handlers: + h = util._logger.handlers[0] + d["log_fmt"] = h.formatter._fmt + + # Tell the child how to communicate with the resource_tracker + from .resource_tracker import _resource_tracker + + _resource_tracker.ensure_running() + if sys.platform == "win32": + d["tracker_fd"] = msvcrt.get_osfhandle(_resource_tracker._fd) + else: + d["tracker_fd"] = _resource_tracker._fd + + if os.name == "posix": + # joblib/loky#242: allow loky processes to retrieve the resource + # tracker of their parent in case the child processes depickles + # shared_memory objects, that are still tracked by multiprocessing's + # resource_tracker by default. + # XXX: this is a workaround that may be error prone: in the future, it + # would be better to have loky subclass multiprocessing's shared_memory + # to force registration of shared_memory segments via loky's + # resource_tracker. + from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + # multiprocessing's resource_tracker must be running before loky + # process is created (othewise the child won't be able to use it if it + # is created later on) + mp_resource_tracker.ensure_running() + d["mp_tracker_fd"] = mp_resource_tracker._fd + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + if init_main_module: + main_module = sys.modules["__main__"] + try: + main_mod_name = getattr(main_module.__spec__, "name", None) + except BaseException: + main_mod_name = None + if main_mod_name is not None: + d["init_main_from_name"] = main_mod_name + elif sys.platform != "win32" or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, "__file__", None) + if main_path is not None: + if ( + not os.path.isabs(main_path) + and process.ORIGINAL_DIR is not None + ): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d["init_main_from_path"] = os.path.normpath(main_path) + + return d + + +# +# Prepare current process +# +old_main_modules = [] + + +def prepare(data, parent_sentinel=None): + """Try to get current process ready to unpickle process object.""" + if "name" in data: + process.current_process().name = data["name"] + + if "authkey" in data: + process.current_process().authkey = data["authkey"] + + if "log_to_stderr" in data and data["log_to_stderr"]: + util.log_to_stderr() + + if "log_level" in data: + util.get_logger().setLevel(data["log_level"]) + + if "log_fmt" in data: + import logging + + util.get_logger().handlers[0].setFormatter( + logging.Formatter(data["log_fmt"]) + ) + + if "sys_path" in data: + sys.path = data["sys_path"] + + if "sys_argv" in data: + sys.argv = data["sys_argv"] + + if "dir" in data: + 
os.chdir(data["dir"]) + + if "orig_dir" in data: + process.ORIGINAL_DIR = data["orig_dir"] + + if "mp_tracker_fd" in data: + from multiprocessing.resource_tracker import ( + _resource_tracker as mp_resource_tracker, + ) + + mp_resource_tracker._fd = data["mp_tracker_fd"] + if "tracker_fd" in data: + from .resource_tracker import _resource_tracker + + if sys.platform == "win32": + handle = data["tracker_fd"] + handle = duplicate(handle, source_process=parent_sentinel) + _resource_tracker._fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) + else: + _resource_tracker._fd = data["tracker_fd"] + + if "init_main_from_name" in data: + _fixup_main_from_name(data["init_main_from_name"]) + elif "init_main_from_path" in data: + _fixup_main_from_path(data["init_main_from_path"]) + + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules["__main__"] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module( + mod_name, run_name="__mp_main__", alter_sys=True + ) + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules["__main__"] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == "ipython": + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, "__file__", None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules["__main__"] = sys.modules["__mp_main__"] = main_module diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..18db3e34db979240b4a4a943ea6931db3091321d --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/backend/synchronize.py @@ -0,0 +1,409 @@ +############################################################################### +# Synchronization primitives based on our SemLock implementation +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted 
from multiprocessing/synchronize.py (17/02/2017) +# * Remove ctx argument for compatibility reason +# * Registers a cleanup function with the loky resource_tracker to remove the +# semaphore when the process dies instead. +# +# TODO: investigate which Python version is required to be able to use +# multiprocessing.resource_tracker and therefore multiprocessing.synchronize +# instead of a loky-specific fork. + +import os +import sys +import tempfile +import threading +import _multiprocessing +from time import time as _time +from multiprocessing import process, util +from multiprocessing.context import assert_spawning + +from . import resource_tracker + +__all__ = [ + "Lock", + "RLock", + "Semaphore", + "BoundedSemaphore", + "Condition", + "Event", +] +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. +# See issue 3770 +try: + from _multiprocessing import SemLock as _SemLock + from _multiprocessing import sem_unlink +except ImportError: + raise ImportError( + "This platform lacks a functioning sem_open" + " implementation, therefore, the required" + " synchronization primitives needed will not" + " function, see issue 3770." + ) + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = range(2) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + + +class SemLock: + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, name=None): + # unlink_now is only used on win32 or when we are using fork. + unlink_now = False + if name is None: + # Try to find an unused name for the SemLock instance. + for _ in range(100): + try: + self._semlock = _SemLock( + kind, value, maxvalue, SemLock._make_name(), unlink_now + ) + except FileExistsError: # pragma: no cover + pass + else: + break + else: # pragma: no cover + raise FileExistsError("cannot find name for semaphore") + else: + self._semlock = _SemLock(kind, value, maxvalue, name, unlink_now) + self.name = name + util.debug( + f"created semlock with handle {self._semlock.handle} and name " + f'"{self.name}"' + ) + + self._make_methods() + + def _after_fork(obj): + obj._semlock._after_fork() + + util.register_after_fork(self, _after_fork) + + # When the object is garbage collected or the + # process shuts down we unlink the semaphore name + resource_tracker.register(self._semlock.name, "semlock") + util.Finalize( + self, SemLock._cleanup, (self._semlock.name,), exitpriority=0 + ) + + @staticmethod + def _cleanup(name): + try: + sem_unlink(name) + except FileNotFoundError: + # Already unlinked, possibly by user code: ignore and make sure to + # unregister the semaphore from the resource tracker. 
+ pass + finally: + resource_tracker.unregister(name, "semlock") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.acquire() + + def __exit__(self, *args): + return self._semlock.release() + + def __getstate__(self): + assert_spawning(self) + sl = self._semlock + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _SemLock._rebuild(*state) + util.debug( + f'recreated blocker with handle {state[0]!r} and name "{state[3]}"' + ) + self._make_methods() + + @staticmethod + def _make_name(): + # OSX does not support long names for semaphores + return f"/loky-{os.getpid()}-{next(SemLock._rand)}" + + +# +# Semaphore +# + + +class Semaphore(SemLock): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) + + def get_value(self): + if sys.platform == "darwin": + raise NotImplementedError("OSX does not implement sem_getvalue") + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return f"<{self.__class__.__name__}(value={value})>" + + +# +# Bounded semaphore +# + + +class BoundedSemaphore(Semaphore): + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, value) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = "unknown" + return ( + f"<{self.__class__.__name__}(value={value}, " + f"maxvalue={self._semlock.maxvalue})>" + ) + + +# +# Non-recursive lock +# + + +class Lock(SemLock): + def __init__(self): + super().__init__(SEMAPHORE, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + elif self._semlock._get_value() == 1: + name = "None" + elif self._semlock._count() > 0: + name = "SomeOtherThread" + else: + name = "SomeOtherProcess" + except Exception: + name = "unknown" + return f"<{self.__class__.__name__}(owner={name})>" + + +# +# Recursive lock +# + + +class RLock(SemLock): + def __init__(self): + super().__init__(RECURSIVE_MUTEX, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != "MainThread": + name = f"{name}|{threading.current_thread().name}" + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = "None", 0 + elif self._semlock._count() > 0: + name, count = "SomeOtherThread", "nonzero" + else: + name, count = "SomeOtherProcess", "nonzero" + except Exception: + name, count = "unknown", "unknown" + return f"<{self.__class__.__name__}({name}, {count})>" + + +# +# Condition variable +# + + +class Condition: + def __init__(self, lock=None): + self._lock = lock or RLock() + self._sleeping_count = Semaphore(0) + self._woken_count = Semaphore(0) + self._wait_semaphore = Semaphore(0) + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) + + def __setstate__(self, state): + ( + self._lock, + self._sleeping_count, + self._woken_count, + self._wait_semaphore, + ) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + 
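+        # These aliases are not carried through pickling (__getstate__ omits
+        # them), so __setstate__ calls _make_methods() again to recreate them
+        # after unpickling.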
self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = ( + self._sleeping_count._semlock._get_value() + - self._woken_count._semlock._get_value() + ) + except Exception: + num_waiters = "unknown" + return f"<{self.__class__.__name__}({self._lock}, {num_waiters})>" + + def wait(self, timeout=None): + assert ( + self._lock._semlock._is_mine() + ), "must acquire() condition before using wait()" + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for _ in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for _ in range(count): + self._lock.acquire() + + def notify(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + if self._sleeping_count.acquire(False): # try grabbing a sleeper + self._wait_semaphore.release() # wake up one sleeper + self._woken_count.acquire() # wait for the sleeper to wake + + # rezero _wait_semaphore in case a timeout just happened + self._wait_semaphore.acquire(False) + + def notify_all(self): + assert self._lock._semlock._is_mine(), "lock is not owned" + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + sleepers = 0 + while self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for _ in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = _time() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - _time() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +# +# Event +# + + +class Event: + def __init__(self): + self._cond = Condition(Lock()) + self._flag = Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False diff --git a/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py b/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa089f7a1bf9b577455775f6d6249baf4bd430de --- /dev/null +++ 
b/lib/python3.10/site-packages/joblib/externals/loky/backend/utils.py
@@ -0,0 +1,181 @@
+import os
+import sys
+import time
+import errno
+import signal
+import warnings
+import subprocess
+import traceback
+
+try:
+    import psutil
+except ImportError:
+    psutil = None
+
+
+def kill_process_tree(process, use_psutil=True):
+    """Terminate a process and its descendants with SIGKILL"""
+    if use_psutil and psutil is not None:
+        _kill_process_tree_with_psutil(process)
+    else:
+        _kill_process_tree_without_psutil(process)
+
+
+def recursive_terminate(process, use_psutil=True):
+    warnings.warn(
+        "recursive_terminate is deprecated in loky 3.2, use "
+        "kill_process_tree instead",
+        DeprecationWarning,
+    )
+    kill_process_tree(process, use_psutil=use_psutil)
+
+
+def _kill_process_tree_with_psutil(process):
+    try:
+        descendants = psutil.Process(process.pid).children(recursive=True)
+    except psutil.NoSuchProcess:
+        return
+
+    # Kill the descendants in reverse order so that children are killed
+    # before their parents when processes are nested more deeply.
+    for descendant in descendants[::-1]:
+        try:
+            descendant.kill()
+        except psutil.NoSuchProcess:
+            pass
+
+    try:
+        psutil.Process(process.pid).kill()
+    except psutil.NoSuchProcess:
+        pass
+    process.join()
+
+
+def _kill_process_tree_without_psutil(process):
+    """Terminate a process and its descendants."""
+    try:
+        if sys.platform == "win32":
+            _windows_taskkill_process_tree(process.pid)
+        else:
+            _posix_recursive_kill(process.pid)
+    except Exception:  # pragma: no cover
+        details = traceback.format_exc()
+        warnings.warn(
+            "Failed to kill subprocesses on this platform. Please install "
+            "psutil: https://github.com/giampaolo/psutil\n"
+            f"Details:\n{details}"
+        )
+        # In case we cannot introspect or kill the descendants, we fall back to
+        # only killing the main process.
+        #
+        # Note: on Windows, process.kill() is an alias for process.terminate()
+        # which in turn calls the Win32 API function TerminateProcess().
+        process.kill()
+        process.join()
+
+
+def _windows_taskkill_process_tree(pid):
+    # On Windows, the taskkill command with the `/T` option terminates a given
+    # process pid and its children.
+    try:
+        subprocess.check_output(
+            ["taskkill", "/F", "/T", "/PID", str(pid)], stderr=None
+        )
+    except subprocess.CalledProcessError as e:
+        # On Windows, taskkill returns 128 or 255 when no process is found.
+        if e.returncode not in [128, 255]:
+            # Let's raise to let the caller log the error details in a
+            # warning and only kill the root process.
+            raise  # pragma: no cover
+
+
+def _kill(pid):
+    # Not all systems (e.g. Windows) have a SIGKILL, but the C specification
+    # mandates a SIGTERM signal. While Windows is handled specifically above,
+    # let's try to be safe for other hypothetical platforms that only have
+    # SIGTERM without SIGKILL.
+    kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
+    try:
+        os.kill(pid, kill_signal)
+    except OSError as e:
+        # If OSError is raised with [Errno 3] (no such process), the process
+        # is already terminated; otherwise, re-raise the error and let the
+        # top-level function emit a warning and fall back to killing only the
+        # main process.
+        if e.errno != errno.ESRCH:
+            raise  # pragma: no cover
+
+
+def _posix_recursive_kill(pid):
+    """Recursively kill the descendants of a process before killing it."""
+    try:
+        children_pids = subprocess.check_output(
+            ["pgrep", "-P", str(pid)], stderr=None, text=True
+        )
+    except subprocess.CalledProcessError as e:
+        # `pgrep` returns 1 when no child process has been found
+        if e.returncode == 1:
+            children_pids = ""
+        else:
+            raise  # pragma: no cover
+
+    # Split the output into one child pid per line, ignoring the trailing
+    # empty line
+    for cpid in children_pids.splitlines():
+        cpid = int(cpid)
+        _posix_recursive_kill(cpid)
+
+    _kill(pid)
+
+
+def get_exitcodes_terminated_worker(processes):
+    """Return a formatted string with the exitcodes of terminated workers.
+
+    If necessary, wait (up to 0.25s) for the system to correctly set the
+    exitcode of one terminated worker.
+    """
+    patience = 5
+
+    # Collect the exitcodes of the terminated workers. There should be at
+    # least one. If not, wait a bit for the system to correctly set the
+    # exitcode of the terminated worker.
+    exitcodes = [
+        p.exitcode for p in list(processes.values()) if p.exitcode is not None
+    ]
+    while not exitcodes and patience > 0:
+        patience -= 1
+        exitcodes = [
+            p.exitcode
+            for p in list(processes.values())
+            if p.exitcode is not None
+        ]
+        time.sleep(0.05)
+
+    return _format_exitcodes(exitcodes)
+
+
+def _format_exitcodes(exitcodes):
+    """Format a list of exit codes with the names of the signals if possible"""
+    str_exitcodes = [
+        f"{_get_exitcode_name(e)}({e})" for e in exitcodes if e is not None
+    ]
+    return "{" + ", ".join(str_exitcodes) + "}"
+
+
+def _get_exitcode_name(exitcode):
+    if sys.platform == "win32":
+        # Exit codes are unreliable on Windows (see bpo-31863).
+        # For this case, return UNKNOWN.
+        return "UNKNOWN"
+
+    if exitcode < 0:
+        try:
+            import signal
+
+            return signal.Signals(-exitcode).name
+        except ValueError:
+            return "UNKNOWN"
+    elif exitcode != 255:
+        # Exit codes are unreliable with forkserver, where 255 is always
+        # returned (see bpo-30589). For this case, return UNKNOWN.
+        return "EXIT"
+
+    return "UNKNOWN"
diff --git a/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py b/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..90e5d1013c610259a9c38e3247741889d02ac22b
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/externals/loky/cloudpickle_wrapper.py
@@ -0,0 +1,102 @@
+import inspect
+from functools import partial
+from joblib.externals.cloudpickle import dumps, loads
+
+
+WRAP_CACHE = {}
+
+
+class CloudpickledObjectWrapper:
+    def __init__(self, obj, keep_wrapper=False):
+        self._obj = obj
+        self._keep_wrapper = keep_wrapper
+
+    def __reduce__(self):
+        _pickled_object = dumps(self._obj)
+        if not self._keep_wrapper:
+            return loads, (_pickled_object,)
+
+        return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
+
+    def __getattr__(self, attr):
+        # Ensure that the wrapped object can be used seamlessly in place of
+        # the original object.
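+        # __getattr__ is only invoked when normal attribute lookup fails; the
+        # guard below keeps the wrapper's own attributes from being looked up
+        # on the wrapped object.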
+ if attr not in ["_obj", "_keep_wrapper"]: + return getattr(self._obj, attr) + return getattr(self, attr) + + +# Make sure the wrapped object conserves the callable property +class CallableObjectWrapper(CloudpickledObjectWrapper): + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + +def _wrap_non_picklable_objects(obj, keep_wrapper): + if callable(obj): + return CallableObjectWrapper(obj, keep_wrapper=keep_wrapper) + return CloudpickledObjectWrapper(obj, keep_wrapper=keep_wrapper) + + +def _reconstruct_wrapper(_pickled_object, keep_wrapper): + obj = loads(_pickled_object) + return _wrap_non_picklable_objects(obj, keep_wrapper) + + +def _wrap_objects_when_needed(obj): + # Function to introspect an object and decide if it should be wrapped or + # not. + need_wrap = "__main__" in getattr(obj, "__module__", "") + if isinstance(obj, partial): + return partial( + _wrap_objects_when_needed(obj.func), + *[_wrap_objects_when_needed(a) for a in obj.args], + **{ + k: _wrap_objects_when_needed(v) + for k, v in obj.keywords.items() + }, + ) + if callable(obj): + # Need wrap if the object is a function defined in a local scope of + # another function. + func_code = getattr(obj, "__code__", "") + need_wrap |= getattr(func_code, "co_flags", 0) & inspect.CO_NESTED + + # Need wrap if the obj is a lambda expression + func_name = getattr(obj, "__name__", "") + need_wrap |= "<lambda>" in func_name + + if not need_wrap: + return obj + + wrapped_obj = WRAP_CACHE.get(obj) + if wrapped_obj is None: + wrapped_obj = _wrap_non_picklable_objects(obj, keep_wrapper=False) + WRAP_CACHE[obj] = wrapped_obj + return wrapped_obj + + +def wrap_non_picklable_objects(obj, keep_wrapper=True): + """Wrapper for non-picklable objects that uses cloudpickle to serialize them. + + Note that this wrapper tends to slow down the serialization process as it + is done with cloudpickle which is typically slower compared to pickle. The + proper way to solve serialization issues is to avoid defining functions and + objects in the main scripts and to implement __reduce__ functions for + complex classes.
+ """ + # If obj is a class, create a CloudpickledClassWrapper which instantiates + # the object internally and wraps it directly in a CloudpickledObjectWrapper + if inspect.isclass(obj): + + class CloudpickledClassWrapper(CloudpickledObjectWrapper): + def __init__(self, *args, **kwargs): + self._obj = obj(*args, **kwargs) + self._keep_wrapper = keep_wrapper + + CloudpickledClassWrapper.__name__ = obj.__name__ + return CloudpickledClassWrapper + + # If obj is an instance of a class, just wrap it in a regular + # CloudpickledObjectWrapper + return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/initializers.py b/lib/python3.10/site-packages/joblib/externals/loky/initializers.py new file mode 100644 index 0000000000000000000000000000000000000000..aea0e56c25d0d74e04788493058549a1399f8342 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/initializers.py @@ -0,0 +1,80 @@ +import warnings + + +def _viztracer_init(init_kwargs): + """Initialize viztracer's profiler in worker processes""" + from viztracer import VizTracer + + tracer = VizTracer(**init_kwargs) + tracer.register_exit() + tracer.start() + + +def _make_viztracer_initializer_and_initargs(): + try: + import viztracer + + tracer = viztracer.get_tracer() + if tracer is not None and getattr(tracer, "enable", False): + # Profiler is active: introspect its configuration to + # initialize the workers with the same configuration. + return _viztracer_init, (tracer.init_kwargs,) + except ImportError: + # viztracer is not installed: nothing to do + pass + except Exception as e: + # In case viztracer's API evolves, we do not want to crash loky but + # we want to know about it to be able to update loky. + warnings.warn(f"Unable to introspect viztracer state: {e}") + return None, () + + +class _ChainedInitializer: + """Compound worker initializer + + This is meant to be used in conjunction with _chain_initializers to + produce the necessary chained_args list to be passed to __call__. + """ + + def __init__(self, initializers): + self._initializers = initializers + + def __call__(self, *chained_args): + for initializer, args in zip(self._initializers, chained_args): + initializer(*args) + + +def _chain_initializers(initializer_and_args): + """Convenience helper to combine a sequence of initializers. + + If some initializers are None, they are filtered out.
+ """ + filtered_initializers = [] + filtered_initargs = [] + for initializer, initargs in initializer_and_args: + if initializer is not None: + filtered_initializers.append(initializer) + filtered_initargs.append(initargs) + + if not filtered_initializers: + return None, () + elif len(filtered_initializers) == 1: + return filtered_initializers[0], filtered_initargs[0] + else: + return _ChainedInitializer(filtered_initializers), filtered_initargs + + +def _prepare_initializer(initializer, initargs): + if initializer is not None and not callable(initializer): + raise TypeError( + f"initializer must be a callable, got: {initializer!r}" + ) + + # Introspect the runtime to determine if we need to propagate the + # viztracer profiler information to the workers: + return _chain_initializers( + [ + (initializer, initargs), + _make_viztracer_initializer_and_initargs(), + ] + ) diff --git a/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py b/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..3f32994e1c305862763051e9b84514b0130b9a0b --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/process_executor.py @@ -0,0 +1,1344 @@ +############################################################################### +# Re-implementation of the ProcessPoolExecutor more robust to faults +# +# author: Thomas Moreau and Olivier Grisel +# +# adapted from concurrent/futures/process_pool_executor.py (17/02/2017) +# * Add an extra management thread to detect executor_manager_thread failures, +# * Improve the shutdown process to avoid deadlocks, +# * Add timeout for workers, +# * More robust pickling process. +# +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The following diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... | +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | +--------+ | 4, result | | | +| | | ... | | 3, except | | | ++----------+ +------------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
+- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + + +__author__ = "Thomas Moreau (thomas.moreau.2010@gmail.com)" + + +import faulthandler +import os +import gc +import sys +import queue +import struct +import weakref +import warnings +import itertools +import traceback +import threading +from time import time, sleep +import multiprocessing as mp +from functools import partial +from pickle import PicklingError +from concurrent.futures import Executor +from concurrent.futures._base import LOGGER +from concurrent.futures.process import BrokenProcessPool as _BPPException +from multiprocessing.connection import wait + +from ._base import Future +from .backend import get_context +from .backend.context import cpu_count, _MAX_WINDOWS_WORKERS +from .backend.queues import Queue, SimpleQueue +from .backend.reduction import set_loky_pickler, get_loky_pickler_name +from .backend.utils import kill_process_tree, get_exitcodes_terminated_worker +from .initializers import _prepare_initializer + + +# Mechanism to prevent infinite process spawning. When a worker of a +# ProcessPoolExecutor nested in MAX_DEPTH Executors tries to create a new +# Executor, a LokyRecursionError is raised +MAX_DEPTH = int(os.environ.get("LOKY_MAX_DEPTH", 10)) +_CURRENT_DEPTH = 0 + +# Minimum time interval between two consecutive memory leak protection checks. +_MEMORY_LEAK_CHECK_DELAY = 1.0 + +# Number of bytes of memory usage allowed over the reference process size. +_MAX_MEMORY_LEAK_SIZE = int(3e8) + + +try: + from psutil import Process + + _USE_PSUTIL = True + + def _get_memory_usage(pid, force_gc=False): + if force_gc: + gc.collect() + + mem_size = Process(pid).memory_info().rss + mp.util.debug(f"psutil returned memory size: {mem_size}") + return mem_size + +except ImportError: + _USE_PSUTIL = False + + +class _ThreadWakeup: + def __init__(self): + self._closed = False + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + if not self._closed: + self._closed = True + self._writer.close() + self._reader.close() + + def wakeup(self): + if not self._closed: + self._writer.send_bytes(b"") + + def clear(self): + if not self._closed: + while self._reader.poll(): + self._reader.recv_bytes() + + +class _ExecutorFlags: + """Necessary references to maintain executor states without preventing gc + + It makes it possible to keep the information needed by + executor_manager_thread and crash_detection_thread to maintain the pool + without preventing the garbage collection of unreferenced executors. + """ + + def __init__(self, shutdown_lock): + + self.shutdown = False + self.broken = None + self.kill_workers = False + self.shutdown_lock = shutdown_lock + + def flag_as_shutting_down(self, kill_workers=None): + with self.shutdown_lock: + self.shutdown = True + if kill_workers is not None: + self.kill_workers = kill_workers + + def flag_as_broken(self, broken): + with self.shutdown_lock: + self.shutdown = True + self.broken = broken + + +# Prior to 3.9, executor_manager_thread is created as a daemon thread. This +# means that it is not joined automatically when the interpreter is shutting +# down. To work around this problem, an exit handler is installed to tell the +# thread to exit when the interpreter is shutting down and then waits until +# it finishes.
The thread needs to be daemonized because the atexit hooks are +# called after all non-daemonized threads are joined. +# +# Starting with 3.9, there exists a specific atexit hook to be called before +# joining the threads so the executor_manager_thread does not need to be +# daemonized anymore. +# +# The atexit hooks are registered when starting the first ProcessPoolExecutor +# to avoid the mere import having an effect on the interpreter. + +_global_shutdown = False +_global_shutdown_lock = threading.Lock() +_threads_wakeups = weakref.WeakKeyDictionary() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + + # Materialize the list of items to avoid errors due to iterating over a + # dictionary whose size is changing. + items = list(_threads_wakeups.items()) + if len(items) > 0: + mp.util.debug( + f"Interpreter shutting down. Waking up {len(items)} " + f"executor_manager_thread:\n{items}" + ) + + # Wake up the executor_manager_threads so they can detect the interpreter + # is shutting down and exit. + for _, (shutdown_lock, thread_wakeup) in items: + with shutdown_lock: + thread_wakeup.wakeup() + + # Collect the executor_manager_threads to make sure we exit cleanly. + for thread, _ in items: + # This lock is to prevent situations where an executor is gc'ed in one + # thread while the atexit finalizer is running in another thread. + with _global_shutdown_lock: + thread.join() + + +# With the fork context, _thread_wakeups is propagated to children. +# Clear it after fork to avoid situations that can cause a freeze +# when joining the workers. +mp.util.register_after_fork(_threads_wakeups, lambda obj: obj.clear()) + + +# Module variable to register the at_exit call +process_pool_executor_at_exit = None + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled).
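To make the trade-off above concrete, here is a worked example of the call-queue sizing rule applied later in `_setup_queues` (assuming the default `EXTRA_QUEUED_CALLS = 1` defined just below):

max_workers = 4
EXTRA_QUEUED_CALLS = 1
queue_size = 2 * max_workers + EXTRA_QUEUED_CALLS  # == 9
# At most 9 _CallItems can be buffered at once; only work items that
# have not yet moved into the call queue can still be cancelled.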
+EXTRA_QUEUED_CALLS = 1 + + +class _RemoteTraceback(Exception): + """Embed stringification of remote traceback in local traceback""" + + def __init__(self, tb=None): + self.tb = f'\n"""\n{tb}"""' + + def __str__(self): + return self.tb + + +# Do not inherit from BaseException to mirror +# concurrent.futures.process._ExceptionWithTraceback +class _ExceptionWithTraceback: + def __init__(self, exc): + tb = getattr(exc, "__traceback__", None) + if tb is None: + _, _, tb = sys.exc_info() + tb = traceback.format_exception(type(exc), exc, tb) + tb = "".join(tb) + self.exc = exc + self.tb = tb + + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + + +class _WorkItem: + + __slots__ = ["future", "fn", "args", "kwargs"] + + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _ResultItem: + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + + +class _CallItem: + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + # Store the current loky_pickler so it is correctly set in the worker + self.loky_pickler = get_loky_pickler_name() + + def __call__(self): + set_loky_pickler(self.loky_pickler) + return self.fn(*self.args, **self.kwargs) + + def __repr__(self): + return ( + f"CallItem({self.work_id}, {self.fn}, {self.args}, {self.kwargs})" + ) + + +class _SafeQueue(Queue): + """Safe Queue that sets the exception on the future object linked to a failed job""" + + def __init__( + self, + max_size=0, + ctx=None, + pending_work_items=None, + running_work_items=None, + thread_wakeup=None, + shutdown_lock=None, + reducers=None, + ): + self.thread_wakeup = thread_wakeup + self.shutdown_lock = shutdown_lock + self.pending_work_items = pending_work_items + self.running_work_items = running_work_items + super().__init__(max_size, reducers=reducers, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + if isinstance(e, struct.error): + raised_error = RuntimeError( + "The task could not be sent to the workers as it is too " + "large for `send_bytes`." + ) + else: + raised_error = PicklingError( + "Could not pickle the task to send it to the workers." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + raised_error.__cause__ = _RemoteTraceback("".join(tb)) + work_item = self.pending_work_items.pop(obj.work_id, None) + self.running_work_items.remove(obj.work_id) + # work_item can be None if another process terminated. In this + # case, the executor_manager_thread fails all work_items with + # BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(raised_error) + del work_item + with self.shutdown_lock: + self.thread_wakeup.wakeup() + else: + super()._on_queue_feeder_error(e, obj) + + +def _get_chunks(chunksize, *iterables): + """Iterates over zip()ed iterables in chunks.""" + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + + +def _process_chunk(fn, chunk): + """Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process.
+ + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None): + """Safely send back the given result or exception""" + try: + result_queue.put( + _ResultItem(work_id, result=result, exception=exception) + ) + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(work_id, exception=exc)) + + +def _enable_faulthandler_if_needed(): + if "PYTHONFAULTHANDLER" in os.environ: + # Respect the environment variable to configure faulthandler. This + # makes it possible to never enable faulthandler in the loky workers by + # setting PYTHONFAULTHANDLER=0 explicitly in the environment. + mp.util.debug( + f"faulthandler explicitly configured by environment variable: " + f"PYTHONFAULTHANDLER={os.environ['PYTHONFAULTHANDLER']}." + ) + else: + if faulthandler.is_enabled(): + # Fault handler is already enabled, possibly via a custom + # initializer to customize the behavior. + mp.util.debug("faulthandler already enabled.") + else: + # Enable faulthandler by default with default parameters otherwise. + mp.util.debug( + "Enabling faulthandler to report tracebacks on worker crashes." + ) + faulthandler.enable() + + +def _process_worker( + call_queue, + result_queue, + initializer, + initargs, + processes_management_lock, + timeout, + worker_exit_lock, + current_depth, +): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A ctx.Queue of _ResultItems that will be written + to by the worker. + initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + processes_management_lock: A ctx.Lock avoiding worker timeout while + some workers are being spawned. + timeout: maximum time to wait for a new item in the call_queue. If that + time expires, the worker will shut down. + worker_exit_lock: Lock to avoid flagging the executor as broken on + workers timeout. + current_depth: Nested parallelism level, to avoid infinite spawning. + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + LOGGER.critical("Exception in initializer:", exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + + # set the global _CURRENT_DEPTH mechanism to limit recursive calls + global _CURRENT_DEPTH + _CURRENT_DEPTH = current_depth + _process_reference_size = None + _last_memory_leak_check = None + pid = os.getpid() + + mp.util.debug(f"Worker started with timeout={timeout}") + _enable_faulthandler_if_needed() + + while True: + try: + call_item = call_queue.get(block=True, timeout=timeout) + if call_item is None: + mp.util.info("Shutting down worker on sentinel") + except queue.Empty: + mp.util.info(f"Shutting down worker after timeout {timeout:0.3f}s") + if processes_management_lock.acquire(block=False): + processes_management_lock.release() + call_item = None + else: + mp.util.info("Could not acquire processes_management_lock") + continue + except BaseException: + previous_tb = traceback.format_exc() + try: + result_queue.put(_RemoteTraceback(previous_tb)) + except BaseException: + # If we cannot format the exception correctly, at least print + # the traceback.
+ print(previous_tb) + mp.util.debug("Exiting with code 1") + sys.exit(1) + if call_item is None: + # Notify queue management thread about worker shutdown + result_queue.put(pid) + is_clean = worker_exit_lock.acquire(True, timeout=30) + + # Early notify any loky executor running in this worker process + # (nested parallelism) that this process is about to shut down, to + # avoid a deadlock waiting indefinitely for the worker to finish. + _python_exit() + + if is_clean: + mp.util.debug("Exited cleanly") + else: + mp.util.info("Main process did not release worker_exit") + return + try: + r = call_item() + except BaseException as e: + exc = _ExceptionWithTraceback(e) + result_queue.put(_ResultItem(call_item.work_id, exception=exc)) + else: + _sendback_result(result_queue, call_item.work_id, result=r) + del r + + # Free the resources as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + if _USE_PSUTIL: + if _process_reference_size is None: + # Make reference measurement after the first call + _process_reference_size = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + continue + if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY: + mem_usage = _get_memory_usage(pid) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # Memory usage stays within bounds: everything is fine. + continue + + # Check memory usage again; this time take the measurement + # after a forced garbage collection to break any reference + # cycles. + mem_usage = _get_memory_usage(pid, force_gc=True) + _last_memory_leak_check = time() + if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: + # The GC managed to free the memory: everything is fine. + continue + + # The process is leaking memory: let the master process + # know that we need to start a new worker. + mp.util.info("Memory leak detected: shutting down worker") + result_queue.put(pid) + with worker_exit_lock: + mp.util.debug("Exit due to memory leak") + return + else: + # If psutil is not installed, trigger gc.collect events + # regularly to limit potential memory leaks due to reference cycles + if _last_memory_leak_check is None or ( + time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY + ): + gc.collect() + _last_memory_leak_check = time() + + +class _ExecutorManagerThread(threading.Thread): + """Manages the communication between this process and the worker processes. + + The manager is run in a local thread. + + Args: + executor: A reference to the ProcessPoolExecutor that owns + this thread. A weakref will be owned by the manager as well as + references to internal objects used to introspect the state of + the executor. + """ + + def __init__(self, executor): + # Store references to necessary internals of the executor. + + # A _ThreadWakeup to allow waking up the executor_manager_thread from + # the main Thread and avoid deadlocks caused by permanently + # locked queues. + self.thread_wakeup = executor._executor_manager_thread_wakeup + self.shutdown_lock = executor._shutdown_lock + + # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used + # to determine if the ProcessPoolExecutor has been garbage collected, + # in which case the manager can exit. + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item.
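The memory-leak protection above boils down to comparing RSS snapshots taken with psutil, with a forced collection before the reference measurement. A standalone sketch of that measurement (requires psutil; the `rss_after_gc` name is illustrative, not part of loky):

import gc
import os

import psutil  # optional dependency, as in the worker loop above


def rss_after_gc(pid=None):
    # Force a collection first so that collectable reference cycles do
    # not get counted as a leak, then read the resident set size.
    gc.collect()
    process = psutil.Process(pid if pid is not None else os.getpid())
    return process.memory_info().rss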
+ def weakref_cb( + _, + thread_wakeup=self.thread_wakeup, + shutdown_lock=self.shutdown_lock, + ): + if mp is not None: + # At this point, the multiprocessing module can already be + # garbage collected. We only log debug info when still + # possible. + mp.util.debug( + "Executor collected: triggering callback for" + " QueueManager wakeup" + ) + with shutdown_lock: + thread_wakeup.wakeup() + + self.executor_reference = weakref.ref(executor, weakref_cb) + + # The flags of the executor + self.executor_flags = executor._flags + + # A list of the ctx.Process instances used as workers. + self.processes = executor._processes + + # A ctx.Queue that will be filled with _CallItems derived from + # _WorkItems for processing by the process workers. + self.call_queue = executor._call_queue + + # A ctx.SimpleQueue of _ResultItems generated by the process workers. + self.result_queue = executor._result_queue + + # A queue.Queue of work ids e.g. Queue([5, 6, ...]). + self.work_ids_queue = executor._work_ids + + # A dict mapping work ids to _WorkItems e.g. + # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + self.pending_work_items = executor._pending_work_items + + # A list of the work_ids that are currently running + self.running_work_items = executor._running_work_items + + # A lock to avoid workers shutting down on timeout concurrently with + # the spawning of new processes or a shutdown + self.processes_management_lock = executor._processes_management_lock + + super().__init__(name="ExecutorManagerThread") + if sys.version_info < (3, 9): + self.daemon = True + + def run(self): + # Main loop for the executor manager thread. + + while True: + self.add_call_item_to_queue() + + result_item, is_broken, bpe = self.wait_result_broken_or_wakeup() + + if is_broken: + self.terminate_broken(bpe) + return + if result_item is not None: + self.process_result_item(result_item) + # Delete reference to result_item to avoid keeping references + # while waiting on new results. + del result_item + + if self.is_shutting_down(): + self.flag_executor_shutting_down() + + # Since no new work items can be added, it is safe to shut down + # this thread if there are no pending work items. + if not self.pending_work_items: + self.join_executor_internals() + return + + def add_call_item_to_queue(self): + # Fills call_queue with _WorkItems from pending_work_items. + # This function never blocks. + while True: + if self.call_queue.full(): + return + try: + work_id = self.work_ids_queue.get(block=False) + except queue.Empty: + return + else: + work_item = self.pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + self.running_work_items += [work_id] + self.call_queue.put( + _CallItem( + work_id, + work_item.fn, + work_item.args, + work_item.kwargs, + ), + block=True, + ) + else: + del self.pending_work_items[work_id] + continue + + def wait_result_broken_or_wakeup(self): + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake-up + # signal to be sent. The wake-up signals come either from new tasks + # being submitted, from the executor being shut down or gc-ed, or from + # the shutdown of the Python interpreter.
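The `weakref.ref(executor, weakref_cb)` pattern above is how the manager thread learns that its executor was garbage collected. A tiny standalone illustration (under CPython, refcounting makes the callback fire immediately on `del`):

import weakref


class Owner:
    pass


def on_collect(ref):
    # Runs when the referent is collected, like weakref_cb above.
    print("owner collected: wake up the manager thread")


owner = Owner()
ref = weakref.ref(owner, on_collect)
del owner  # prints the message under CPython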
+ result_reader = self.result_queue._reader + wakeup_reader = self.thread_wakeup._reader + readers = [result_reader, wakeup_reader] + worker_sentinels = [p.sentinel for p in list(self.processes.values())] + ready = wait(readers + worker_sentinels) + + bpe = None + is_broken = True + result_item = None + if result_reader in ready: + try: + result_item = result_reader.recv() + if isinstance(result_item, _RemoteTraceback): + bpe = BrokenProcessPool( + "A task has failed to un-serialize. Please ensure that" + " the arguments of the function are all picklable." + ) + bpe.__cause__ = result_item + else: + is_broken = False + except BaseException as e: + bpe = BrokenProcessPool( + "A result has failed to un-serialize. Please ensure that " + "the objects returned by the function are always " + "picklable." + ) + tb = traceback.format_exception( + type(e), e, getattr(e, "__traceback__", None) + ) + bpe.__cause__ = _RemoteTraceback("".join(tb)) + + elif wakeup_reader in ready: + # This is simply a wake-up event that might either trigger putting + # more tasks in the queue or trigger the clean up of resources. + is_broken = False + else: + # A worker has terminated and we don't know why; set the state of + # the executor as broken + exit_codes = "" + if sys.platform != "win32": + # On Windows, introspecting terminated workers' exitcodes seems + # unstable, therefore they are not appended to the exception + # message. + exit_codes = ( + "\nThe exit codes of the workers are " + f"{get_exitcodes_terminated_worker(self.processes)}" + ) + mp.util.debug( + "A worker unexpectedly terminated. Workers that " + "might have caused the breakage: " + + str( + { + p.name: p.exitcode + for p in list(self.processes.values()) + if p is not None and p.sentinel in ready + } + ) + ) + bpe = TerminatedWorkerError( + "A worker process managed by the executor was unexpectedly " + "terminated. This could be caused by a segmentation fault " + "while calling the function or by an excessive memory usage " + "causing the Operating System to kill the worker.\n" + f"{exit_codes}\n" + "Detailed tracebacks of the workers should have been printed " + "to stderr in the executor process if faulthandler was not " + "disabled." + ) + + self.thread_wakeup.clear() + + return result_item, is_broken, bpe + + def process_result_item(self, result_item): + # Process the received result_item. This can be either the PID of a + # worker that exited gracefully or a _ResultItem + + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID, either on request + # by the executor.shutdown method or by the timeout of the worker + # itself: we should not mark the executor as broken. + with self.processes_management_lock: + p = self.processes.pop(result_item, None) + + # p can be None if the executor is concurrently shutting down. + if p is not None: + p._worker_exit_lock.release() + mp.util.debug( + f"joining {p.name} when processing {p.pid} as result_item" + ) + p.join() + del p + + # Make sure the executor has the right number of workers, even if a + # worker timed out while some jobs were submitted. If some work is + # pending or there are fewer processes than running items, we need to + # start a new Process and raise a warning.
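`wait_result_broken_or_wakeup` above multiplexes over queue readers and process sentinels with `multiprocessing.connection.wait`; a sentinel becoming ready is how a crashed worker is detected without polling. A minimal standalone sketch of sentinel waiting:

import multiprocessing as mp
from multiprocessing.connection import wait


def child():
    pass  # exits immediately


if __name__ == "__main__":
    p = mp.Process(target=child)
    p.start()
    # wait() returns the subset of objects that are ready; a process
    # sentinel becomes ready when the process exits (or crashes).
    ready = wait([p.sentinel], timeout=5)
    print(p.sentinel in ready)  # True once the child has exited
    p.join()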
+ n_pending = len(self.pending_work_items) + n_running = len(self.running_work_items) + if n_pending - n_running > 0 or n_running > len(self.processes): + executor = self.executor_reference() + if ( + executor is not None + and len(self.processes) < executor._max_workers + ): + warnings.warn( + "A worker stopped while some jobs were given to the " + "executor. This can be caused by a too-short worker " + "timeout or by a memory leak.", + UserWarning, + ) + with executor._processes_management_lock: + executor._adjust_process_count() + executor = None + else: + # Received a _ResultItem so mark the future as completed. + work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + self.running_work_items.remove(result_item.work_id) + + def is_shutting_down(self): + # Check whether we should start shutting down the executor. + executor = self.executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this thread is not broken AND + # * The executor that owns this worker has been collected OR + # * The executor that owns this worker has been shutdown. + # If the executor is broken, it should be detected in the next loop. + return _global_shutdown or ( + (executor is None or self.executor_flags.shutdown) + and not self.executor_flags.broken + ) + + def terminate_broken(self, bpe): + # Terminate the executor because it is in a broken state. The bpe + # argument can be used to display more information on the error that + # led the executor to become broken. + + # Mark the process pool broken so that submits fail right now. + self.executor_flags.flag_as_broken(bpe) + + # Mark pending tasks as failed. + for work_item in self.pending_work_items.values(): + work_item.future.set_exception(bpe) + # Delete references to object. See issue16284 + del work_item + self.pending_work_items.clear() + + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + self.kill_workers(reason="broken executor") + + # clean up resources + self.join_executor_internals() + + def flag_executor_shutting_down(self): + # Flag the executor as shutting down and cancel remaining tasks if + # requested as early as possible if it is not gc-ed yet. + self.executor_flags.flag_as_shutting_down() + + # Cancel pending work items if requested. + if self.executor_flags.kill_workers: + while self.pending_work_items: + _, work_item = self.pending_work_items.popitem() + work_item.future.set_exception( + ShutdownExecutorError( + "The Executor was shut down with `kill_workers=True` " + "before this job could complete." + ) + ) + del work_item + + # Kill the remaining workers forcibly so as not to waste time joining + # them + self.kill_workers(reason="executor shutting down") + + def kill_workers(self, reason=""): + # Terminate the remaining workers using SIGKILL. This function also + # terminates descendant workers of the children in case there is some + # nested parallelism.
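`kill_workers` above delegates to `kill_process_tree` from the vendored backend utils added earlier in this diff. A hedged usage sketch, assuming that vendored import path:

import multiprocessing as mp
import time

# Import path as added earlier in this diff (vendored inside joblib).
from joblib.externals.loky.backend.utils import kill_process_tree


def sleeper():
    time.sleep(3600)


if __name__ == "__main__":
    p = mp.Process(target=sleeper)
    p.start()
    kill_process_tree(p)  # kills p and any descendants, then joins p
    print(p.exitcode)     # typically -9 (SIGKILL) on POSIX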
+ while self.processes: + _, p = self.processes.popitem() + mp.util.debug(f"terminate process {p.name}, reason: {reason}") + try: + kill_process_tree(p) + except ProcessLookupError: # pragma: no cover + pass + + def shutdown_workers(self): + # Shut down all workers in self.processes + + # Create a list to avoid RuntimeError due to concurrent modification of + # processes. n_children_to_stop is thus an upper bound. Also release the + # processes' _worker_exit_lock to accelerate the shutdown procedure, as + # there is no need for a handshake here. + with self.processes_management_lock: + n_children_to_stop = 0 + for p in list(self.processes.values()): + mp.util.debug(f"releasing worker exit lock on {p.name}") + p._worker_exit_lock.release() + n_children_to_stop += 1 + + mp.util.debug(f"found {n_children_to_stop} processes to stop") + + # Send the right number of sentinels, to make sure all children are + # properly terminated. Do it with a mechanism that avoids hanging on a + # full queue when all workers have already been shut down. + n_sentinels_sent = 0 + cooldown_time = 0.001 + while ( + n_sentinels_sent < n_children_to_stop + and self.get_n_children_alive() > 0 + ): + for _ in range(n_children_to_stop - n_sentinels_sent): + try: + self.call_queue.put_nowait(None) + n_sentinels_sent += 1 + except queue.Full as e: + if cooldown_time > 5.0: + mp.util.info( + "failed to send all sentinels, exiting with an " + f"error.\ncall_queue size={self.call_queue._maxsize}; " + f" full is {self.call_queue.full()}; " + ) + raise e + mp.util.info( + "full call_queue prevented sending all sentinels at " + "once, waiting..." + ) + sleep(cooldown_time) + cooldown_time *= 1.2 + break + + mp.util.debug(f"sent {n_sentinels_sent} sentinels to the call queue") + + def join_executor_internals(self): + self.shutdown_workers() + + # Release the queue's resources as soon as possible. Flag the feeder + # thread for clean exit to avoid having the crash detection thread flag + # the Executor as broken during the shutdown. This is safe as either: + # * We don't need to communicate with the workers anymore + # * There is nothing left in the Queue buffer except None sentinels + mp.util.debug("closing call_queue") + self.call_queue.close() + self.call_queue.join_thread() + + # Closing result_queue + mp.util.debug("closing result_queue") + self.result_queue.close() + + mp.util.debug("closing thread_wakeup") + with self.shutdown_lock: + self.thread_wakeup.close() + + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on macOS. + with self.processes_management_lock: + mp.util.debug(f"joining {len(self.processes)} processes") + n_joined_processes = 0 + while True: + try: + pid, p = self.processes.popitem() + mp.util.debug(f"joining process {p.name} with pid {pid}") + p.join() + n_joined_processes += 1 + except KeyError: + break + + mp.util.debug( + "executor management thread clean shutdown of " + f"{n_joined_processes} workers" + ) + + def get_n_children_alive(self): + # This is an upper bound on the number of children alive.
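`shutdown_workers` above uses the classic one-sentinel-per-worker protocol: each worker exits cleanly when it reads a None from the shared queue. A self-contained sketch of that protocol outside loky:

import multiprocessing as mp


def worker(q):
    while True:
        item = q.get()
        if item is None:  # sentinel received: exit cleanly
            return


if __name__ == "__main__":
    q = mp.Queue()
    workers = [mp.Process(target=worker, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for _ in workers:  # one None sentinel per worker, as above
        q.put(None)
    for w in workers:
        w.join()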
+ with self.processes_management_lock: + return sum(p.is_alive() for p in list(self.processes.values())) + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked and _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # undetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ( + f"system provides too few semaphores ({nsems_max} available, " + "256 necessary)" + ) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. + """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +def _check_max_depth(context): + # Limit the maximal recursion level + global _CURRENT_DEPTH + if context.get_start_method() == "fork" and _CURRENT_DEPTH > 0: + raise LokyRecursionError( + "Could not spawn extra nested processes at a depth greater than " + "MAX_DEPTH=1. It is not possible to increase this limit when " + "using the 'fork' start method." + ) + + if 0 < MAX_DEPTH and _CURRENT_DEPTH + 1 > MAX_DEPTH: + raise LokyRecursionError( + "Could not spawn extra nested processes at a depth greater than " + f"MAX_DEPTH={MAX_DEPTH}. If this is intended, you can change " + "this limit with the LOKY_MAX_DEPTH environment variable." + ) + + +class LokyRecursionError(RuntimeError): + """A process tries to spawn too many levels of nested processes.""" + + +class BrokenProcessPool(_BPPException): + """ + Raised when the executor is broken while a future was in the running state. + The cause can be an error raised when unpickling the task in the worker + process or when unpickling the result value in the parent process. It can + also be caused by a worker process being terminated unexpectedly. + """ + + +class TerminatedWorkerError(BrokenProcessPool): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +# Alias for backward compat (for code written for loky 1.1.4 and earlier). Do +# not use in new code. +BrokenExecutor = BrokenProcessPool + + +class ShutdownExecutorError(RuntimeError): + """ + Raised when a ProcessPoolExecutor is shutdown while a future was in the + running or pending state. + """ + + +class ProcessPoolExecutor(Executor): + + _at_exit = None + + def __init__( + self, + max_workers=None, + job_reducers=None, + result_reducers=None, + timeout=None, + context=None, + initializer=None, + initargs=(), + env=None, + ): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: int, optional (default: cpu_count()) + The maximum number of processes that can be used to execute the + given calls. If None or not given then as many worker processes + will be created as the number of CPUs the current process + can use. + job_reducers, result_reducers: dict(type: reducer_func) + Custom reducer for pickling the jobs and the results from the + Executor.
If only `job_reducers` is provided, `result_reducers` + will use the same reducers. + timeout: int, optional (default: None) + Idle workers exit after timeout seconds. If a new job is + submitted after the timeout, the executor will start enough + new Python processes to make sure the pool of workers is full. + context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. + initializer: A callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. + env: A dict of environment variables to overwrite in the child + process. The environment variables are set before any module is + loaded. Note that this only works with the loky context. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = cpu_count() + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + self._max_workers = max_workers + + if ( + sys.platform == "win32" + and self._max_workers > _MAX_WINDOWS_WORKERS + ): + warnings.warn( + f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} " + "due to limitations of the operating system." + ) + self._max_workers = _MAX_WINDOWS_WORKERS + + if context is None: + context = get_context() + self._context = context + self._env = env + + self._initializer, self._initargs = _prepare_initializer( + initializer, initargs + ) + _check_max_depth(self._context) + + if result_reducers is None: + result_reducers = job_reducers + + # Timeout + self._timeout = timeout + + # Management thread + self._executor_manager_thread = None + + # Map of pids to processes + self._processes = {} + + # Internal variables of the ProcessPoolExecutor + self._processes = {} + self._queue_count = 0 + self._pending_work_items = {} + self._running_work_items = [] + self._work_ids = queue.Queue() + self._processes_management_lock = self._context.Lock() + self._executor_manager_thread = None + self._shutdown_lock = threading.Lock() + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of executor_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send wakeup signals to the executor_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + # + # _shutdown_lock must be locked to access _ThreadWakeup.wakeup. + self._executor_manager_thread_wakeup = _ThreadWakeup() + + # Flag to hold the state of the Executor. This makes it possible to + # introspect the Executor state even once it has been garbage + # collected. + self._flags = _ExecutorFlags(self._shutdown_lock) + + # Finally set up the queues for interprocess communication + self._setup_queues(job_reducers, result_reducers) + + mp.util.debug("ProcessPoolExecutor is setup") + + def _setup_queues(self, job_reducers, result_reducers, queue_size=None): + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled.
+ if queue_size is None: + queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, + pending_work_items=self._pending_work_items, + running_work_items=self._running_work_items, + thread_wakeup=self._executor_manager_thread_wakeup, + shutdown_lock=self._shutdown_lock, + reducers=job_reducers, + ctx=self._context, + ) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + + self._result_queue = SimpleQueue( + reducers=result_reducers, ctx=self._context + ) + + def _start_executor_manager_thread(self): + if self._executor_manager_thread is None: + mp.util.debug("_start_executor_manager_thread called") + + # Start the processes so that their sentinels are known. + self._executor_manager_thread = _ExecutorManagerThread(self) + self._executor_manager_thread.start() + + # Register this executor in a mechanism that ensures it will wake + # up when the interpreter is exiting. + _threads_wakeups[self._executor_manager_thread] = ( + self._shutdown_lock, + self._executor_manager_thread_wakeup, + ) + + global process_pool_executor_at_exit + if process_pool_executor_at_exit is None: + # Ensure that the _python_exit function will be called before + # the multiprocessing.Queue._close finalizers which have an + # exitpriority of 10. + + if sys.version_info < (3, 9): + process_pool_executor_at_exit = mp.util.Finalize( + None, _python_exit, exitpriority=20 + ) + else: + process_pool_executor_at_exit = threading._register_atexit( + _python_exit + ) + + def _adjust_process_count(self): + while len(self._processes) < self._max_workers: + worker_exit_lock = self._context.BoundedSemaphore(1) + args = ( + self._call_queue, + self._result_queue, + self._initializer, + self._initargs, + self._processes_management_lock, + self._timeout, + worker_exit_lock, + _CURRENT_DEPTH + 1, + ) + worker_exit_lock.acquire() + try: + # Try to spawn the process with some environment variables to + # overwrite, but this only works with the loky context for now. + p = self._context.Process( + target=_process_worker, args=args, env=self._env + ) + except TypeError: + p = self._context.Process(target=_process_worker, args=args) + p._worker_exit_lock = worker_exit_lock + p.start() + self._processes[p.pid] = p + mp.util.debug( + f"Adjusted process count to {self._max_workers}: " + f"{[(p.name, pid) for pid, p in self._processes.items()]}" + ) + + def _ensure_executor_running(self): + """Ensure all workers and the management thread are running""" + with self._processes_management_lock: + if len(self._processes) != self._max_workers: + self._adjust_process_count() + self._start_executor_manager_thread() + + def submit(self, fn, *args, **kwargs): + with self._flags.shutdown_lock: + if self._flags.broken is not None: + raise self._flags.broken + if self._flags.shutdown: + raise ShutdownExecutorError( + "cannot schedule new futures after shutdown" + ) + + # New calls cannot be submitted once the interpreter is shutting + # down. This check avoids spawning new processes at exit.
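For reference, the full `submit` path above (future creation, manager-thread wakeup, lazy worker spawning) is exercised by ordinary usage like the following sketch, assuming the vendored module path added in this diff:

from joblib.externals.loky.process_executor import ProcessPoolExecutor


def add(a, b):
    return a + b


if __name__ == "__main__":
    executor = ProcessPoolExecutor(max_workers=2)
    future = executor.submit(add, 1, 2)  # wakes the manager thread
    print(future.result())               # 3
    executor.shutdown(wait=True)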
+ if _global_shutdown: + raise RuntimeError( + "cannot schedule new futures after interpreter shutdown" + ) + + f = Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + self._ensure_executor_running() + return f + + submit.__doc__ = Executor.submit.__doc__ + + def map(self, fn, *iterables, **kwargs): + """Returns an iterator equivalent to map(fn, *iterables). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a + time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get("timeout", None) + chunksize = kwargs.get("chunksize", 1) + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map( + partial(_process_chunk, fn), + _get_chunks(chunksize, *iterables), + timeout=timeout, + ) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True, kill_workers=False): + mp.util.debug(f"shutting down executor {self}") + + self._flags.flag_as_shutting_down(kill_workers) + executor_manager_thread = self._executor_manager_thread + executor_manager_thread_wakeup = self._executor_manager_thread_wakeup + + if executor_manager_thread_wakeup is not None: + # Wake up queue management thread + with self._shutdown_lock: + self._executor_manager_thread_wakeup.wakeup() + + if executor_manager_thread is not None and wait: + # This lock avoids a concurrent join if the interpreter + # is shutting down. + with _global_shutdown_lock: + executor_manager_thread.join() + _threads_wakeups.pop(executor_manager_thread, None) + + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors.
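The `chunksize` batching described in `map` above amortizes IPC overhead by shipping several items per _CallItem. A usage sketch, again assuming the vendored module path:

from joblib.externals.loky.process_executor import ProcessPoolExecutor


def square(x):
    return x * x


if __name__ == "__main__":
    executor = ProcessPoolExecutor(max_workers=2)
    # chunksize=5 ships 5 items per task: 2 tasks for 10 inputs.
    print(list(executor.map(square, range(10), chunksize=5)))
    executor.shutdown(wait=True)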
+ self._executor_manager_thread = None + self._executor_manager_thread_wakeup = None + self._call_queue = None + self._result_queue = None + self._processes_management_lock = None + + shutdown.__doc__ = Executor.shutdown.__doc__ diff --git a/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py b/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..faf604c21d3cbeb8fabe14e64d4f94ec1bfd6a94 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/externals/loky/reusable_executor.py @@ -0,0 +1,294 @@ +############################################################################### +# Reusable ProcessPoolExecutor +# +# author: Thomas Moreau and Olivier Grisel +# +import time +import warnings +import threading +import multiprocessing as mp + +from .process_executor import ProcessPoolExecutor, EXTRA_QUEUED_CALLS +from .backend.context import cpu_count +from .backend import get_context + +__all__ = ["get_reusable_executor"] + +# Singleton executor and id management +_executor_lock = threading.RLock() +_next_executor_id = 0 +_executor = None +_executor_kwargs = None + + +def _get_next_executor_id(): + """Ensure that each successive executor instance has a unique, monotonic id. + + The purpose of this monotonic id is to help debug and test automated + instance creation. + """ + global _next_executor_id + with _executor_lock: + executor_id = _next_executor_id + _next_executor_id += 1 + return executor_id + + +def get_reusable_executor( + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, +): + """Return the current ReusableExecutor instance. + + Start a new instance if it has not been started already or if the previous + instance was left in a broken state. + + If the previous instance does not have the requested number of workers, the + executor is dynamically resized to adjust the number of workers prior to + returning. + + Reusing a singleton instance spares the overhead of starting new worker + processes and importing common python packages each time. + + ``max_workers`` controls the maximum number of tasks that can be running in + parallel in worker processes. By default this is set to the number of + CPUs on the host. + + Setting ``timeout`` (in seconds) makes idle workers automatically shutdown + so as to release system resources. New workers are respawned upon + submission of new tasks so that ``max_workers`` are available to accept the + newly submitted tasks. Setting ``timeout`` to around 100 times the time + required to spawn new processes and import packages in them (on the order + of 100ms) ensures that the overhead of spawning workers is negligible. + + Setting ``kill_workers=True`` makes it possible to forcibly interrupt + previously spawned jobs to get a new instance of the reusable executor + with new constructor argument values. + + The ``job_reducers`` and ``result_reducers`` are used to customize the + pickling of tasks and results sent to the executor. + + When provided, the ``initializer`` is run first in newly spawned + processes with argument ``initargs``. + + The environment variables in the child process are a copy of the values in + the main process. One can provide a dict ``{ENV: VAL}`` where ``ENV`` and + ``VAL`` are string literals to overwrite the environment variable ``ENV`` + in the child processes to value ``VAL``. The environment variables are set
The environment variables are set + in the children before any module is loaded. This only works with the + ``loky`` context. + """ + _executor, _ = _ReusablePoolExecutor.get_reusable_executor( + max_workers=max_workers, + context=context, + timeout=timeout, + kill_workers=kill_workers, + reuse=reuse, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + return _executor + + +class _ReusablePoolExecutor(ProcessPoolExecutor): + def __init__( + self, + submit_resize_lock, + max_workers=None, + context=None, + timeout=None, + executor_id=0, + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + super().__init__( + max_workers=max_workers, + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + self.executor_id = executor_id + self._submit_resize_lock = submit_resize_lock + + @classmethod + def get_reusable_executor( + cls, + max_workers=None, + context=None, + timeout=10, + kill_workers=False, + reuse="auto", + job_reducers=None, + result_reducers=None, + initializer=None, + initargs=(), + env=None, + ): + with _executor_lock: + global _executor, _executor_kwargs + executor = _executor + + if max_workers is None: + if reuse is True and executor is not None: + max_workers = executor._max_workers + else: + max_workers = cpu_count() + elif max_workers <= 0: + raise ValueError( + f"max_workers must be greater than 0, got {max_workers}." + ) + + if isinstance(context, str): + context = get_context(context) + if context is not None and context.get_start_method() == "fork": + raise ValueError( + "Cannot use reusable executor with the 'fork' context" + ) + + kwargs = dict( + context=context, + timeout=timeout, + job_reducers=job_reducers, + result_reducers=result_reducers, + initializer=initializer, + initargs=initargs, + env=env, + ) + if executor is None: + is_reused = False + mp.util.debug( + f"Create a executor with max_workers={max_workers}." + ) + executor_id = _get_next_executor_id() + _executor_kwargs = kwargs + _executor = executor = cls( + _executor_lock, + max_workers=max_workers, + executor_id=executor_id, + **kwargs, + ) + else: + if reuse == "auto": + reuse = kwargs == _executor_kwargs + if ( + executor._flags.broken + or executor._flags.shutdown + or not reuse + or executor.queue_size < max_workers + ): + if executor._flags.broken: + reason = "broken" + elif executor._flags.shutdown: + reason = "shutdown" + elif executor.queue_size < max_workers: + # Do not reuse the executor if the queue size is too + # small as this would lead to limited parallelism. + reason = "queue size is too small" + else: + reason = "arguments have changed" + mp.util.debug( + "Creating a new executor with max_workers=" + f"{max_workers} as the previous instance cannot be " + f"reused ({reason})." + ) + executor.shutdown(wait=True, kill_workers=kill_workers) + _executor = executor = _executor_kwargs = None + # Recursive call to build a new instance + return cls.get_reusable_executor( + max_workers=max_workers, **kwargs + ) + else: + mp.util.debug( + "Reusing existing executor with " + f"max_workers={executor._max_workers}." 
+ ) + is_reused = True + executor._resize(max_workers) + + return executor, is_reused + + def submit(self, fn, *args, **kwargs): + with self._submit_resize_lock: + return super().submit(fn, *args, **kwargs) + + def _resize(self, max_workers): + with self._submit_resize_lock: + if max_workers is None: + raise ValueError("Trying to resize with max_workers=None") + elif max_workers == self._max_workers: + return + + if self._executor_manager_thread is None: + # If the executor_manager_thread has not been started + # then no processes have been spawned and we can just + # update _max_workers and return + self._max_workers = max_workers + return + + self._wait_job_completion() + + # Some processes might have returned due to timeout, so check how + # many children are still alive. Use the _processes_management_lock + # to ensure that no processes are spawned or time out during the + # resize. + with self._processes_management_lock: + processes = list(self._processes.values()) + nb_children_alive = sum(p.is_alive() for p in processes) + self._max_workers = max_workers + for _ in range(max_workers, nb_children_alive): + self._call_queue.put(None) + while ( + len(self._processes) > max_workers and not self._flags.broken + ): + time.sleep(1e-3) + + self._adjust_process_count() + processes = list(self._processes.values()) + while not all(p.is_alive() for p in processes): + time.sleep(1e-3) + + def _wait_job_completion(self): + """Wait for the cache to be empty before resizing the pool.""" + # Issue a warning to the user about the adverse effect of this usage. + if self._pending_work_items: + warnings.warn( + "Trying to resize an executor with running jobs: " + "waiting for job completion before resizing.", + UserWarning, + ) + mp.util.debug( + f"Executor {self.executor_id} waiting for job completion " + "before resizing" + ) + # Wait for the completion of the jobs + while self._pending_work_items: + time.sleep(1e-3) + + def _setup_queues(self, job_reducers, result_reducers): + # As this executor can be resized, use a large queue size to avoid + # underestimating capacity and introducing overhead. + # Also handle the case where the user sets max_workers to a value + # larger than cpu_count(), to avoid limiting the number of parallel + # jobs.
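The `_resize` logic above is what lets repeated `get_reusable_executor` calls with a different `max_workers` reuse the same singleton. A hedged sketch, assuming the vendored `get_reusable_executor` described in this diff:

from joblib.externals.loky.reusable_executor import get_reusable_executor


if __name__ == "__main__":
    executor = get_reusable_executor(max_workers=2)
    resized = get_reusable_executor(max_workers=4)
    # Typically the same singleton, resized in place rather than rebuilt.
    print(resized is executor, resized._max_workers)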
+ + min_queue_size = max(cpu_count(), self._max_workers) + self.queue_size = 2 * min_queue_size + EXTRA_QUEUED_CALLS + super()._setup_queues( + job_reducers, result_reducers, queue_size=self.queue_size + ) diff --git a/lib/python3.10/site-packages/joblib/test/__init__.py b/lib/python3.10/site-packages/joblib/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c57745c41d7f207abf0fecea6d280ca186047262 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd1200e7ad4f07bec484c9d2552aa158c355c0eb Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/common.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..946aff035659cf5d55ffaf3a1939fb0265e21d21 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_backports.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71ce7fafc88348f6a0879bac4a1192e670c3067c Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_cloudpickle_wrapper.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daed819bfa8c58b74d1fe35552f343ab9e0b5db6 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_config.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a218ce569e6592099c4315499bddc2b6445b986 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_dask.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c23361c1e7710600da878c09b04b258f7c83214b Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_disk.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5ba20adef9a373ca3245b9bbdc948bbf9258b1d2 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcc9d54e2388d28be05f1bf4eaee6df08879ca13 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_func_inspect_special_encoding.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61fa1c47c2e1d96fe605e71cfc244b6fac0a85a7 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_hashing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddc02c98870aa384d16beee6ddf537dbd4435f6c Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_init.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efae28d50b93e613ed95c79c80bac8b088e27980 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_logger.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9b642f52cbd85effe73850d562cb014de57f777 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memmapping.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..598381512bf164e4ec11767b6382de1d07f90901 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d150a4090ff8e2577c3a2e0ffb5bc61e7cb6d47f Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_memory_async.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6567ff1c9ea17d8dd83d4117406162509e8ff02f Binary files /dev/null and 
b/lib/python3.10/site-packages/joblib/test/__pycache__/test_missing_multiprocessing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d3529fce15b15029dfa4068e35ff2e846790f6b Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_module.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d22111f47400dfaed27df9947ec37027a23591d0 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..780975810bc493bf7ca220df21e69ccd6dacd5c4 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_compat.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3384a88c6a123477acad02ab9c6f03d940ea9d5d Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_numpy_pickle_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8a4c2780e1570ef4dd2e928d0a5ff8ac75ec3df Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_parallel.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79de737de2a3ff0bac1ab25bbba96153ff1aba36 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_store_backends.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7078e8b338ec37a985e95a7ecd9a14d80f96956 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_testing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60011365f4127357634ff9f7ebd8e02da2e119e5 Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/test_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc 
b/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe2e55c64e3d3d40f5bd59611449d6cc07aef83f Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/__pycache__/testutils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/common.py b/lib/python3.10/site-packages/joblib/test/common.py new file mode 100644 index 0000000000000000000000000000000000000000..c011a609375ab9e624e4c8c6642319d50be77893 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/common.py @@ -0,0 +1,84 @@ +""" +Small utilities for testing. +""" + +import gc +import os +import sys +import sysconfig + +from joblib._multiprocessing_helpers import mp +from joblib.testing import SkipTest, skipif + +try: + import lz4 +except ImportError: + lz4 = None + +# TODO straight removal since in joblib.test.common? +IS_PYPY = hasattr(sys, "pypy_version_info") +IS_GIL_DISABLED = ( + sysconfig.get_config_var("Py_GIL_DISABLED") and not sys._is_gil_enabled() +) + +# A decorator to run tests only when numpy is available +try: + import numpy as np + + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + return func + +except ImportError: + + def with_numpy(func): + """A decorator to skip tests requiring numpy.""" + + def my_func(): + raise SkipTest("Test requires numpy") + + return my_func + + np = None + +# TODO: Turn this back on after refactoring yield based tests in test_hashing +# with_numpy = skipif(not np, reason='Test requires numpy.') + +# we use memory_profiler library for memory consumption checks +try: + from memory_profiler import memory_usage + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + return func + + def memory_used(func, *args, **kwargs): + """Compute memory usage when executing func.""" + gc.collect() + mem_use = memory_usage((func, args, kwargs), interval=0.001) + return max(mem_use) - min(mem_use) + +except ImportError: + + def with_memory_profiler(func): + """A decorator to skip tests requiring memory_profiler.""" + + def dummy_func(): + raise SkipTest("Test requires memory_profiler.") + + return dummy_func + + memory_usage = memory_used = None + + +with_multiprocessing = skipif(mp is None, reason="Needs multiprocessing to run.") + + +with_dev_shm = skipif( + not os.path.exists("/dev/shm"), + reason="This test requires a large /dev/shm shared memory fs.", +) + +with_lz4 = skipif(lz4 is None, reason="Needs lz4 compression to run") + +without_lz4 = skipif(lz4 is not None, reason="Needs lz4 not being installed to run") diff --git a/lib/python3.10/site-packages/joblib/test/data/__init__.py b/lib/python3.10/site-packages/joblib/test/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ce781cc0913af57d6b94a6e3f4f028c6e82641d Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc b/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f0cd84cd6810039f6b9477b5310c3090ddfcaade Binary files /dev/null and b/lib/python3.10/site-packages/joblib/test/data/__pycache__/create_numpy_pickle.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py b/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..1dc80b60b787601267012516c567f354288e0aa6 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/data/create_numpy_pickle.py @@ -0,0 +1,106 @@ +""" +This script is used to generate test data for joblib/test/test_numpy_pickle.py +""" + +import re +import sys + +# pytest needs to be able to import this module even when numpy is +# not installed +try: + import numpy as np +except ImportError: + np = None + +import joblib + + +def get_joblib_version(joblib_version=joblib.__version__): + """Normalize joblib version by removing suffix. + + >>> get_joblib_version('0.8.4') + '0.8.4' + >>> get_joblib_version('0.8.4b1') + '0.8.4' + >>> get_joblib_version('0.9.dev0') + '0.9' + """ + matches = [re.match(r"(\d+).*", each) for each in joblib_version.split(".")] + return ".".join([m.group(1) for m in matches if m is not None]) + + +def write_test_pickle(to_pickle, args): + kwargs = {} + compress = args.compress + method = args.method + joblib_version = get_joblib_version() + py_version = "{0[0]}{0[1]}".format(sys.version_info) + numpy_version = "".join(np.__version__.split(".")[:2]) + + # The game here is to generate the right filename according to the options. + body = "_compressed" if (compress and method == "zlib") else "" + if compress: + if method == "zlib": + kwargs["compress"] = True + extension = ".gz" + else: + kwargs["compress"] = (method, 3) + extension = ".pkl.{}".format(method) + if args.cache_size: + kwargs["cache_size"] = 0 + body += "_cache_size" + else: + extension = ".pkl" + + pickle_filename = "joblib_{}{}_pickle_py{}_np{}{}".format( + joblib_version, body, py_version, numpy_version, extension + ) + + try: + joblib.dump(to_pickle, pickle_filename, **kwargs) + except Exception as e: + # With old python version (=< 3.3.), we can arrive there when + # dumping compressed pickle with LzmaFile. + print( + "Error: cannot generate file '{}' with arguments '{}'. " + "Error was: {}".format(pickle_filename, kwargs, e) + ) + else: + print("File '{}' generated successfully.".format(pickle_filename)) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Joblib pickle data generator.") + parser.add_argument( + "--cache_size", + action="store_true", + help="Force creation of companion numpy files for pickled arrays.", + ) + parser.add_argument( + "--compress", action="store_true", help="Generate compress pickles." + ) + parser.add_argument( + "--method", + type=str, + default="zlib", + choices=["zlib", "gzip", "bz2", "xz", "lzma", "lz4"], + help="Set compression method.", + ) + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. 
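+    # Illustration only (hypothetical dtypes, not the original list, which
+    # follows): an explicit byte-order character pins the endianness of the
+    # pickled arrays, e.g. np.dtype("<i8") for little-endian and
+    # np.dtype(">i8") for big-endian 64-bit integers, so pickles generated
+    # on one architecture still load and compare equal on another.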
+ to_pickle = [ + np.arange(5, dtype=np.dtype(" 0 + + +@with_numpy +@with_multiprocessing +def test_parallel_config_params_explicit_set(tmpdir): + with parallel_config(n_jobs=3, max_nbytes=1, temp_folder=tmpdir): + with Parallel(n_jobs=2, prefer="processes", max_nbytes="1M") as p: + assert isinstance(p._backend, LokyBackend) + assert p.n_jobs == 2 + + # Checks that memmapping is disabled + with raises(TypeError, match="Expected np.memmap instance"): + p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2) + + +@parametrize("param", ["prefer", "require"]) +def test_parallel_config_bad_params(param): + # Check that an error is raised when setting a wrong backend + # hint or constraint + with raises(ValueError, match=f"{param}=wrong is not a valid"): + with parallel_config(**{param: "wrong"}): + Parallel() + + +def test_parallel_config_constructor_params(): + # Check that an error is raised when backend is None + # but backend constructor params are given + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(inner_max_num_threads=1): + pass + + with raises(ValueError, match="only supported when backend is not None"): + with parallel_config(backend_param=1): + pass + + with raises(ValueError, match="only supported when backend is a string"): + with parallel_config(backend=BACKENDS[DEFAULT_BACKEND], backend_param=1): + pass + + +def test_parallel_config_nested(): + # Check that nested configuration retrieves the info from the + # parent config and do not reset them. + + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, BACKENDS[DEFAULT_BACKEND]) + assert p.n_jobs == 2 + + with parallel_config(backend="threading"): + with parallel_config(n_jobs=2): + p = Parallel() + assert isinstance(p._backend, ThreadingBackend) + assert p.n_jobs == 2 + + with parallel_config(verbose=100): + with parallel_config(n_jobs=2): + p = Parallel() + assert p.verbose == 100 + assert p.n_jobs == 2 + + +@with_numpy +@with_multiprocessing +@parametrize( + "backend", + ["multiprocessing", "threading", MultiprocessingBackend(), ThreadingBackend()], +) +@parametrize("context", [parallel_config, parallel_backend]) +def test_threadpool_limitation_in_child_context_error(context, backend): + with raises(AssertionError, match=r"does not acc.*inner_max_num_threads"): + context(backend, inner_max_num_threads=1) + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "unset" in Parallel + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with Parallel(n_jobs=None) as p: + assert p.n_jobs == 2 + + with context(backend="threading"): + default_n_jobs = Parallel().n_jobs + with Parallel(n_jobs=None) as p: + assert p.n_jobs == default_n_jobs + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_config_n_jobs_none(context): + # Check that n_jobs=None is interpreted as "explicitly set" in + # parallel_(config/backend) + # non regression test for #1473 + with context(backend="threading", n_jobs=2): + with context(backend="threading", n_jobs=None): + # n_jobs=None resets n_jobs to backend's default + with Parallel() as p: + assert p.n_jobs == 1 diff --git a/lib/python3.10/site-packages/joblib/test/test_dask.py b/lib/python3.10/site-packages/joblib/test/test_dask.py new file mode 100644 index 0000000000000000000000000000000000000000..5999cc29d1ae6c4e6bd64ec3977d3bfa6a246e66 --- /dev/null +++ 
b/lib/python3.10/site-packages/joblib/test/test_dask.py @@ -0,0 +1,607 @@ +from __future__ import absolute_import, division, print_function + +import os +import warnings +from random import random +from time import sleep +from uuid import uuid4 + +import pytest + +from .. import Parallel, delayed, parallel_backend, parallel_config +from .._dask import DaskDistributedBackend +from ..parallel import AutoBatchingMixin, ThreadingBackend +from .common import np, with_numpy +from .test_parallel import ( + _recursive_backend_info, + _test_deadlock_with_generator, + _test_parallel_unordered_generator_returns_fastest_first, # noqa: E501 +) + +distributed = pytest.importorskip("distributed") +dask = pytest.importorskip("dask") + +# These imports need to be after the pytest.importorskip hence the noqa: E402 +from distributed import Client, LocalCluster, get_client # noqa: E402 +from distributed.metrics import time # noqa: E402 + +# Note: pytest requires to manually import all fixtures used in the test +# and their dependencies. +from distributed.utils_test import cleanup, cluster, inc # noqa: E402, F401 + + +@pytest.fixture(scope="function", autouse=True) +def avoid_dask_env_leaks(tmp_path): + # when starting a dask nanny, the environment variable might change. + # this fixture makes sure the environment is reset after the test. + + from joblib._parallel_backends import ParallelBackendBase + + old_value = {k: os.environ.get(k) for k in ParallelBackendBase.MAX_NUM_THREADS_VARS} + yield + + # Reset the environment variables to their original values + for k, v in old_value.items(): + if v is None: + os.environ.pop(k, None) + else: + os.environ[k] = v + + +def noop(*args, **kwargs): + pass + + +def slow_raise_value_error(condition, duration=0.05): + sleep(duration) + if condition: + raise ValueError("condition evaluated to True") + + +def count_events(event_name, client): + worker_events = client.run(lambda dask_worker: dask_worker.log) + event_counts = {} + for w, events in worker_events.items(): + event_counts[w] = len( + [event for event in list(events) if event[1] == event_name] + ) + return event_counts + + +def test_simple(loop): + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as client: # noqa: F841 + with parallel_config(backend="dask"): + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + with pytest.raises(ValueError): + Parallel()( + delayed(slow_raise_value_error)(i == 3) for i in range(10) + ) + + seq = Parallel()(delayed(inc)(i) for i in range(10)) + assert seq == [inc(i) for i in range(10)] + + +def test_dask_backend_uses_autobatching(loop): + assert ( + DaskDistributedBackend.compute_batch_size + is AutoBatchingMixin.compute_batch_size + ) + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as client: # noqa: F841 + with parallel_config(backend="dask"): + with Parallel() as parallel: + # The backend should be initialized with a default + # batch size of 1: + backend = parallel._backend + assert isinstance(backend, DaskDistributedBackend) + assert backend.parallel is parallel + assert backend._effective_batch_size == 1 + + # Launch many short tasks that should trigger + # auto-batching: + parallel(delayed(lambda: None)() for _ in range(int(1e4))) + assert backend._effective_batch_size > 10 + + +@pytest.mark.parametrize("n_jobs", [2, -1]) +@pytest.mark.parametrize("context", [parallel_config, parallel_backend]) +def test_parallel_unordered_generator_returns_fastest_first_with_dask(n_jobs, 
context):
+    with distributed.Client(n_workers=2, threads_per_worker=2), context("dask"):
+        _test_parallel_unordered_generator_returns_fastest_first(None, n_jobs)
+
+
+@with_numpy
+@pytest.mark.parametrize("n_jobs", [2, -1])
+@pytest.mark.parametrize("return_as", ["generator", "generator_unordered"])
+@pytest.mark.parametrize("context", [parallel_config, parallel_backend])
+def test_deadlock_with_generator_and_dask(context, return_as, n_jobs):
+    with distributed.Client(n_workers=2, threads_per_worker=2), context("dask"):
+        _test_deadlock_with_generator(None, return_as, n_jobs)
+
+
+@with_numpy
+@pytest.mark.parametrize("context", [parallel_config, parallel_backend])
+def test_nested_parallelism_with_dask(context):
+    with distributed.Client(n_workers=2, threads_per_worker=2):
+        # 10 MB of data as argument to trigger implicit scattering
+        data = np.ones(int(1e7), dtype=np.uint8)
+        for i in range(2):
+            with context("dask"):
+                backend_types_and_levels = _recursive_backend_info(data=data)
+            assert len(backend_types_and_levels) == 4
+            assert all(
+                name == "DaskDistributedBackend" for name, _ in backend_types_and_levels
+            )
+
+        # No argument
+        with context("dask"):
+            backend_types_and_levels = _recursive_backend_info()
+        assert len(backend_types_and_levels) == 4
+        assert all(
+            name == "DaskDistributedBackend" for name, _ in backend_types_and_levels
+        )
+
+
+def random2():
+    return random()
+
+
+def test_dont_assume_function_purity(loop):
+    with cluster() as (s, [a, b]):
+        with Client(s["address"], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend="dask"):
+                x, y = Parallel()(delayed(random2)() for i in range(2))
+                assert x != y
+
+
+@pytest.mark.parametrize("mixed", [True, False])
+def test_dask_funcname(loop, mixed):
+    from joblib._dask import Batch
+
+    if not mixed:
+        tasks = [delayed(inc)(i) for i in range(4)]
+        batch_repr = "batch_of_inc_4_calls"
+    else:
+        tasks = [delayed(abs)(i) if i % 2 else delayed(inc)(i) for i in range(4)]
+        batch_repr = "mixed_batch_of_inc_4_calls"
+
+    assert repr(Batch(tasks)) == batch_repr
+
+    with cluster() as (s, [a, b]):
+        with Client(s["address"], loop=loop) as client:
+            with parallel_config(backend="dask"):
+                _ = Parallel(batch_size=2, pre_dispatch="all")(tasks)
+
+            def f(dask_scheduler):
+                return list(dask_scheduler.transition_log)
+
+            batch_repr = batch_repr.replace("4", "2")
+            log = client.run_on_scheduler(f)
+            assert all("batch_of_inc" in tup[0] for tup in log)
+
+
+def test_no_undesired_distributed_cache_hit():
+    # Dask has a pickle cache for callables that are called many times. Because
+    # the dask backends used to wrap both the functions and the arguments
+    # under instances of the Batch callable class this caching mechanism could
+    # lead to bugs as described in: https://github.com/joblib/joblib/pull/1055
+    # The joblib-dask backend has been refactored to avoid bundling the
+    # arguments as an attribute of the Batch instance to avoid this problem.
+    # This test serves as a non-regression test.
+
+    # Use a large number of input arguments to give the AutoBatchingMixin
+    # enough tasks to kick in.
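+    # Sketch of the failure mode this guards against (a simplified reading
+    # of the PR above, not a literal trace): if the Batch callable also
+    # owned its arguments, dask's pickle cache could replay one serialized
+    # (func, args) pair for many tasks, and the 100 result lists below
+    # could silently alias the same payload.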
+    lists = [[] for _ in range(100)]
+    np = pytest.importorskip("numpy")
+    X = np.arange(int(1e6))
+
+    def isolated_operation(list_, data=None):
+        if data is not None:
+            np.testing.assert_array_equal(data, X)
+        list_.append(uuid4().hex)
+        return list_
+
+    cluster = LocalCluster(n_workers=1, threads_per_worker=2)
+    client = Client(cluster)
+    try:
+        with parallel_config(backend="dask"):
+            # dispatches joblib.parallel.BatchedCalls
+            res = Parallel()(delayed(isolated_operation)(list_) for list_ in lists)
+
+        # The original arguments should not have been mutated as the mutation
+        # happens in the dask worker process.
+        assert lists == [[] for _ in range(100)]
+
+        # Here we did not pass any large numpy array as argument to
+        # isolated_operation so no scattering event should happen under the
+        # hood.
+        counts = count_events("receive-from-scatter", client)
+        assert sum(counts.values()) == 0
+        assert all([len(r) == 1 for r in res])
+
+        with parallel_config(backend="dask"):
+            # Append a large array which will be scattered by dask, and
+            # dispatch joblib._dask.Batch
+            res = Parallel()(
+                delayed(isolated_operation)(list_, data=X) for list_ in lists
+            )
+
+        # This time, auto-scattering should have kicked in.
+        counts = count_events("receive-from-scatter", client)
+        assert sum(counts.values()) > 0
+        assert all([len(r) == 1 for r in res])
+    finally:
+        client.close(timeout=30)
+        cluster.close(timeout=30)
+
+
+class CountSerialized(object):
+    def __init__(self, x):
+        self.x = x
+        self.count = 0
+
+    def __add__(self, other):
+        return self.x + getattr(other, "x", other)
+
+    __radd__ = __add__
+
+    def __reduce__(self):
+        self.count += 1
+        return (CountSerialized, (self.x,))
+
+
+def add5(a, b, c, d=0, e=0):
+    return a + b + c + d + e
+
+
+def test_manual_scatter(loop):
+    # Let's check that the number of times scattered and non-scattered
+    # variables are serialized is consistent between `joblib.Parallel` calls
+    # and equivalent native `client.submit` calls.
+
+    # The number of serializations can vary from one dask version to another,
+    # so this test only checks that `joblib.Parallel` does not add more
+    # serialization steps than a native `client.submit` call, but does not
+    # check for an exact number of serialization steps.
+
+    w, x, y, z = (CountSerialized(i) for i in range(4))
+
+    f = delayed(add5)
+    tasks = [f(x, y, z, d=4, e=5) for _ in range(10)]
+    tasks += [
+        f(x, z, y, d=5, e=4),
+        f(y, x, z, d=x, e=5),
+        f(z, z, x, d=z, e=y),
+    ]
+    expected = [func(*args, **kwargs) for func, args, kwargs in tasks]
+
+    with cluster() as (s, _):
+        with Client(s["address"], loop=loop) as client:  # noqa: F841
+            with parallel_config(backend="dask", scatter=[w, x, y]):
+                results_parallel = Parallel(batch_size=1)(tasks)
+            assert results_parallel == expected
+
+            # Check that an error is raised for bad arguments, as scatter must
+            # take a list/tuple
+            with pytest.raises(TypeError):
+                with parallel_config(backend="dask", loop=loop, scatter=1):
+                    pass
+
+    # Scattered variables are only serialized during scatter. Checking with an
+    # extra variable as this count can vary from one dask version
+    # to another.
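+    # At this point w, x and y were each scattered once up front, while z
+    # travelled inside every task payload; the counts recorded below are
+    # compared against an equivalent sequence of native client.submit calls.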
+ n_serialization_scatter_with_parallel = w.count + assert x.count == n_serialization_scatter_with_parallel + assert y.count == n_serialization_scatter_with_parallel + n_serialization_with_parallel = z.count + + # Reset the cluster and the serialization count + for var in (w, x, y, z): + var.count = 0 + + with cluster() as (s, _): + with Client(s["address"], loop=loop) as client: # noqa: F841 + scattered = dict() + for obj in w, x, y: + scattered[id(obj)] = client.scatter(obj, broadcast=True) + results_native = [ + client.submit( + func, + *(scattered.get(id(arg), arg) for arg in args), + **dict( + (key, scattered.get(id(value), value)) + for (key, value) in kwargs.items() + ), + key=str(uuid4()), + ).result() + for (func, args, kwargs) in tasks + ] + assert results_native == expected + + # Now check that the number of serialization steps is the same for joblib + # and native dask calls. + n_serialization_scatter_native = w.count + assert x.count == n_serialization_scatter_native + assert y.count == n_serialization_scatter_native + + assert n_serialization_scatter_with_parallel == n_serialization_scatter_native + + distributed_version = tuple(int(v) for v in distributed.__version__.split(".")) + if distributed_version < (2023, 4): + # Previous to 2023.4, the serialization was adding an extra call to + # __reduce__ for the last job `f(z, z, x, d=z, e=y)`, because `z` + # appears both in the args and kwargs, which is not the case when + # running with joblib. Cope with this discrepancy. + assert z.count == n_serialization_with_parallel + 1 + else: + assert z.count == n_serialization_with_parallel + + +# When the same IOLoop is used for multiple clients in a row, use +# loop_in_thread instead of loop to prevent the Client from closing it. See +# dask/distributed #4112 +def test_auto_scatter(loop_in_thread): + np = pytest.importorskip("numpy") + data1 = np.ones(int(1e4), dtype=np.uint8) + data2 = np.ones(int(1e4), dtype=np.uint8) + data_to_process = ([data1] * 3) + ([data2] * 3) + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop_in_thread) as client: + with parallel_config(backend="dask"): + # Passing the same data as arg and kwarg triggers a single + # scatter operation whose result is reused. + Parallel()( + delayed(noop)(data, data, i, opt=data) + for i, data in enumerate(data_to_process) + ) + # By default large array are automatically scattered with + # broadcast=1 which means that one worker must directly receive + # the data from the scatter operation once. + counts = count_events("receive-from-scatter", client) + assert counts[a["address"]] + counts[b["address"]] == 2 + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop_in_thread) as client: + with parallel_config(backend="dask"): + Parallel()(delayed(noop)(data1[:3], i) for i in range(5)) + # Small arrays are passed within the task definition without going + # through a scatter operation. 
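+            # (Assumption for illustration: the exact auto-scatter size
+            # cutoff is an implementation detail of the dask backend; a
+            # three-element uint8 slice is far below any plausible
+            # threshold, hence the zero counts asserted below.)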
+ counts = count_events("receive-from-scatter", client) + assert counts[a["address"]] == 0 + assert counts[b["address"]] == 0 + + +@pytest.mark.parametrize("retry_no", list(range(2))) +def test_nested_scatter(loop, retry_no): + np = pytest.importorskip("numpy") + + NUM_INNER_TASKS = 10 + NUM_OUTER_TASKS = 10 + + def my_sum(x, i, j): + return np.sum(x) + + def outer_function_joblib(array, i): + client = get_client() # noqa + with parallel_config(backend="dask"): + results = Parallel()( + delayed(my_sum)(array[j:], i, j) for j in range(NUM_INNER_TASKS) + ) + return sum(results) + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as _: + with parallel_config(backend="dask"): + my_array = np.ones(10000) + _ = Parallel()( + delayed(outer_function_joblib)(my_array[i:], i) + for i in range(NUM_OUTER_TASKS) + ) + + +def test_nested_backend_context_manager(loop_in_thread): + def get_nested_pids(): + pids = set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + pids |= set(Parallel(n_jobs=2)(delayed(os.getpid)() for _ in range(2))) + return pids + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop_in_thread) as client: + with parallel_config(backend="dask"): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + # No deadlocks + with Client(s["address"], loop=loop_in_thread) as client: # noqa: F841 + with parallel_config(backend="dask"): + pid_groups = Parallel(n_jobs=2)( + delayed(get_nested_pids)() for _ in range(10) + ) + for pid_group in pid_groups: + assert len(set(pid_group)) <= 2 + + +def test_nested_backend_context_manager_implicit_n_jobs(loop): + # Check that Parallel with no explicit n_jobs value automatically selects + # all the dask workers, including in nested calls. 
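+    # With the dask backend, an unset n_jobs is reported as -1, meaning
+    # "use every worker known to the client"; the assertions below check
+    # that nested Parallel calls inherit that meaning instead of falling
+    # back to the local cpu_count().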
+ + def _backend_type(p): + return p._backend.__class__.__name__ + + def get_nested_implicit_n_jobs(): + with Parallel() as p: + return _backend_type(p), p.n_jobs + + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as client: # noqa: F841 + with parallel_config(backend="dask"): + with Parallel() as p: + assert _backend_type(p) == "DaskDistributedBackend" + assert p.n_jobs == -1 + all_nested_n_jobs = p( + delayed(get_nested_implicit_n_jobs)() for _ in range(2) + ) + for backend_type, nested_n_jobs in all_nested_n_jobs: + assert backend_type == "DaskDistributedBackend" + assert nested_n_jobs == -1 + + +def test_errors(loop): + with pytest.raises(ValueError) as info: + with parallel_config(backend="dask"): + pass + + assert "create a dask client" in str(info.value).lower() + + +def test_correct_nested_backend(loop): + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as client: # noqa: F841 + # No requirement, should be us + with parallel_config(backend="dask"): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require=None) for _ in range(1) + ) + assert isinstance(result[0][0][0], DaskDistributedBackend) + + # Require threads, should be threading + with parallel_config(backend="dask"): + result = Parallel(n_jobs=2)( + delayed(outer)(nested_require="sharedmem") for _ in range(1) + ) + assert isinstance(result[0][0][0], ThreadingBackend) + + +def outer(nested_require): + return Parallel(n_jobs=2, prefer="threads")( + delayed(middle)(nested_require) for _ in range(1) + ) + + +def middle(require): + return Parallel(n_jobs=2, require=require)(delayed(inner)() for _ in range(1)) + + +def inner(): + return Parallel()._backend + + +def test_secede_with_no_processes(loop): + # https://github.com/dask/distributed/issues/1775 + with Client(loop=loop, processes=False, set_as_default=True): + with parallel_config(backend="dask"): + Parallel(n_jobs=4)(delayed(id)(i) for i in range(2)) + + +def _worker_address(_): + from distributed import get_worker + + return get_worker().address + + +def test_dask_backend_keywords(loop): + with cluster() as (s, [a, b]): + with Client(s["address"], loop=loop) as client: # noqa: F841 + with parallel_config(backend="dask", workers=a["address"]): + seq = Parallel()(delayed(_worker_address)(i) for i in range(10)) + assert seq == [a["address"]] * 10 + + with parallel_config(backend="dask", workers=b["address"]): + seq = Parallel()(delayed(_worker_address)(i) for i in range(10)) + assert seq == [b["address"]] * 10 + + +def test_scheduler_tasks_cleanup(loop): + with Client(processes=False, loop=loop) as client: + with parallel_config(backend="dask"): + Parallel()(delayed(inc)(i) for i in range(10)) + + start = time() + while client.cluster.scheduler.tasks: + sleep(0.01) + assert time() < start + 5 + + assert not client.futures + + +@pytest.mark.parametrize("cluster_strategy", ["adaptive", "late_scaling"]) +@pytest.mark.skipif( + distributed.__version__ <= "2.1.1" and distributed.__version__ >= "1.28.0", + reason="distributed bug - https://github.com/dask/distributed/pull/2841", +) +def test_wait_for_workers(cluster_strategy): + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + if cluster_strategy == "adaptive": + cluster.adapt(minimum=0, maximum=2) + elif cluster_strategy == "late_scaling": + # Tell the cluster to start workers but this is a non-blocking call + # and new workers might take time to connect. 
In this case the Parallel + # call should wait for at least one worker to come up before starting + # to schedule work. + cluster.scale(2) + try: + with parallel_config(backend="dask"): + # The following should wait a bit for at least one worker to + # become available. + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +def test_wait_for_workers_timeout(): + # Start a cluster with 0 worker: + cluster = LocalCluster(n_workers=0, processes=False, threads_per_worker=2) + client = Client(cluster) + try: + with parallel_config(backend="dask", wait_for_workers_timeout=0.1): + # Short timeout: DaskDistributedBackend + msg = "DaskDistributedBackend has no worker after 0.1 seconds." + with pytest.raises(TimeoutError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + + with parallel_config(backend="dask", wait_for_workers_timeout=0): + # No timeout: fallback to generic joblib failure: + msg = "DaskDistributedBackend has no active worker" + with pytest.raises(RuntimeError, match=msg): + Parallel()(delayed(inc)(i) for i in range(10)) + finally: + client.close() + cluster.close() + + +@pytest.mark.parametrize("backend", ["loky", "multiprocessing"]) +def test_joblib_warning_inside_dask_daemonic_worker(backend): + cluster = LocalCluster(n_workers=2) + client = Client(cluster) + try: + + def func_using_joblib_parallel(): + # Somehow trying to check the warning type here (e.g. with + # pytest.warns(UserWarning)) make the test hang. Work-around: + # return the warning record to the client and the warning check is + # done client-side. + with warnings.catch_warnings(record=True) as record: + Parallel(n_jobs=2, backend=backend)(delayed(inc)(i) for i in range(10)) + + return record + + fut = client.submit(func_using_joblib_parallel) + record = fut.result() + + assert len(record) == 1 + warning = record[0].message + assert isinstance(warning, UserWarning) + assert "distributed.worker.daemon" in str(warning) + finally: + client.close(timeout=30) + cluster.close(timeout=30) diff --git a/lib/python3.10/site-packages/joblib/test/test_disk.py b/lib/python3.10/site-packages/joblib/test/test_disk.py new file mode 100644 index 0000000000000000000000000000000000000000..5ae2abbf02cd71fc776f780ca3fdcbe64d8b4b6d --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_disk.py @@ -0,0 +1,80 @@ +""" +Unit tests for the disk utilities. +""" + +# Authors: Gael Varoquaux +# Lars Buitinck +# Copyright (c) 2010 Gael Varoquaux +# License: BSD Style, 3 clauses. + +from __future__ import with_statement + +import array +import os + +from joblib.disk import disk_used, memstr_to_bytes, mkdirp, rm_subdirs +from joblib.testing import parametrize, raises + +############################################################################### + + +def test_disk_used(tmpdir): + cachedir = tmpdir.strpath + # Not write a file that is 1M big in this directory, and check the + # size. The reason we use such a big file is that it makes us robust + # to errors due to block allocation. 
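+    # Worked sizing (assuming the common 4-byte itemsize for "i"):
+    #     n = 1024 * 1024 / 4 = 262144 items, i.e. a 1 MiB file,
+    # so the disk_used() checks below tolerate up to 12 KiB of
+    # block-rounding overhead.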
+ a = array.array("i") + sizeof_i = a.itemsize + target_size = 1024 + n = int(target_size * 1024 / sizeof_i) + a = array.array("i", n * (1,)) + with open(os.path.join(cachedir, "test"), "wb") as output: + a.tofile(output) + assert disk_used(cachedir) >= target_size + assert disk_used(cachedir) < target_size + 12 + + +@parametrize( + "text,value", + [ + ("80G", 80 * 1024**3), + ("1.4M", int(1.4 * 1024**2)), + ("120M", 120 * 1024**2), + ("53K", 53 * 1024), + ], +) +def test_memstr_to_bytes(text, value): + assert memstr_to_bytes(text) == value + + +@parametrize( + "text,exception,regex", + [ + ("fooG", ValueError, r"Invalid literal for size.*fooG.*"), + ("1.4N", ValueError, r"Invalid literal for size.*1.4N.*"), + ], +) +def test_memstr_to_bytes_exception(text, exception, regex): + with raises(exception) as excinfo: + memstr_to_bytes(text) + assert excinfo.match(regex) + + +def test_mkdirp(tmpdir): + mkdirp(os.path.join(tmpdir.strpath, "ham")) + mkdirp(os.path.join(tmpdir.strpath, "ham")) + mkdirp(os.path.join(tmpdir.strpath, "spam", "spam")) + + # Not all OSErrors are ignored + with raises(OSError): + mkdirp("") + + +def test_rm_subdirs(tmpdir): + sub_path = os.path.join(tmpdir.strpath, "subdir_one", "subdir_two") + full_path = os.path.join(sub_path, "subdir_three") + mkdirp(os.path.join(full_path)) + + rm_subdirs(sub_path) + assert os.path.exists(sub_path) + assert not os.path.exists(full_path) diff --git a/lib/python3.10/site-packages/joblib/test/test_func_inspect.py b/lib/python3.10/site-packages/joblib/test/test_func_inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..be7bef0d10baa21d197602df9e8eec68770237fb --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_func_inspect.py @@ -0,0 +1,338 @@ +""" +Test the func_inspect module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import functools + +from joblib.func_inspect import ( + _clean_win_chars, + filter_args, + format_signature, + get_func_code, + get_func_name, +) +from joblib.memory import Memory +from joblib.test.common import with_numpy +from joblib.testing import fixture, parametrize, raises + + +############################################################################### +# Module-level functions and fixture, for tests +def f(x, y=0): + pass + + +def g(x): + pass + + +def h(x, y=0, *args, **kwargs): + pass + + +def i(x=1): + pass + + +def j(x, y, **kwargs): + pass + + +def k(*args, **kwargs): + pass + + +def m1(x, *, y): + pass + + +def m2(x, *, y, z=3): + pass + + +@fixture(scope="module") +def cached_func(tmpdir_factory): + # Create a Memory object to test decorated functions. + # We should be careful not to call the decorated functions, so that + # cache directories are not created in the temp dir. 
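+    # (Calling cached_func_inner here would eagerly materialize a cache
+    # folder under the pytest tmp dir, which is exactly what the comment
+    # above warns against; the fixture only builds and returns the
+    # wrapper.)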
+    cachedir = tmpdir_factory.mktemp("joblib_test_func_inspect")
+    mem = Memory(cachedir.strpath)
+
+    @mem.cache
+    def cached_func_inner(x):
+        return x
+
+    return cached_func_inner
+
+
+class Klass(object):
+    def f(self, x):
+        return x
+
+
+###############################################################################
+# Tests
+
+
+@parametrize(
+    "func,args,filtered_args",
+    [
+        (f, [[], (1,)], {"x": 1, "y": 0}),
+        (f, [["x"], (1,)], {"y": 0}),
+        (f, [["y"], (0,)], {"x": 0}),
+        (f, [["y"], (0,), {"y": 1}], {"x": 0}),
+        (f, [["x", "y"], (0,)], {}),
+        (f, [[], (0,), {"y": 1}], {"x": 0, "y": 1}),
+        (f, [["y"], (), {"x": 2, "y": 1}], {"x": 2}),
+        (g, [[], (), {"x": 1}], {"x": 1}),
+        (i, [[], (2,)], {"x": 2}),
+    ],
+)
+def test_filter_args(func, args, filtered_args):
+    assert filter_args(func, *args) == filtered_args
+
+
+def test_filter_args_method():
+    obj = Klass()
+    assert filter_args(obj.f, [], (1,)) == {"x": 1, "self": obj}
+
+
+@parametrize(
+    "func,args,filtered_args",
+    [
+        (h, [[], (1,)], {"x": 1, "y": 0, "*": [], "**": {}}),
+        (h, [[], (1, 2, 3, 4)], {"x": 1, "y": 2, "*": [3, 4], "**": {}}),
+        (h, [[], (1, 25), {"ee": 2}], {"x": 1, "y": 25, "*": [], "**": {"ee": 2}}),
+        (h, [["*"], (1, 2, 25), {"ee": 2}], {"x": 1, "y": 2, "**": {"ee": 2}}),
+    ],
+)
+def test_filter_varargs(func, args, filtered_args):
+    assert filter_args(func, *args) == filtered_args
+
+
+test_filter_kwargs_extra_params = [
+    (m1, [[], (1,), {"y": 2}], {"x": 1, "y": 2}),
+    (m2, [[], (1,), {"y": 2}], {"x": 1, "y": 2, "z": 3}),
+]
+
+
+@parametrize(
+    "func,args,filtered_args",
+    [
+        (k, [[], (1, 2), {"ee": 2}], {"*": [1, 2], "**": {"ee": 2}}),
+        (k, [[], (3, 4)], {"*": [3, 4], "**": {}}),
+    ]
+    + test_filter_kwargs_extra_params,
+)
+def test_filter_kwargs(func, args, filtered_args):
+    assert filter_args(func, *args) == filtered_args
+
+
+def test_filter_args_2():
+    assert filter_args(j, [], (1, 2), {"ee": 2}) == {"x": 1, "y": 2, "**": {"ee": 2}}
+
+    ff = functools.partial(f, 1)
+    # filter_args has to special-case partial
+    assert filter_args(ff, [], (1,)) == {"*": [1], "**": {}}
+    assert filter_args(ff, ["y"], (1,)) == {"*": [1], "**": {}}
+
+
+@parametrize("func,funcname", [(f, "f"), (g, "g"), (cached_func, "cached_func")])
+def test_func_name(func, funcname):
+    # Check that we are not confused by decoration
+    # here testcase 'cached_func' is the function itself
+    assert get_func_name(func)[1] == funcname
+
+
+def test_func_name_on_inner_func(cached_func):
+    # Check that we are not confused by decoration
+    # here testcase 'cached_func' is the 'cached_func_inner' function
+    # returned by 'cached_func' fixture
+    assert get_func_name(cached_func)[1] == "cached_func_inner"
+
+
+def test_func_name_collision_on_inner_func():
+    # Check that two functions defining and caching an inner function
+    # with the same name do not cause a (module, name) collision
+    def f():
+        def inner_func():
+            return  # pragma: no cover
+
+        return get_func_name(inner_func)
+
+    def g():
+        def inner_func():
+            return  # pragma: no cover
+
+        return get_func_name(inner_func)
+
+    module, name = f()
+    other_module, other_name = g()
+
+    assert name == other_name
+    assert module != other_module
+
+
+def test_func_inspect_errors():
+    # Check that func_inspect is robust and will work on weird objects
+    assert get_func_name("a".lower)[-1] == "lower"
+    assert get_func_code("a".lower)[1:] == (None, -1)
+    ff = lambda x: x  # noqa: E731
+    assert get_func_name(ff, win_characters=False)[-1] == "<lambda>"
+    assert get_func_code(ff)[1] == __file__.replace(".pyc", ".py")
+    # Simulate a
function defined in __main__
+    ff.__module__ = "__main__"
+    assert get_func_name(ff, win_characters=False)[-1] == "<lambda>"
+    assert get_func_code(ff)[1] == __file__.replace(".pyc", ".py")
+
+
+def func_with_kwonly_args(a, b, *, kw1="kw1", kw2="kw2"):
+    pass
+
+
+def func_with_signature(a: int, b: int) -> None:
+    pass
+
+
+def test_filter_args_edge_cases():
+    assert filter_args(func_with_kwonly_args, [], (1, 2), {"kw1": 3, "kw2": 4}) == {
+        "a": 1,
+        "b": 2,
+        "kw1": 3,
+        "kw2": 4,
+    }
+
+    # filter_args does care about keyword-only arguments: trying to pass
+    # 'kw1' through *args raises an informative error
+    with raises(ValueError) as excinfo:
+        filter_args(func_with_kwonly_args, [], (1, 2, 3), {"kw2": 2})
+    excinfo.match("Keyword-only parameter 'kw1' was passed as positional parameter")
+
+    assert filter_args(
+        func_with_kwonly_args, ["b", "kw2"], (1, 2), {"kw1": 3, "kw2": 4}
+    ) == {"a": 1, "kw1": 3}
+
+    assert filter_args(func_with_signature, ["b"], (1, 2)) == {"a": 1}
+
+
+def test_bound_methods():
+    """Make sure that calling the same method on two different instances
+    of the same class does resolve to different signatures.
+    """
+    a = Klass()
+    b = Klass()
+    assert filter_args(a.f, [], (1,)) != filter_args(b.f, [], (1,))
+
+
+@parametrize(
+    "exception,regex,func,args",
+    [
+        (
+            ValueError,
+            "ignore_lst must be a list of parameters to ignore",
+            f,
+            ["bar", (None,)],
+        ),
+        (
+            ValueError,
+            r"Ignore list: argument \'(.*)\' is not defined",
+            g,
+            [["bar"], (None,)],
+        ),
+        (ValueError, "Wrong number of arguments", h, [[]]),
+    ],
+)
+def test_filter_args_error_msg(exception, regex, func, args):
+    """Make sure that filter_args returns decent error messages, for the
+    sake of the user.
+    """
+    with raises(exception) as excinfo:
+        filter_args(func, *args)
+    excinfo.match(regex)
+
+
+def test_filter_args_no_kwargs_mutation():
+    """Non-regression test against 0.12.0 changes.
+
+    https://github.com/joblib/joblib/pull/75
+
+    Make sure filter args doesn't mutate the kwargs dict that gets passed in.
+    """
+    kwargs = {"x": 0}
+    filter_args(g, [], [], kwargs)
+    assert kwargs == {"x": 0}
+
+
+def test_clean_win_chars():
+    string = r"C:\foo\bar\main.py"
+    mangled_string = _clean_win_chars(string)
+    for char in ("\\", ":", "<", ">", "!"):
+        assert char not in mangled_string
+
+
+@parametrize(
+    "func,args,kwargs,sgn_expected",
+    [
+        (g, [list(range(5))], {}, "g([0, 1, 2, 3, 4])"),
+        (k, [1, 2, (3, 4)], {"y": True}, "k(1, 2, (3, 4), y=True)"),
+    ],
+)
+def test_format_signature(func, args, kwargs, sgn_expected):
+    # Test signature formatting.
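+    # e.g. format_signature(g, [0, 1, 2, 3, 4]) renders the literal call
+    # string "g([0, 1, 2, 3, 4])", matching sgn_expected in the table
+    # above.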
+ path, sgn_result = format_signature(func, *args, **kwargs) + assert sgn_result == sgn_expected + + +def test_format_signature_long_arguments(): + shortening_threshold = 1500 + # shortening gets it down to 700 characters but there is the name + # of the function in the signature and a few additional things + # like dots for the ellipsis + shortening_target = 700 + 10 + + arg = "a" * shortening_threshold + _, signature = format_signature(h, arg) + assert len(signature) < shortening_target + + nb_args = 5 + args = [arg for _ in range(nb_args)] + _, signature = format_signature(h, *args) + assert len(signature) < shortening_target * nb_args + + kwargs = {str(i): arg for i, arg in enumerate(args)} + _, signature = format_signature(h, **kwargs) + assert len(signature) < shortening_target * nb_args + + _, signature = format_signature(h, *args, **kwargs) + assert len(signature) < shortening_target * 2 * nb_args + + +@with_numpy +def test_format_signature_numpy(): + """Test the format signature formatting with numpy.""" + + +def test_special_source_encoding(): + from joblib.test.test_func_inspect_special_encoding import big5_f + + func_code, source_file, first_line = get_func_code(big5_f) + assert first_line == 5 + assert "def big5_f():" in func_code + assert "test_func_inspect_special_encoding" in source_file + + +def _get_code(): + from joblib.test.test_func_inspect_special_encoding import big5_f + + return get_func_code(big5_f)[0] + + +def test_func_code_consistency(): + from joblib.parallel import Parallel, delayed + + codes = Parallel(n_jobs=2)(delayed(_get_code)() for _ in range(5)) + assert len(set(codes)) == 1 diff --git a/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py b/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..6c41a59a6900ced36050bf357359c1164a11fdbe --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_func_inspect_special_encoding.py @@ -0,0 +1,9 @@ +# -*- coding: big5 -*- + + +# Some Traditional Chinese characters: ¤@¨Ç¤¤¤å¦r²Å +def big5_f(): + """¥Î©ó´ú¸Õªº¨ç¼Æ + """ + # µùÄÀ + return 0 diff --git a/lib/python3.10/site-packages/joblib/test/test_hashing.py b/lib/python3.10/site-packages/joblib/test/test_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..94b51de5af174933e31dc72242182c90e5c6be6e --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_hashing.py @@ -0,0 +1,520 @@ +""" +Test the hashing module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import collections +import gc +import hashlib +import io +import itertools +import pickle +import random +import sys +import time +from concurrent.futures import ProcessPoolExecutor +from decimal import Decimal + +from joblib.func_inspect import filter_args +from joblib.hashing import hash +from joblib.memory import Memory +from joblib.test.common import np, with_numpy +from joblib.testing import fixture, parametrize, raises, skipif + + +def unicode(s): + return s + + +############################################################################### +# Helper functions for the tests +def time_func(func, *args): + """Time function func on *args.""" + times = list() + for _ in range(3): + t1 = time.time() + func(*args) + times.append(time.time() - t1) + return min(times) + + +def relative_time(func1, func2, *args): + """Return the relative time between func1 and func2 applied on + *args. 
+ """ + time_func1 = time_func(func1, *args) + time_func2 = time_func(func2, *args) + relative_diff = 0.5 * (abs(time_func1 - time_func2) / (time_func1 + time_func2)) + return relative_diff + + +class Klass(object): + def f(self, x): + return x + + +class KlassWithCachedMethod(object): + def __init__(self, cachedir): + mem = Memory(location=cachedir) + self.f = mem.cache(self.f) + + def f(self, x): + return x + + +############################################################################### +# Tests + +input_list = [ + 1, + 2, + 1.0, + 2.0, + 1 + 1j, + 2.0 + 1j, + "a", + "b", + (1,), + ( + 1, + 1, + ), + [ + 1, + ], + [ + 1, + 1, + ], + {1: 1}, + {1: 2}, + {2: 1}, + None, + gc.collect, + [ + 1, + ].append, + # Next 2 sets have unorderable elements in python 3. + set(("a", 1)), + set(("a", 1, ("a", 1))), + # Next 2 dicts have unorderable type of keys in python 3. + {"a": 1, 1: 2}, + {"a": 1, 1: 2, "d": {"a": 1}}, +] + + +@parametrize("obj1", input_list) +@parametrize("obj2", input_list) +def test_trivial_hash(obj1, obj2): + """Smoke test hash on various types.""" + # Check that 2 objects have the same hash only if they are the same. + are_hashes_equal = hash(obj1) == hash(obj2) + are_objs_identical = obj1 is obj2 + assert are_hashes_equal == are_objs_identical + + +def test_hash_methods(): + # Check that hashing instance methods works + a = io.StringIO(unicode("a")) + assert hash(a.flush) == hash(a.flush) + a1 = collections.deque(range(10)) + a2 = collections.deque(range(9)) + assert hash(a1.extend) != hash(a2.extend) + + +@fixture(scope="function") +@with_numpy +def three_np_arrays(): + rnd = np.random.RandomState(0) + arr1 = rnd.random_sample((10, 10)) + arr2 = arr1.copy() + arr3 = arr2.copy() + arr3[0] += 1 + return arr1, arr2, arr3 + + +def test_hash_numpy_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + for obj1, obj2 in itertools.product(three_np_arrays, repeat=2): + are_hashes_equal = hash(obj1) == hash(obj2) + are_arrays_equal = np.all(obj1 == obj2) + assert are_hashes_equal == are_arrays_equal + + assert hash(arr1) != hash(arr1.T) + + +def test_hash_numpy_dict_of_arrays(three_np_arrays): + arr1, arr2, arr3 = three_np_arrays + + d1 = {1: arr1, 2: arr2} + d2 = {1: arr2, 2: arr1} + d3 = {1: arr2, 2: arr3} + + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + + +@with_numpy +@parametrize("dtype", ["datetime64[s]", "timedelta64[D]"]) +def test_numpy_datetime_array(dtype): + # memoryview is not supported for some dtypes e.g. 
datetime64
+    # see https://github.com/joblib/joblib/issues/188 for more details
+    a_hash = hash(np.arange(10))
+    array = np.arange(0, 10, dtype=dtype)
+    assert hash(array) != a_hash
+
+
+@with_numpy
+def test_hash_numpy_noncontiguous():
+    a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order="F")[:, :1, :]
+    b = np.ascontiguousarray(a)
+    assert hash(a) != hash(b)
+
+    c = np.asfortranarray(a)
+    assert hash(a) != hash(c)
+
+
+@with_numpy
+@parametrize("coerce_mmap", [True, False])
+def test_hash_memmap(tmpdir, coerce_mmap):
+    """Check that memmap and arrays hash identically if coerce_mmap is True."""
+    filename = tmpdir.join("memmap_temp").strpath
+    try:
+        m = np.memmap(filename, shape=(10, 10), mode="w+")
+        a = np.asarray(m)
+        are_hashes_equal = hash(a, coerce_mmap=coerce_mmap) == hash(
+            m, coerce_mmap=coerce_mmap
+        )
+        assert are_hashes_equal == coerce_mmap
+    finally:
+        if "m" in locals():
+            del m
+            # Force a garbage-collection cycle, to be certain that the
+            # object is deleted, and we don't run into a problem under
+            # Windows with a file handle still open.
+            gc.collect()
+
+
+@with_numpy
+@skipif(
+    sys.platform == "win32",
+    reason="This test is not stable under windows for some reason",
+)
+def test_hash_numpy_performance():
+    """Check the performance of hashing numpy arrays:
+
+    In [22]: a = np.random.random(1000000)
+
+    In [23]: %timeit hashlib.md5(a).hexdigest()
+    100 loops, best of 3: 20.7 ms per loop
+
+    In [24]: %timeit hashlib.md5(pickle.dumps(a, protocol=2)).hexdigest()
+    1 loops, best of 3: 73.1 ms per loop
+
+    In [25]: %timeit hashlib.md5(cPickle.dumps(a, protocol=2)).hexdigest()
+    10 loops, best of 3: 53.9 ms per loop
+
+    In [26]: %timeit hash(a)
+    100 loops, best of 3: 20.8 ms per loop
+    """
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(1000000)
+
+    def md5_hash(x):
+        return hashlib.md5(memoryview(x)).hexdigest()
+
+    relative_diff = relative_time(md5_hash, hash, a)
+    assert relative_diff < 0.3
+
+    # Check that hashing a tuple of 3 arrays takes approximately
+    # 3 times as much as hashing one array
+    time_hashlib = 3 * time_func(md5_hash, a)
+    time_hash = time_func(hash, (a, a, a))
+    relative_diff = 0.5 * (abs(time_hash - time_hashlib) / (time_hash + time_hashlib))
+    assert relative_diff < 0.3
+
+
+def test_bound_methods_hash():
+    """Make sure that calling the same method on two different instances
+    of the same class resolves to the same hash.
+    """
+    a = Klass()
+    b = Klass()
+    assert hash(filter_args(a.f, [], (1,))) == hash(filter_args(b.f, [], (1,)))
+
+
+def test_bound_cached_methods_hash(tmpdir):
+    """Make sure that calling the same _cached_ method on two different
+    instances of the same class resolves to the same hash.
+ """ + a = KlassWithCachedMethod(tmpdir.strpath) + b = KlassWithCachedMethod(tmpdir.strpath) + assert hash(filter_args(a.f.func, [], (1,))) == hash( + filter_args(b.f.func, [], (1,)) + ) + + +@with_numpy +def test_hash_object_dtype(): + """Make sure that ndarrays with dtype `object' hash correctly.""" + + a = np.array([np.arange(i) for i in range(6)], dtype=object) + b = np.array([np.arange(i) for i in range(6)], dtype=object) + + assert hash(a) == hash(b) + + +@with_numpy +def test_numpy_scalar(): + # Numpy scalars are built from compiled functions, and lead to + # strange pickling paths explored, that can give hash collisions + a = np.float64(2.0) + b = np.float64(3.0) + assert hash(a) != hash(b) + + +def test_dict_hash(tmpdir): + # Check that dictionaries hash consistently, even though the ordering + # of the keys is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + d = { + "#s12069__c_maps.nii.gz": [33], + "#s12158__c_maps.nii.gz": [33], + "#s12258__c_maps.nii.gz": [33], + "#s12277__c_maps.nii.gz": [33], + "#s12300__c_maps.nii.gz": [33], + "#s12401__c_maps.nii.gz": [33], + "#s12430__c_maps.nii.gz": [33], + "#s13817__c_maps.nii.gz": [33], + "#s13903__c_maps.nii.gz": [33], + "#s13916__c_maps.nii.gz": [33], + "#s13981__c_maps.nii.gz": [33], + "#s13982__c_maps.nii.gz": [33], + "#s13983__c_maps.nii.gz": [33], + } + + a = k.f(d) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_hash(tmpdir): + # Check that sets hash consistently, even though their ordering + # is not guaranteed + k = KlassWithCachedMethod(tmpdir.strpath) + + s = set( + [ + "#s12069__c_maps.nii.gz", + "#s12158__c_maps.nii.gz", + "#s12258__c_maps.nii.gz", + "#s12277__c_maps.nii.gz", + "#s12300__c_maps.nii.gz", + "#s12401__c_maps.nii.gz", + "#s12430__c_maps.nii.gz", + "#s13817__c_maps.nii.gz", + "#s13903__c_maps.nii.gz", + "#s13916__c_maps.nii.gz", + "#s13981__c_maps.nii.gz", + "#s13982__c_maps.nii.gz", + "#s13983__c_maps.nii.gz", + ] + ) + + a = k.f(s) + b = k.f(a) + + assert hash(a) == hash(b) + + +def test_set_decimal_hash(): + # Check that sets containing decimals hash consistently, even though + # ordering is not guaranteed + assert hash(set([Decimal(0), Decimal("NaN")])) == hash( + set([Decimal("NaN"), Decimal(0)]) + ) + + +def test_string(): + # Test that we obtain the same hash for object owning several strings, + # whatever the past of these strings (which are immutable in Python) + string = "foo" + a = {string: "bar"} + b = {string: "bar"} + c = pickle.loads(pickle.dumps(b)) + assert hash([a, b]) == hash([a, c]) + + +@with_numpy +def test_numpy_dtype_pickling(): + # numpy dtype hashing is tricky to get right: see #231, #239, #251 #1080, + # #1082, and explanatory comments inside + # ``joblib.hashing.NumpyHasher.save``. + + # In this test, we make sure that the pickling of numpy dtypes is robust to + # object identity and object copy. 
+ + dt1 = np.dtype("f4") + dt2 = np.dtype("f4") + + # simple dtypes objects are interned + assert dt1 is dt2 + assert hash(dt1) == hash(dt2) + + dt1_roundtripped = pickle.loads(pickle.dumps(dt1)) + assert dt1 is not dt1_roundtripped + assert hash(dt1) == hash(dt1_roundtripped) + + assert hash([dt1, dt1]) == hash([dt1_roundtripped, dt1_roundtripped]) + assert hash([dt1, dt1]) == hash([dt1, dt1_roundtripped]) + + complex_dt1 = np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,))]) + complex_dt2 = np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,))]) + + # complex dtypes objects are not interned + assert hash(complex_dt1) == hash(complex_dt2) + + complex_dt1_roundtripped = pickle.loads(pickle.dumps(complex_dt1)) + assert complex_dt1_roundtripped is not complex_dt1 + assert hash(complex_dt1) == hash(complex_dt1_roundtripped) + + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1_roundtripped] + ) + assert hash([complex_dt1, complex_dt1]) == hash( + [complex_dt1_roundtripped, complex_dt1] + ) + + +@parametrize( + "to_hash,expected", + [ + ("This is a string to hash", "71b3f47df22cb19431d85d92d0b230b2"), + ("C'est l\xe9t\xe9", "2d8d189e9b2b0b2e384d93c868c0e576"), + ((123456, 54321, -98765), "e205227dd82250871fa25aa0ec690aa3"), + ( + [random.Random(42).random() for _ in range(5)], + "a11ffad81f9682a7d901e6edc3d16c84", + ), + ({"abcde": 123, "sadfas": [-9999, 2, 3]}, "aeda150553d4bb5c69f0e69d51b0e2ef"), + ], +) +def test_hashes_stay_the_same(to_hash, expected): + # We want to make sure that hashes don't change with joblib + # version. For end users, that would mean that they have to + # regenerate their cache from scratch, which potentially means + # lengthy recomputations. + # Expected results have been generated with joblib 0.9.2 + assert hash(to_hash) == expected + + +@with_numpy +def test_hashes_are_different_between_c_and_fortran_contiguous_arrays(): + # We want to be sure that the c-contiguous and f-contiguous versions of the + # same array produce 2 different hashes. + rng = np.random.RandomState(0) + arr_c = rng.random_sample((10, 10)) + arr_f = np.asfortranarray(arr_c) + assert hash(arr_c) != hash(arr_f) + + +@with_numpy +def test_0d_array(): + hash(np.array(0)) + + +@with_numpy +def test_0d_and_1d_array_hashing_is_different(): + assert hash(np.array(0)) != hash(np.array([0])) + + +@with_numpy +def test_hashes_stay_the_same_with_numpy_objects(): + # Note: joblib used to test numpy objects hashing by comparing the produced + # hash of an object with some hard-coded target value to guarantee that + # hashing remains the same across joblib versions. However, since numpy + # 1.20 and joblib 1.0, joblib relies on potentially unstable implementation + # details of numpy to hash np.dtype objects, which makes the stability of + # hash values across different environments hard to guarantee and to test. + # As a result, hashing stability across joblib versions becomes best-effort + # only, and we only test the consistency within a single environment by + # making sure: + # - the hash of two copies of the same objects is the same + # - hashing some object in two different python processes produces the same + # value. This should be viewed as a proxy for testing hash consistency + # through time between Python sessions (provided no change in the + # environment was done between sessions). + + def create_objects_to_hash(): + rng = np.random.RandomState(42) + # Being explicit about dtypes in order to avoid + # architecture-related differences. 
Also using 'f4' rather than
+        # 'f8' for float arrays because 'f8' arrays generated by
+        # rng.random.randn don't seem to be bit-identical on 32bit and
+        # 64bit machines.
+        to_hash_list = [
+            rng.randint(-1000, high=1000, size=50).astype("<i8"),
+            tuple(rng.randn(3).astype("<f4") for _ in range(3)),
+            [rng.randn(3).astype("<f4") for _ in range(3)],
+            {
+                -3333: rng.randn(3, 6).astype("<f4"),
+                0: [rng.randn(3).astype("<f4"), rng.randn(3).astype("<f4")],
+            },
+        ]
+        return to_hash_list
+
+    # Create two lists containing copies of the same objects: joblib.hash
+    # should return the same hash for to_hash_list_one[i] and
+    # to_hash_list_two[i]
+    to_hash_list_one = create_objects_to_hash()
+    to_hash_list_two = create_objects_to_hash()
+
+    e1 = ProcessPoolExecutor(max_workers=1)
+    e2 = ProcessPoolExecutor(max_workers=1)
+
+    try:
+        for obj_1, obj_2 in zip(to_hash_list_one, to_hash_list_two):
+            # Hashing consistency across two python processes
+            hash_1 = e1.submit(hash, obj_1).result()
+            hash_2 = e2.submit(hash, obj_2).result()
+            assert hash_1 == hash_2
+
+            # Hashing consistency within the current process
+            hash_3 = hash(obj_1)
+            assert hash_1 == hash_3
+    finally:
+        e1.shutdown()
+        e2.shutdown()
+
+
+def test_hashing_pickling_error():
+    def non_picklable():
+        return 42
+
+    with raises(pickle.PicklingError) as excinfo:
+        hash(non_picklable)
+    excinfo.match("PicklingError while hashing")
diff --git a/lib/python3.10/site-packages/joblib/test/test_logger.py b/lib/python3.10/site-packages/joblib/test/test_logger.py
new file mode 100644
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/test/test_logger.py
+"""
+Test the logger module.
+"""
+
+# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
+# Copyright (c) 2009 Gael Varoquaux
+# License: BSD Style, 3 clauses.
+import re
+
+from joblib.logger import PrintTime
+
+
+def test_print_time(tmpdir, capsys):
+    # A simple smoke test for PrintTime.
+    logfile = tmpdir.join("test.log").strpath
+    print_time = PrintTime(logfile=logfile)
+    print_time("Foo")
+    # Create a second time, to smoke test log rotation.
+    print_time = PrintTime(logfile=logfile)
+    print_time("Foo")
+    # And a third time
+    print_time = PrintTime(logfile=logfile)
+    print_time("Foo")
+
+    out_printed_text, err_printed_text = capsys.readouterr()
+    # Use regexps to be robust to time variations
+    match = r"Foo: 0\..s, 0\..min\nFoo: 0\..s, 0..min\nFoo: " + r".\..s, 0..min\n"
+    if not re.match(match, err_printed_text):
+        raise AssertionError("Expected %s, got %s" % (match, err_printed_text))
diff --git a/lib/python3.10/site-packages/joblib/test/test_memmapping.py b/lib/python3.10/site-packages/joblib/test/test_memmapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..7797536c9d933bc7b1f782e851990a8ac71d7b65
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/test/test_memmapping.py
@@ -0,0 +1,1280 @@
+import faulthandler
+import gc
+import itertools
+import mmap
+import os
+import pickle
+import platform
+import subprocess
+import sys
+import threading
+from time import sleep
+
+import pytest
+
+import joblib._memmapping_reducer as jmr
+from joblib._memmapping_reducer import (
+    ArrayMemmapForwardReducer,
+    _get_backing_memmap,
+    _get_temp_dir,
+    _strided_from_memmap,
+    _WeakArrayKeyMap,
+    has_shareable_memory,
+)
+from joblib.backports import make_memmap
+from joblib.executor import _TestingMemmappingExecutor as TestExecutor
+from joblib.parallel import Parallel, delayed
+from joblib.pool import MemmappingPool
+from joblib.test.common import (
+    IS_GIL_DISABLED,
+    np,
+    with_dev_shm,
+    with_multiprocessing,
+    with_numpy,
+)
+from joblib.testing import parametrize, raises, skipif
+
+
+def setup_module():
+    faulthandler.dump_traceback_later(timeout=300, exit=True)
+
+
+def teardown_module():
+    faulthandler.cancel_dump_traceback_later()
+
+
+def check_memmap_and_send_back(array):
+    assert _get_backing_memmap(array) is not None
+    return array
+
+
+def check_array(args):
+    """Dummy helper function to be executed in subprocesses
+
+    Check that the provided array has the expected values in the provided
+    range.
+
+    """
+    data, position, expected = args
+    np.testing.assert_array_equal(data[position], expected)
+
+
+def inplace_double(args):
+    """Dummy helper function to be executed in subprocesses
+
+
+    Check that the input array has the right values in the provided range
+    and perform an inplace modification to multiply the values in the range
+    by two.
+ + """ + data, position, expected = args + assert data[position] == expected + data[position] *= 2 + np.testing.assert_array_equal(data[position], 2 * expected) + + +@with_numpy +@with_multiprocessing +def test_memmap_based_array_reducing(tmpdir): + """Check that it is possible to reduce a memmap backed array""" + assert_array_equal = np.testing.assert_array_equal + filename = tmpdir.join("test.mmap").strpath + + # Create a file larger than what will be used by a + buffer = np.memmap(filename, dtype=np.float64, shape=500, mode="w+") + + # Fill the original buffer with negative markers to detect over of + # underflow in case of test failures + buffer[:] = -1.0 * np.arange(buffer.shape[0], dtype=buffer.dtype) + buffer.flush() + + # Memmap a 2D fortran array on a offsetted subsection of the previous + # buffer + a = np.memmap( + filename, dtype=np.float64, shape=(3, 5, 4), mode="r+", order="F", offset=4 + ) + a[:] = np.arange(60).reshape(a.shape) + + # Build various views that share the buffer with the original memmap + + # b is an memmap sliced view on an memmap instance + b = a[1:-1, 2:-1, 2:4] + + # b2 is a memmap 2d with memmap 1d as base + # non-regression test for https://github.com/joblib/joblib/issues/1703 + b2 = buffer.reshape(10, 50) + + # c and d are array views + c = np.asarray(b) + d = c.T + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, "c", True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + # Reconstruct original memmap + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + # Reconstruct strided memmap view + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + # Reconstruct memmap 2d with memmap 1d as base + b2_reconstructed = reconstruct_array_or_memmap(b2) + assert has_shareable_memory(b2_reconstructed) + assert_array_equal(b2_reconstructed, b2) + + # Reconstruct arrays views on memmap base + c_reconstructed = reconstruct_array_or_memmap(c) + assert not isinstance(c_reconstructed, np.memmap) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert not isinstance(d_reconstructed, np.memmap) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + # Test graceful degradation on fake memmap instances with in-memory + # buffers + a3 = a * 3 + assert not has_shareable_memory(a3) + a3_reconstructed = reconstruct_array_or_memmap(a3) + assert not has_shareable_memory(a3_reconstructed) + assert not isinstance(a3_reconstructed, np.memmap) + assert_array_equal(a3_reconstructed, a * 3) + + # Test graceful degradation on arrays derived from fake memmap instances + b3 = np.asarray(a3) + assert not has_shareable_memory(b3) + + b3_reconstructed = reconstruct_array_or_memmap(b3) + assert isinstance(b3_reconstructed, np.ndarray) + assert not has_shareable_memory(b3_reconstructed) + assert_array_equal(b3_reconstructed, b3) + + +@with_numpy +@with_multiprocessing +@skipif( + sys.platform != "win32", reason="PermissionError only easily triggerable on Windows" +) +def test_resource_tracker_retries_when_permissionerror(tmpdir): + # Test resource_tracker retry mechanism when unlinking memmaps. 
See more + # thorough information in the ``unlink_file`` documentation of joblib. + filename = tmpdir.join("test.mmap").strpath + cmd = """if 1: + import os + import numpy as np + import time + from joblib.externals.loky.backend import resource_tracker + resource_tracker.VERBOSE = 1 + + # Start the resource tracker + resource_tracker.ensure_running() + time.sleep(1) + + # Create a file containing numpy data + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + memmap[:] = np.arange(10).astype(np.int8).data + memmap.flush() + assert os.path.exists(r"{filename}") + del memmap + + # Create a np.memmap backed by this file + memmap = np.memmap(r"{filename}", dtype=np.float64, shape=10, mode='w+') + resource_tracker.register(r"{filename}", "file") + + # Ask the resource_tracker to delete the file backing the np.memmap , this + # should raise PermissionError that the resource_tracker will log. + resource_tracker.maybe_unlink(r"{filename}", "file") + + # Wait for the resource_tracker to process the maybe_unlink before cleaning + # up the memmap + time.sleep(2) + """.format(filename=filename) + p = subprocess.Popen( + [sys.executable, "-c", cmd], stderr=subprocess.PIPE, stdout=subprocess.PIPE + ) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err.decode() + assert out == b"" + msg = "tried to unlink {}, got PermissionError".format(filename) + assert msg in err.decode() + + +@with_numpy +@with_multiprocessing +def test_high_dimension_memmap_array_reducing(tmpdir): + assert_array_equal = np.testing.assert_array_equal + + filename = tmpdir.join("test.mmap").strpath + + # Create a high dimensional memmap + a = np.memmap(filename, dtype=np.float64, shape=(100, 15, 15, 3), mode="w+") + a[:] = np.arange(100 * 15 * 15 * 3).reshape(a.shape) + + # Create some slices/indices at various dimensions + b = a[0:10] + c = a[:, 5:10] + d = a[:, :, :, 0] + e = a[1:3:4] + + # Array reducer with auto dumping disabled + reducer = ArrayMemmapForwardReducer(None, tmpdir.strpath, "c", True) + + def reconstruct_array_or_memmap(x): + cons, args = reducer(x) + return cons(*args) + + a_reconstructed = reconstruct_array_or_memmap(a) + assert has_shareable_memory(a_reconstructed) + assert isinstance(a_reconstructed, np.memmap) + assert_array_equal(a_reconstructed, a) + + b_reconstructed = reconstruct_array_or_memmap(b) + assert has_shareable_memory(b_reconstructed) + assert_array_equal(b_reconstructed, b) + + c_reconstructed = reconstruct_array_or_memmap(c) + assert has_shareable_memory(c_reconstructed) + assert_array_equal(c_reconstructed, c) + + d_reconstructed = reconstruct_array_or_memmap(d) + assert has_shareable_memory(d_reconstructed) + assert_array_equal(d_reconstructed, d) + + e_reconstructed = reconstruct_array_or_memmap(e) + assert has_shareable_memory(e_reconstructed) + assert_array_equal(e_reconstructed, e) + + +@with_numpy +def test__strided_from_memmap(tmpdir): + fname = tmpdir.join("test.mmap").strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + # This line creates the mmap file that is reused later + memmap_obj = np.memmap(fname, mode="w+", shape=size + offset) + # filename, dtype, mode, offset, order, shape, strides, total_buffer_len + memmap_obj = _strided_from_memmap( + fname, + dtype="uint8", + mode="r", + offset=offset, + order="C", + shape=size, + strides=None, + total_buffer_len=None, + unlink_on_gc_collect=False, + ) + assert isinstance(memmap_obj, np.memmap) + assert memmap_obj.offset == offset + memmap_backed_obj = 
_strided_from_memmap( + fname, + dtype="uint8", + mode="r", + offset=offset, + order="C", + shape=(size // 2,), + strides=(2,), + total_buffer_len=size, + unlink_on_gc_collect=False, + ) + assert _get_backing_memmap(memmap_backed_obj).offset == offset + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory", + [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"], +) +def test_pool_with_memmap(factory, tmpdir): + """Check that subprocess can access and update shared memory memmap""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir("pool").strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + filename = tmpdir.join("test.mmap").strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode="w+") + a.fill(1.0) + + p.map( + inplace_double, + [(a, (i, j), 1.0) for i in range(a.shape[0]) for j in range(a.shape[1])], + ) + + assert_array_equal(a, 2 * np.ones(a.shape)) + + # Open a copy-on-write view on the previous data + b = np.memmap(filename, dtype=np.float32, shape=(5, 3), mode="c") + + p.map( + inplace_double, + [(b, (i, j), 2.0) for i in range(b.shape[0]) for j in range(b.shape[1])], + ) + + # Passing memmap instances to the pool should not trigger the creation + # of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + # the original data is untouched + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(b, 2 * np.ones(b.shape)) + + # readonly maps can be read but not updated + c = np.memmap(filename, dtype=np.float32, shape=(10,), mode="r", offset=5 * 4) + + with raises(AssertionError): + p.map(check_array, [(c, i, 3.0) for i in range(c.shape[0])]) + + # depending on the version of numpy one can either get a RuntimeError + # or a ValueError + with raises((RuntimeError, ValueError)): + p.map(inplace_double, [(c, i, 2.0) for i in range(c.shape[0])]) + finally: + # Clean all filehandlers held by the pool + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory", + [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"], +) +def test_pool_with_memmap_array_view(factory, tmpdir): + """Check that subprocess can access and update shared memory array""" + assert_array_equal = np.testing.assert_array_equal + + # Fork the subprocess before allocating the objects to be passed + pool_temp_folder = tmpdir.mkdir("pool").strpath + p = factory(10, max_nbytes=2, temp_folder=pool_temp_folder) + try: + filename = tmpdir.join("test.mmap").strpath + a = np.memmap(filename, dtype=np.float32, shape=(3, 5), mode="w+") + a.fill(1.0) + + # Create an ndarray view on the memmap instance + a_view = np.asarray(a) + assert not isinstance(a_view, np.memmap) + assert has_shareable_memory(a_view) + + p.map( + inplace_double, + [ + (a_view, (i, j), 1.0) + for i in range(a.shape[0]) + for j in range(a.shape[1]) + ], + ) + + # Both a and the a_view have been updated + assert_array_equal(a, 2 * np.ones(a.shape)) + assert_array_equal(a_view, 2 * np.ones(a.shape)) + + # Passing memmap array view to the pool should not trigger the + # creation of new files on the FS + assert os.listdir(pool_temp_folder) == [] + + finally: + p.terminate() + del p + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_permission_error_windows_reference_cycle(backend): + # Non regression test for: + # 
https://github.com/joblib/joblib/issues/806
+    #
+    # The issue happens when trying to delete a memory mapped file that has
+    # not yet been closed by one of the worker processes.
+    cmd = """if 1:
+        import numpy as np
+        from joblib import Parallel, delayed
+
+
+        data = np.random.rand(int(2e6)).reshape((int(1e6), 2))
+
+        # Build a complex cyclic reference that is likely to delay garbage
+        # collection of the memmapped array in the worker processes.
+        first_list = current_list = [data]
+        for i in range(10):
+            current_list = [current_list]
+            first_list.append(current_list)
+
+        if __name__ == "__main__":
+            results = Parallel(n_jobs=2, backend="{b}")(
+                delayed(len)(current_list) for i in range(10))
+            assert results == [1] * 10
+    """.format(b=backend)
+    p = subprocess.Popen(
+        [sys.executable, "-c", cmd], stderr=subprocess.PIPE, stdout=subprocess.PIPE
+    )
+    p.wait()
+    out, err = p.communicate()
+    assert p.returncode == 0, out.decode() + "\n\n" + err.decode()
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_permission_error_windows_memmap_sent_to_parent(backend):
+    # Second non-regression test for:
+    # https://github.com/joblib/joblib/issues/806
+    # Previously, child processes would not convert temporary memmaps to numpy
+    # arrays when sending the data back to the parent process. This would lead
+    # to permission errors on Windows when deleting joblib's temporary folder,
+    # as the memmapped file handles would still be open in the parent process.
+    cmd = """if 1:
+        import os
+        import time
+
+        import numpy as np
+
+        from joblib import Parallel, delayed
+        from testutils import return_slice_of_data
+
+        data = np.ones(int(2e6))
+
+        if __name__ == '__main__':
+            # warm-up call to launch the workers and start the resource_tracker
+            _ = Parallel(n_jobs=2, verbose=5, backend='{b}')(
+                delayed(id)(i) for i in range(20))
+
+            time.sleep(0.5)
+
+            slice_of_data = Parallel(n_jobs=2, verbose=5, backend='{b}')(
+                delayed(return_slice_of_data)(data, 0, 20) for _ in range(10))
+    """.format(b=backend)
+
+    for _ in range(3):
+        env = os.environ.copy()
+        env["PYTHONPATH"] = os.path.dirname(__file__)
+        p = subprocess.Popen(
+            [sys.executable, "-c", cmd],
+            stderr=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            env=env,
+        )
+        p.wait()
+        out, err = p.communicate()
+        assert p.returncode == 0, err
+        assert out == b""
+        assert b"resource_tracker" not in err
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_parallel_isolated_temp_folders(backend):
+    # Test that consecutive Parallel calls use isolated subfolders, even
+    # for the loky backend that reuses its executor instance across calls.
+    array = np.arange(int(1e2))
+    [filename_1] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
+        delayed(getattr)(array, "filename") for _ in range(1)
+    )
+    [filename_2] = Parallel(n_jobs=2, backend=backend, max_nbytes=10)(
+        delayed(getattr)(array, "filename") for _ in range(1)
+    )
+    assert os.path.dirname(filename_2) != os.path.dirname(filename_1)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky"])
+def test_managed_backend_reuse_temp_folder(backend):
+    # Test that calls to a managed parallel object reuse the same memmaps.
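+    # Within a single ``with Parallel(...) as p`` block the loky executor and
+    # its memmapping reducer stay alive, so both calls below should dump the
+    # auto-memmapped array under the same temporary subfolder.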
+    array = np.arange(int(1e2))
+    with Parallel(n_jobs=2, backend=backend, max_nbytes=10) as p:
+        [filename_1] = p(delayed(getattr)(array, "filename") for _ in range(1))
+        [filename_2] = p(delayed(getattr)(array, "filename") for _ in range(1))
+        assert os.path.dirname(filename_2) == os.path.dirname(filename_1)
+
+
+@with_numpy
+@with_multiprocessing
+def test_memmapping_temp_folder_thread_safety():
+    # Concurrent calls to Parallel with the loky backend will use the same
+    # executor, and thus the same reducers. Make sure that those reducers use
+    # different temporary folders depending on which Parallel objects called
+    # them, which is necessary to limit potential race conditions during the
+    # garbage collection of temporary memmaps.
+    array = np.arange(int(1e2))
+
+    temp_dirs_thread_1 = set()
+    temp_dirs_thread_2 = set()
+
+    def concurrent_get_filename(array, temp_dirs):
+        with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
+            for i in range(10):
+                [filename] = p(delayed(getattr)(array, "filename") for _ in range(1))
+                temp_dirs.add(os.path.dirname(filename))
+
+    t1 = threading.Thread(
+        target=concurrent_get_filename, args=(array, temp_dirs_thread_1)
+    )
+    t2 = threading.Thread(
+        target=concurrent_get_filename, args=(array, temp_dirs_thread_2)
+    )
+
+    t1.start()
+    t2.start()
+
+    t1.join()
+    t2.join()
+
+    assert len(temp_dirs_thread_1) == 1
+    assert len(temp_dirs_thread_2) == 1
+
+    assert temp_dirs_thread_1 != temp_dirs_thread_2
+
+
+@with_numpy
+@with_multiprocessing
+def test_multithreaded_parallel_termination_resource_tracker_silent():
+    # Test that concurrent termination attempts on the same executor do not
+    # emit any spurious error from the resource_tracker. We test various
+    # situations, making 0, 1, or both Parallel calls send a task that will
+    # make the worker (and thus the whole Parallel call) error out.
+    cmd = """if 1:
+        import os
+        import numpy as np
+        from joblib import Parallel, delayed
+        from joblib.externals.loky.backend import resource_tracker
+        from concurrent.futures import ThreadPoolExecutor, wait
+
+        resource_tracker.VERBOSE = 0
+
+        array = np.arange(int(1e2))
+
+        temp_dirs_thread_1 = set()
+        temp_dirs_thread_2 = set()
+
+
+        def raise_error(array):
+            raise ValueError
+
+
+        def parallel_get_filename(array, temp_dirs):
+            with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
+                for i in range(10):
+                    [filename] = p(
+                        delayed(getattr)(array, "filename") for _ in range(1)
+                    )
+                    temp_dirs.add(os.path.dirname(filename))
+
+
+        def parallel_raise(array, temp_dirs):
+            with Parallel(backend="loky", n_jobs=2, max_nbytes=10) as p:
+                for i in range(10):
+                    [filename] = p(
+                        delayed(raise_error)(array) for _ in range(1)
+                    )
+                    temp_dirs.add(os.path.dirname(filename))
+
+
+        executor = ThreadPoolExecutor(max_workers=2)
+
+        # both function calls will use the same loky executor, but with a
+        # different Parallel object.
+ future_1 = executor.submit({f1}, array, temp_dirs_thread_1) + future_2 = executor.submit({f2}, array, temp_dirs_thread_2) + + # Wait for both threads to terminate their backend + wait([future_1, future_2]) + + future_1.result() + future_2.result() + """ + functions_and_returncodes = [ + ("parallel_get_filename", "parallel_get_filename", 0), + ("parallel_get_filename", "parallel_raise", 1), + ("parallel_raise", "parallel_raise", 1), + ] + + for f1, f2, returncode in functions_and_returncodes: + p = subprocess.Popen( + [sys.executable, "-c", cmd.format(f1=f1, f2=f2)], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + ) + p.wait() + _, err = p.communicate() + assert p.returncode == returncode, err.decode() + assert b"resource_tracker" not in err, err.decode() + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_many_parallel_calls_on_same_object(backend): + # After #966 got merged, consecutive Parallel objects were sharing temp + # folder, which would lead to race conditions happening during the + # temporary resources management with the resource_tracker. This is a + # non-regression test that makes sure that consecutive Parallel operations + # on the same object do not error out. + cmd = """if 1: + import os + import time + + import numpy as np + + from joblib import Parallel, delayed + from testutils import return_slice_of_data + + data = np.ones(100) + + if __name__ == '__main__': + for i in range(5): + slice_of_data = Parallel( + n_jobs=2, max_nbytes=1, backend='{b}')( + delayed(return_slice_of_data)(data, 0, 20) + for _ in range(10) + ) + """.format(b=backend) + env = os.environ.copy() + env["PYTHONPATH"] = os.path.dirname(__file__) + p = subprocess.Popen( + [sys.executable, "-c", cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) + p.wait() + out, err = p.communicate() + assert p.returncode == 0, err.decode() + assert out == b"", out.decode() + assert b"resource_tracker" not in err + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_memmap_returned_as_regular_array(backend): + data = np.ones(int(1e3)) + # Check that child processes send temporary memmaps back as numpy arrays. + [result] = Parallel(n_jobs=2, backend=backend, max_nbytes=100)( + delayed(check_memmap_and_send_back)(data) for _ in range(1) + ) + assert _get_backing_memmap(result) is None + + +@with_numpy +@with_multiprocessing +@parametrize("backend", ["multiprocessing", "loky"]) +def test_resource_tracker_silent_when_reference_cycles(backend): + # There is a variety of reasons that can make joblib with loky backend + # output noisy warnings when a reference cycle is preventing a memmap from + # being garbage collected. Especially, joblib's main process finalizer + # deletes the temporary folder if it was not done before, which can + # interact badly with the resource_tracker. We don't risk leaking any + # resources, but this will likely make joblib output a lot of low-level + # confusing messages. + # + # This test makes sure that the resource_tracker is silent when a reference + # has been collected concurrently on non-Windows platforms. + # + # Note that the script in ``cmd`` is the exact same script as in + # test_permission_error_windows_reference_cycle. 
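+    # The difference with that earlier test is the property checked: here we
+    # additionally assert that stderr stays free of resource_tracker noise,
+    # rather than only checking the exit status of the child script.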
+ if backend == "loky" and sys.platform.startswith("win"): + # XXX: on Windows, reference cycles can delay timely garbage collection + # and make it impossible to properly delete the temporary folder in the + # main process because of permission errors. + pytest.xfail( + "The temporary folder cannot be deleted on Windows in the " + "presence of a reference cycle" + ) + + cmd = """if 1: + import numpy as np + from joblib import Parallel, delayed + + + data = np.random.rand(int(2e6)).reshape((int(1e6), 2)) + + # Build a complex cyclic reference that is likely to delay garbage + # collection of the memmapped array in the worker processes. + first_list = current_list = [data] + for i in range(10): + current_list = [current_list] + first_list.append(current_list) + + if __name__ == "__main__": + results = Parallel(n_jobs=2, backend="{b}")( + delayed(len)(current_list) for i in range(10)) + assert results == [1] * 10 + """.format(b=backend) + p = subprocess.Popen( + [sys.executable, "-c", cmd], stderr=subprocess.PIPE, stdout=subprocess.PIPE + ) + p.wait() + out, err = p.communicate() + out = out.decode() + err = err.decode() + assert p.returncode == 0, out + "\n\n" + err + assert "resource_tracker" not in err, err + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory", + [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"], +) +def test_memmapping_pool_for_large_arrays(factory, tmpdir): + """Check that large arrays are not copied in memory""" + + # Check that the tempfolder is empty + assert os.listdir(tmpdir.strpath) == [] + + # Build an array reducers that automatically dump large array content + # to filesystem backed memmap instances to avoid memory explosion + p = factory(3, max_nbytes=40, temp_folder=tmpdir.strpath, verbose=2) + try: + # The temporary folder for the pool is not provisioned in advance + assert os.listdir(tmpdir.strpath) == [] + assert not os.path.exists(p._temp_folder) + + small = np.ones(5, dtype=np.float32) + assert small.nbytes == 20 + p.map(check_array, [(small, i, 1.0) for i in range(small.shape[0])]) + + # Memory has been copied, the pool filesystem folder is unused + assert os.listdir(tmpdir.strpath) == [] + + # Try with a file larger than the memmap threshold of 40 bytes + large = np.ones(100, dtype=np.float64) + assert large.nbytes == 800 + p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])]) + + # The data has been dumped in a temp folder for subprocess to share it + # without per-child memory copies + assert os.path.isdir(p._temp_folder) + dumped_filenames = os.listdir(p._temp_folder) + assert len(dumped_filenames) == 1 + + # Check that memory mapping is not triggered for arrays with + # dtype='object' + objects = np.array(["abc"] * 100, dtype="object") + results = p.map(has_shareable_memory, [objects]) + assert not results[0] + + finally: + # check FS garbage upon pool termination + p.terminate() + for i in range(10): + sleep(0.1) + if not os.path.exists(p._temp_folder): + break + else: # pragma: no cover + raise AssertionError( + "temporary folder {} was not deleted".format(p._temp_folder) + ) + del p + + +@with_numpy +@with_multiprocessing +@parametrize( + "backend", + [ + pytest.param( + "multiprocessing", + marks=pytest.mark.xfail( + reason="https://github.com/joblib/joblib/issues/1086" + ), + ), + "loky", + ], +) +def test_child_raises_parent_exits_cleanly(backend): + # When a task executed by a child process raises an error, the parent + # process's backend is notified, and calls 
abort_everything. + # In loky, abort_everything itself calls shutdown(kill_workers=True) which + # sends SIGKILL to the worker, preventing it from running the finalizers + # supposed to signal the resource_tracker when the worker is done using + # objects relying on a shared resource (e.g np.memmaps). Because this + # behavior is prone to : + # - cause a resource leak + # - make the resource tracker emit noisy resource warnings + # we explicitly test that, when the said situation occurs: + # - no resources are actually leaked + # - the temporary resources are deleted as soon as possible (typically, at + # the end of the failing Parallel call) + # - the resource_tracker does not emit any warnings. + cmd = """if 1: + import os + from pathlib import Path + from time import sleep + + import numpy as np + from joblib import Parallel, delayed + from testutils import print_filename_and_raise + + data = np.random.rand(1000) + + def get_temp_folder(parallel_obj, backend): + if "{b}" == "loky": + return Path(parallel_obj._backend._workers._temp_folder) + else: + return Path(parallel_obj._backend._pool._temp_folder) + + + if __name__ == "__main__": + try: + with Parallel(n_jobs=2, backend="{b}", max_nbytes=100) as p: + temp_folder = get_temp_folder(p, "{b}") + p(delayed(print_filename_and_raise)(data) + for i in range(1)) + except ValueError as e: + # the temporary folder should be deleted by the end of this + # call but apparently on some file systems, this takes + # some time to be visible. + # + # We attempt to write into the temporary folder to test for + # its existence and we wait for a maximum of 10 seconds. + for i in range(100): + try: + with open(temp_folder / "some_file.txt", "w") as f: + f.write("some content") + except FileNotFoundError: + # temp_folder has been deleted, all is fine + break + + # ... else, wait a bit and try again + sleep(.1) + else: + raise AssertionError( + str(temp_folder) + " was not deleted" + ) from e + """.format(b=backend) + env = os.environ.copy() + env["PYTHONPATH"] = os.path.dirname(__file__) + p = subprocess.Popen( + [sys.executable, "-c", cmd], + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ) + p.wait() + out, err = p.communicate() + out, err = out.decode(), err.decode() + filename = out.split("\n")[0] + assert p.returncode == 0, err or out + assert err == "" # no resource_tracker warnings. 
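+    # The child prints the memmap's backing filename on its first line of
+    # stdout (parsed above); the failing Parallel call must have deleted it.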
+    assert not os.path.exists(filename)
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize(
+    "factory",
+    [MemmappingPool, TestExecutor.get_memmapping_executor],
+    ids=["multiprocessing", "loky"],
+)
+def test_memmapping_pool_for_large_arrays_disabled(factory, tmpdir):
+    """Check that large arrays memmapping can be disabled"""
+    # Set max_nbytes to None to disable the auto memmapping feature
+    p = factory(3, max_nbytes=None, temp_folder=tmpdir.strpath)
+    try:
+        # Check that the tempfolder is empty
+        assert os.listdir(tmpdir.strpath) == []
+
+        # Try with a file larger than the memmap threshold of 40 bytes
+        large = np.ones(100, dtype=np.float64)
+        assert large.nbytes == 800
+        p.map(check_array, [(large, i, 1.0) for i in range(large.shape[0])])
+
+        # Check that the tempfolder is still empty
+        assert os.listdir(tmpdir.strpath) == []
+
+    finally:
+        # Cleanup open file descriptors
+        p.terminate()
+        del p
+
+
+@with_numpy
+@with_multiprocessing
+@with_dev_shm
+@parametrize(
+    "factory",
+    [MemmappingPool, TestExecutor.get_memmapping_executor],
+    ids=["multiprocessing", "loky"],
+)
+def test_memmapping_on_large_enough_dev_shm(factory):
+    """Check that memmapping uses /dev/shm when possible"""
+    orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
+    try:
+        # Make joblib believe that it can use /dev/shm even when running on a
+        # CI container where the size of the /dev/shm is not very large (that
+        # is at least 32 MB instead of 2 GB by default).
+        jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(32e6)
+        p = factory(3, max_nbytes=10)
+        try:
+            # Check that the pool has correctly detected the presence of the
+            # shared memory filesystem.
+            pool_temp_folder = p._temp_folder
+            folder_prefix = "/dev/shm/joblib_memmapping_folder_"
+            assert pool_temp_folder.startswith(folder_prefix)
+            assert os.path.exists(pool_temp_folder)
+
+            # Try with a file larger than the memmap threshold of 10 bytes
+            a = np.ones(100, dtype=np.float64)
+            assert a.nbytes == 800
+            p.map(id, [a] * 10)
+            # a should have been memmapped to the pool temp folder: the joblib
+            # pickling procedure generates one .pkl file:
+            assert len(os.listdir(pool_temp_folder)) == 1
+
+            # Create a new array with content that is different from 'a' so
+            # that it is mapped to a different file in the temporary folder of
+            # the pool.
+            b = np.ones(100, dtype=np.float64) * 2
+            assert b.nbytes == 800
+            p.map(id, [b] * 10)
+            # Copies of both a and b are now stored in the shared memory folder
+            assert len(os.listdir(pool_temp_folder)) == 2
+        finally:
+            # Cleanup open file descriptors
+            p.terminate()
+            del p
+
+        for i in range(100):
+            # The temp folder is cleaned up upon pool termination
+            if not os.path.exists(pool_temp_folder):
+                break
+            sleep(0.1)
+        else:  # pragma: no cover
+            raise AssertionError("temporary folder of pool was not deleted")
+    finally:
+        jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size
+
+
+@with_numpy
+@with_multiprocessing
+@with_dev_shm
+@parametrize(
+    "factory",
+    [MemmappingPool, TestExecutor.get_memmapping_executor],
+    ids=["multiprocessing", "loky"],
+)
+def test_memmapping_on_too_small_dev_shm(factory):
+    orig_size = jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE
+    try:
+        # Make joblib believe that it cannot use /dev/shm unless there is
+        # 42 exabytes of available shared memory in /dev/shm
+        jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = int(42e18)
+
+        p = factory(3, max_nbytes=10)
+        try:
+            # Check that the pool did not select the shared memory filesystem
+            # as its temporary folder.
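+            # (_get_temp_dir only selects a /dev/shm path when the shared
+            # memory filesystem advertises at least
+            # SYSTEM_SHARED_MEM_FS_MIN_SIZE bytes, patched above to 42e18.)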
+ pool_temp_folder = p._temp_folder + assert not pool_temp_folder.startswith("/dev/shm") + finally: + # Cleanup open file descriptors + p.terminate() + del p + + # The temp folder is cleaned up upon pool termination + assert not os.path.exists(pool_temp_folder) + finally: + jmr.SYSTEM_SHARED_MEM_FS_MIN_SIZE = orig_size + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory", + [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"], +) +def test_memmapping_pool_for_large_arrays_in_return(factory, tmpdir): + """Check that large arrays are not copied in memory in return""" + assert_array_equal = np.testing.assert_array_equal + + # Build an array reducers that automatically dump large array content + # but check that the returned datastructure are regular arrays to avoid + # passing a memmap array pointing to a pool controlled temp folder that + # might be confusing to the user + + # The MemmappingPool user can always return numpy.memmap object explicitly + # to avoid memory copy + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + res = p.apply_async(np.ones, args=(1000,)) + large = res.get() + assert not has_shareable_memory(large) + assert_array_equal(large, np.ones(1000)) + finally: + p.terminate() + del p + + +def _worker_multiply(a, n_times): + """Multiplication function to be executed by subprocess""" + assert has_shareable_memory(a) + return a * n_times + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory", + [MemmappingPool, TestExecutor.get_memmapping_executor], + ids=["multiprocessing", "loky"], +) +def test_workaround_against_bad_memmap_with_copied_buffers(factory, tmpdir): + """Check that memmaps with a bad buffer are returned as regular arrays + + Unary operations and ufuncs on memmap instances return a new memmap + instance with an in-memory buffer (probably a numpy bug). + """ + assert_array_equal = np.testing.assert_array_equal + + p = factory(3, max_nbytes=10, temp_folder=tmpdir.strpath) + try: + # Send a complex, large-ish view on a array that will be converted to + # a memmap in the worker process + a = np.asarray(np.arange(6000).reshape((1000, 2, 3)), order="F")[:, :1, :] + + # Call a non-inplace multiply operation on the worker and memmap and + # send it back to the parent. + b = p.apply_async(_worker_multiply, args=(a, 3)).get() + assert not has_shareable_memory(b) + assert_array_equal(b, 3 * a) + finally: + p.terminate() + del p + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +@parametrize( + "factory,retry_no", + list( + itertools.product( + [MemmappingPool, TestExecutor.get_memmapping_executor], range(3) + ) + ), + ids=[ + "{}, {}".format(x, y) + for x, y in itertools.product(["multiprocessing", "loky"], map(str, range(3))) + ], +) +def test_pool_memmap_with_big_offset(factory, retry_no, tmpdir): + # Test that numpy memmap offset is set correctly if greater than + # mmap.ALLOCATIONGRANULARITY, see + # https://github.com/joblib/joblib/issues/451 and + # https://github.com/numpy/numpy/pull/8443 for more details. 
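+    # The offset below is deliberately ALLOCATIONGRANULARITY + 1, i.e. not a
+    # multiple of the mmap allocation granularity, so the reducer has to
+    # carry the exact offset through the pickling round-trip.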
+ fname = tmpdir.join("test.mmap").strpath + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + obj = make_memmap(fname, mode="w+", shape=size, dtype="uint8", offset=offset) + + p = factory(2, temp_folder=tmpdir.strpath) + result = p.apply_async(identity, args=(obj,)).get() + assert isinstance(result, np.memmap) + assert result.offset == offset + np.testing.assert_array_equal(obj, result) + p.terminate() + + +def test_pool_get_temp_dir(tmpdir): + pool_folder_name = "test.tmpdir" + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, tmpdir.strpath) + assert shared_mem is False + assert pool_folder == tmpdir.join("test.tmpdir").strpath + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith("win"): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +def test_pool_get_temp_dir_no_statvfs(tmpdir, monkeypatch): + """Check that _get_temp_dir works when os.statvfs is not defined + + Regression test for #902 + """ + pool_folder_name = "test.tmpdir" + import joblib._memmapping_reducer + + if hasattr(joblib._memmapping_reducer.os, "statvfs"): + # We are on Unix, since Windows doesn't have this function + monkeypatch.delattr(joblib._memmapping_reducer.os, "statvfs") + + pool_folder, shared_mem = _get_temp_dir(pool_folder_name, temp_folder=None) + if sys.platform.startswith("win"): + assert shared_mem is False + assert pool_folder.endswith(pool_folder_name) + + +@with_numpy +@skipif( + sys.platform == "win32", reason="This test fails with a PermissionError on Windows" +) +@parametrize("mmap_mode", ["r+", "w+"]) +def test_numpy_arrays_use_different_memory(mmap_mode): + def func(arr, value): + arr[:] = value + return arr + + arrays = [np.zeros((10, 10), dtype="float64") for i in range(10)] + + results = Parallel(mmap_mode=mmap_mode, max_nbytes=0, n_jobs=2)( + delayed(func)(arr, i) for i, arr in enumerate(arrays) + ) + + for i, arr in enumerate(results): + np.testing.assert_array_equal(arr, i) + + +@with_numpy +def test_weak_array_key_map(): + def assert_empty_after_gc_collect(container, retries=100): + for i in range(retries): + if len(container) == 0: + return + gc.collect() + sleep(0.1) + assert len(container) == 0 + + a = np.ones(42) + m = _WeakArrayKeyMap() + m.set(a, "a") + assert m.get(a) == "a" + + b = a + assert m.get(b) == "a" + m.set(b, "b") + assert m.get(a) == "b" + + del a + gc.collect() + assert len(m._data) == 1 + assert m.get(b) == "b" + + del b + assert_empty_after_gc_collect(m._data) + + c = np.ones(42) + m.set(c, "c") + assert len(m._data) == 1 + assert m.get(c) == "c" + + with raises(KeyError): + m.get(np.ones(42)) + + del c + assert_empty_after_gc_collect(m._data) + + # Check that creating and dropping numpy arrays with potentially the same + # object id will not cause the map to get confused. + def get_set_get_collect(m, i): + a = np.ones(42) + with raises(KeyError): + m.get(a) + m.set(a, i) + assert m.get(a) == i + return id(a) + + unique_ids = set([get_set_get_collect(m, i) for i in range(1000)]) + if platform.python_implementation() == "CPython": + # On CPython (at least) the same id is often reused many times for the + # temporary arrays created under the local scope of the + # get_set_get_collect function without causing any spurious lookups / + # insertions in the map. Apparently on free-threaded Python, the id is + # not reused as often. 
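+        # The assertion below mostly documents that id reuse does happen in
+        # practice (which is what makes this stress test meaningful); the
+        # bound is deliberately loose.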
+ max_len_unique_ids = 400 if IS_GIL_DISABLED else 100 + assert len(unique_ids) < max_len_unique_ids + + +def test_weak_array_key_map_no_pickling(): + m = _WeakArrayKeyMap() + with raises(pickle.PicklingError): + pickle.dumps(m) + + +@with_numpy +@with_multiprocessing +def test_direct_mmap(tmpdir): + testfile = str(tmpdir.join("arr.dat")) + a = np.arange(10, dtype="uint8") + a.tofile(testfile) + + def _read_array(): + with open(testfile) as fd: + mm = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ, offset=0) + return np.ndarray((10,), dtype=np.uint8, buffer=mm, offset=0) + + def func(x): + return x**2 + + arr = _read_array() + + # this gives the reference result of the function with an array + ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a]) + + # now test that it works with the mmap array + results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr]) + np.testing.assert_array_equal(results, ref) + + # also test that a mmap array read in the subprocess is correctly returned + results = Parallel(n_jobs=2)(delayed(_read_array)() for _ in range(1)) + np.testing.assert_array_equal(results[0], arr) + + +@with_numpy +@with_multiprocessing +def test_parallel_memmap2d_as_memmap_1d_base(tmpdir): + # non-regression test for https://github.com/joblib/joblib/issues/1703, + # where 2D arrays backed by 1D memmap had un-wanted order changes. + testfile = str(tmpdir.join("arr2.dat")) + a = np.arange(10, dtype="uint8").reshape(5, 2) + a.tofile(testfile) + + def _read_array(): + mm = np.memmap(testfile) + return mm.reshape(5, 2) + + def func(x): + return x**2 + + arr = _read_array() + + # this gives the reference result of the function with an array + ref = Parallel(n_jobs=2)(delayed(func)(x) for x in [a]) + + # now test that it works with a view on a 1D mmap array + results = Parallel(n_jobs=2)(delayed(func)(x) for x in [arr]) + assert not results[0].flags["F_CONTIGUOUS"] + np.testing.assert_array_equal(results, ref) + + # also test that returned memmap arrays are correctly ordered + results = Parallel(n_jobs=2)(delayed(_read_array)() for _ in range(1)) + np.testing.assert_array_equal(results[0], a) diff --git a/lib/python3.10/site-packages/joblib/test/test_memory.py b/lib/python3.10/site-packages/joblib/test/test_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc8dcf8f29c3bc0da0c41a603cb4b77b45180ac --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_memory.py @@ -0,0 +1,1577 @@ +""" +Test the memory module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2009 Gael Varoquaux +# License: BSD Style, 3 clauses. 
+
+import datetime
+import functools
+import gc
+import logging
+import os
+import os.path
+import pathlib
+import pickle
+import shutil
+import sys
+import textwrap
+import time
+
+import pytest
+
+from joblib._store_backends import FileSystemStoreBackend, StoreBackendBase
+from joblib.hashing import hash
+from joblib.memory import (
+    _FUNCTION_HASHES,
+    _STORE_BACKENDS,
+    JobLibCollisionWarning,
+    MemorizedFunc,
+    MemorizedResult,
+    Memory,
+    NotMemorizedFunc,
+    NotMemorizedResult,
+    _build_func_identifier,
+    _store_backend_factory,
+    expires_after,
+    register_store_backend,
+)
+from joblib.parallel import Parallel, delayed
+from joblib.test.common import np, with_multiprocessing, with_numpy
+from joblib.testing import parametrize, raises, warns
+
+
+###############################################################################
+# Module-level variables for the tests
+def f(x, y=1):
+    """A module-level function for testing purposes."""
+    return x**2 + y
+
+
+###############################################################################
+# Helper function for the tests
+def check_identity_lazy(func, accumulator, location):
+    """Given a function and an accumulator (a list that grows every
+    time the function is called), check that the function can be
+    decorated by memory to be a lazy identity.
+    """
+    # Call each function with several arguments, and check that it is
+    # evaluated only once per argument.
+    memory = Memory(location=location, verbose=0)
+    func = memory.cache(func)
+    for i in range(3):
+        for _ in range(2):
+            assert func(i) == i
+            assert len(accumulator) == i + 1
+
+
+def corrupt_single_cache_item(memory):
+    (single_cache_item,) = memory.store_backend.get_items()
+    output_filename = os.path.join(single_cache_item.path, "output.pkl")
+    with open(output_filename, "w") as f:
+        f.write("garbage")
+
+
+def monkeypatch_cached_func_warn(func, monkeypatch_fixture):
+    # Need monkeypatch because pytest does not
+    # capture stdlib logging output (see
+    # https://github.com/pytest-dev/pytest/issues/2079)
+
+    recorded = []
+
+    def append_to_record(item):
+        recorded.append(item)
+
+    monkeypatch_fixture.setattr(func, "warn", append_to_record)
+    return recorded
+
+
+###############################################################################
+# Tests
+def test_memory_integration(tmpdir):
+    """Simple test of memory lazy evaluation."""
+    accumulator = list()
+
+    # Rmk: this function has the same name as a module-level function,
+    # thus it serves as a test to see that both are identified
+    # as different.
+    def f(arg):
+        accumulator.append(1)
+        return arg
+
+    check_identity_lazy(f, accumulator, tmpdir.strpath)
+
+    # Now test clearing
+    for compress in (False, True):
+        for mmap_mode in ("r", None):
+            memory = Memory(
+                location=tmpdir.strpath,
+                verbose=10,
+                mmap_mode=mmap_mode,
+                compress=compress,
+            )
+            # First clear the cache directory, to check that our code can
+            # handle that
+            # NOTE: this line would raise an exception, as the database file is
+            # still open; we ignore the error since we want to test what
+            # happens if the directory disappears
+            shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+            g = memory.cache(f)
+            g(1)
+            g.clear(warn=False)
+            current_accumulator = len(accumulator)
+            out = g(1)
+
+            assert len(accumulator) == current_accumulator + 1
+            # Also, check that Memory.eval works similarly
+            assert memory.eval(f, 1) == out
+            assert len(accumulator) == current_accumulator + 1
+
+    # Now do a smoke test with a function defined in __main__, as the name
+    # mangling rules are more complex
+    f.__module__ = "__main__"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    memory.cache(f)(1)
+
+
+@parametrize("call_before_reducing", [True, False])
+def test_parallel_call_cached_function_defined_in_jupyter(tmpdir, call_before_reducing):
+    # Calling an interactively defined memory.cache()'d function inside a
+    # Parallel call used to clear the existing cache related to the said
+    # function (https://github.com/joblib/joblib/issues/1035)
+
+    # This test checks that this is no longer the case.
+
+    # TODO: test that the cache related to the function cache persists across
+    # ipython sessions (provided that no code change were made to the
+    # function's source)?
+
+    # The first part of the test makes the necessary low-level calls to emulate
+    # the definition of a function in a jupyter notebook cell. Joblib has
+    # some custom code to treat functions defined specifically in jupyter
+    # notebooks/ipython sessions -- we want to test this code, which requires
+    # the emulation to be rigorous.
+    for session_no in [0, 1]:
+        ipython_cell_source = """
+        def f(x):
+            return x
+        """
+
+        ipython_cell_id = "<ipython-input-{}>".format(session_no)
+
+        my_locals = {}
+        exec(
+            compile(
+                textwrap.dedent(ipython_cell_source),
+                filename=ipython_cell_id,
+                mode="exec",
+            ),
+            # TODO when Python 3.11 is the minimum supported version, use
+            # locals=my_locals instead of passing globals and locals in the
+            # next two lines as positional arguments
+            None,
+            my_locals,
+        )
+        f = my_locals["f"]
+        f.__module__ = "__main__"
+
+        # Preliminary sanity checks, and tests checking that joblib properly
+        # identified f as an interactive function defined in a jupyter notebook
+        assert f(1) == 1
+        assert f.__code__.co_filename == ipython_cell_id
+
+        memory = Memory(location=tmpdir.strpath, verbose=0)
+        cached_f = memory.cache(f)
+
+        assert len(os.listdir(tmpdir / "joblib")) == 1
+        f_cache_relative_directory = os.listdir(tmpdir / "joblib")[0]
+        assert "ipython-input" in f_cache_relative_directory
+
+        f_cache_directory = tmpdir / "joblib" / f_cache_relative_directory
+
+        if session_no == 0:
+            # The cache should be empty as cached_f has not been called yet.
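+            # (The per-function folder exists as soon as the function is
+            # decorated; func_code.py and one subfolder per distinct argument
+            # hash only appear on the first call.)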
+            assert os.listdir(f_cache_directory) == ["f"]
+            assert os.listdir(f_cache_directory / "f") == []
+
+            if call_before_reducing:
+                cached_f(3)
+                # Two entries were just created: func_code.py, and a folder
+                # containing the information (input hash/output) of
+                # cached_f(3)
+                assert len(os.listdir(f_cache_directory / "f")) == 2
+
+                # Now, testing #1035: when calling a cached function, joblib
+                # used to dynamically inspect the underlying function to
+                # extract its source code (to verify it matches the source code
+                # of the function as last inspected by joblib) -- however,
+                # source code introspection fails for dynamic functions sent to
+                # child processes - which would eventually make joblib clear
+                # the cache associated to f
+                Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+            else:
+                # Submit the function to the joblib child processes, although
+                # the function has never been called in the parent yet. This
+                # triggers a specific code branch inside
+                # MemorizedFunc.__reduce__.
+                Parallel(n_jobs=2)(delayed(cached_f)(i) for i in [1, 2])
+                # Ensure the child process has time to close the file.
+                # Wait up to 5 seconds for slow CI runs
+                for _ in range(25):
+                    if len(os.listdir(f_cache_directory / "f")) == 3:
+                        break
+                    time.sleep(0.2)  # pragma: no cover
+                assert len(os.listdir(f_cache_directory / "f")) == 3
+
+                cached_f(3)
+
+            # Making sure f's cache does not get cleared after the parallel
+            # calls, and contains ALL cached function calls (f(1), f(2), f(3))
+            # and 'func_code.py'
+            assert len(os.listdir(f_cache_directory / "f")) == 4
+        else:
+            # For the second session, there should be an already existing cache
+            assert len(os.listdir(f_cache_directory / "f")) == 4
+
+            cached_f(3)
+
+            # The previous cache should not be invalidated after calling the
+            # function in a new session
+            assert len(os.listdir(f_cache_directory / "f")) == 4
+
+
+def test_no_memory():
+    """Test memory with location=None: no memoize"""
+    accumulator = list()
+
+    def ff(arg):
+        accumulator.append(1)
+        return arg
+
+    memory = Memory(location=None, verbose=0)
+    gg = memory.cache(ff)
+    for _ in range(4):
+        current_accumulator = len(accumulator)
+        gg(1)
+        assert len(accumulator) == current_accumulator + 1
+
+
+def test_memory_kwarg(tmpdir):
+    "Test memory with a function with keyword arguments."
+    accumulator = list()
+
+    def g(arg1=None, arg2=1):
+        accumulator.append(1)
+        return arg1
+
+    check_identity_lazy(g, accumulator, tmpdir.strpath)
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    g = memory.cache(g)
+    # Smoke test with an explicit keyword argument:
+    assert g(arg1=30, arg2=2) == 30
+
+
+def test_memory_lambda(tmpdir):
+    "Test memory with a lambda function."
+    accumulator = list()
+
+    def helper(x):
+        """A helper function to define l as a lambda."""
+        accumulator.append(1)
+        return x
+
+    check_identity_lazy(lambda x: helper(x), accumulator, tmpdir.strpath)
+
+
+def test_memory_name_collision(tmpdir):
+    "Check that name collisions with functions will raise warnings"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def name_collision(x):
+        """A first function called name_collision"""
+        return x
+
+    a = name_collision
+
+    @memory.cache
+    def name_collision(x):
+        """A second function called name_collision"""
+        return x
+
+    b = name_collision
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        a(1)
+        b(1)
+
+    assert len(warninfo) == 1
+    assert "collision" in str(warninfo[0].message)
+
+
+def test_memory_warning_lambda_collisions(tmpdir):
+    # Check that multiple uses of lambdas will raise collision warnings
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    a = memory.cache(lambda x: x)
+    b = memory.cache(lambda x: x + 1)
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        assert a(0) == 0
+        assert b(1) == 2
+        assert a(1) == 1
+
+    # In recent Python versions, we can retrieve the code of lambdas,
+    # thus nothing is raised
+    assert len(warninfo) == 4
+
+
+def test_memory_warning_collision_detection(tmpdir):
+    # Check that collisions impossible to detect will raise appropriate
+    # warnings.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    a1 = eval("lambda x: x")
+    a1 = memory.cache(a1)
+    b1 = eval("lambda x: x+1")
+    b1 = memory.cache(b1)
+
+    with warns(JobLibCollisionWarning) as warninfo:
+        a1(1)
+        b1(1)
+        a1(0)
+
+    assert len(warninfo) == 2
+    assert "cannot detect" in str(warninfo[0].message).lower()
+
+
+def test_memory_partial(tmpdir):
+    "Test memory with functools.partial."
+    accumulator = list()
+
+    def func(x, y):
+        """A helper function for the functools.partial below."""
+        accumulator.append(1)
+        return y
+
+    import functools
+
+    function = functools.partial(func, 1)
+
+    check_identity_lazy(function, accumulator, tmpdir.strpath)
+
+
+def test_memory_eval(tmpdir):
+    "Smoke test memory with a function defined in an eval."
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    m = eval("lambda x: x")
+    mm = memory.cache(m)
+
+    assert mm(1) == 1
+
+
+def count_and_append(x=[]):
+    """A function with a side effect in its arguments.
+
+    Return the length of its argument and append one element.
+    """
+    len_x = len(x)
+    x.append(None)
+    return len_x
+
+
+def test_argument_change(tmpdir):
+    """Check that if a function has a side effect in its arguments, it
+    should use the hash of changing arguments.
+    """
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func = memory.cache(count_and_append)
+    # Call the function for the first time; it should be cached with
+    # argument x=[]
+    assert func() == 0
+    # The second time the argument is x=[None], which is not cached
+    # yet, so the function should be called a second time
+    assert func() == 1
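+
+
+def _sketch_argument_hashing():
+    # Editorial sketch (underscore-prefixed so pytest does not collect it):
+    # joblib keys a cache entry on a hash of the *current* argument values,
+    # which is why the mutated default above produces a cache miss.
+    from joblib.hashing import hash as joblib_hash
+
+    assert joblib_hash([]) != joblib_hash([None])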
+
+
+@with_numpy
+@parametrize("mmap_mode", [None, "r"])
+def test_memory_numpy(tmpdir, mmap_mode):
+    "Test memory with a function with numpy arrays."
+    accumulator = list()
+
+    def n(arg=None):
+        accumulator.append(1)
+        return arg
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode, verbose=0)
+    cached_n = memory.cache(n)
+
+    rnd = np.random.RandomState(0)
+    for i in range(3):
+        a = rnd.random_sample((10, 10))
+        for _ in range(3):
+            assert np.all(cached_n(a) == a)
+            assert len(accumulator) == i + 1
+
+
+@with_numpy
+def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch):
+    """Check that mmap_mode is respected even at the first call"""
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode="r", verbose=0)
+
+    @memory.cache()
+    def twice(a):
+        return a * 2
+
+    a = np.ones(3)
+
+    b = twice(a)
+    c = twice(a)
+
+    assert isinstance(c, np.memmap)
+    assert c.mode == "r"
+
+    assert isinstance(b, np.memmap)
+    assert b.mode == "r"
+
+    # Corrupt the file. Deleting the b and c mmaps first is necessary
+    # to be able to edit the file.
+    del b
+    del c
+    gc.collect()
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+    d = twice(a)
+    assert len(recorded_warnings) == 1
+    exception_msg = "Exception while loading results"
+    assert exception_msg in recorded_warnings[0]
+    # Assert that the recomputation returns a mmap
+    assert isinstance(d, np.memmap)
+    assert d.mode == "r"
+
+
+def test_memory_exception(tmpdir):
+    """Smoke test the exception handling of Memory."""
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    class MyException(Exception):
+        pass
+
+    @memory.cache
+    def h(exc=0):
+        if exc:
+            raise MyException
+
+    # Call once, to initialise the cache
+    h()
+
+    for _ in range(3):
+        # Call 3 times, to be sure that the Exception is always raised
+        with raises(MyException):
+            h(1)
+
+
+def test_memory_ignore(tmpdir):
+    "Test the ignore feature of memory"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    accumulator = list()
+
+    @memory.cache(ignore=["y"])
+    def z(x, y=1):
+        accumulator.append(1)
+
+    assert z.ignore == ["y"]
+
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=2)
+    assert len(accumulator) == 1
+
+
+def test_memory_ignore_decorated(tmpdir):
+    "Test the ignore feature of memory on a decorated function"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    accumulator = list()
+
+    def decorate(f):
+        @functools.wraps(f)
+        def wrapped(*args, **kwargs):
+            return f(*args, **kwargs)
+
+        return wrapped
+
+    @memory.cache(ignore=["y"])
+    @decorate
+    def z(x, y=1):
+        accumulator.append(1)
+
+    assert z.ignore == ["y"]
+
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=1)
+    assert len(accumulator) == 1
+    z(0, y=2)
+    assert len(accumulator) == 1
+
+
+def test_memory_args_as_kwargs(tmpdir):
+    """Non-regression test against 0.12.0 changes.
+
+    https://github.com/joblib/joblib/pull/751
+    """
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def plus_one(a):
+        return a + 1
+
+    # It's possible to call a positional arg as a kwarg.
+    assert plus_one(1) == 2
+    assert plus_one(a=1) == 2
+
+    # However, a positional argument that joblib hadn't seen
+    # before would cause a failure if it was passed as a kwarg.
+    assert plus_one(a=2) == 3
+
+
+@parametrize("ignore, verbose, mmap_mode", [(["x"], 100, "r"), ([], 10, None)])
+def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode):
+    "Check that memory.cache may be called with kwargs before decorating"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
+    def z(x):
+        pass
+
+    assert z.ignore == ignore
+    assert z._verbose == verbose
+    assert z.mmap_mode == mmap_mode
+
+
+def test_func_dir(tmpdir):
+    # Test the creation of the memory cache directory for the function.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    path = __name__.split(".")
+    path.append("f")
+    path = tmpdir.join("joblib", *path).strpath
+
+    g = memory.cache(f)
+    # Test that the function directory is created on demand
+    func_id = _build_func_identifier(f)
+    location = os.path.join(g.store_backend.location, func_id)
+    assert location == path
+    assert os.path.exists(path)
+    assert memory.location == os.path.dirname(g.store_backend.location)
+
+    # Test that the code is stored.
+    # For the following test to be robust to previous execution, we clear
+    # the in-memory store
+    _FUNCTION_HASHES.clear()
+    assert not g._check_previous_func_code()
+    assert os.path.exists(os.path.join(path, "func_code.py"))
+    assert g._check_previous_func_code()
+
+    # Test the robustness to failure of loading previous results.
+    args_id = g._get_args_id(1)
+    output_dir = os.path.join(g.store_backend.location, g.func_id, args_id)
+    a = g(1)
+    assert os.path.exists(output_dir)
+    os.remove(os.path.join(output_dir, "output.pkl"))
+    assert a == g(1)
+
+
+def test_persistence(tmpdir):
+    # Test that memorized functions can be pickled and restored.
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    g = memory.cache(f)
+    output = g(1)
+
+    h = pickle.loads(pickle.dumps(g))
+
+    args_id = h._get_args_id(1)
+    output_dir = os.path.join(h.store_backend.location, h.func_id, args_id)
+    assert os.path.exists(output_dir)
+    assert output == h.store_backend.load_item([h.func_id, args_id])
+    memory2 = pickle.loads(pickle.dumps(memory))
+    assert memory.store_backend.location == memory2.store_backend.location
+
+    # Smoke test that pickling a memory with location=None works
+    memory = Memory(location=None, verbose=0)
+    pickle.loads(pickle.dumps(memory))
+    g = memory.cache(f)
+    gp = pickle.loads(pickle.dumps(g))
+    gp(1)
+
+
+@pytest.mark.parametrize("consider_cache_valid", [True, False])
+def test_check_call_in_cache(tmpdir, consider_cache_valid):
+    for func in (
+        MemorizedFunc(
+            f, tmpdir.strpath, cache_validation_callback=lambda _: consider_cache_valid
+        ),
+        Memory(location=tmpdir.strpath, verbose=0).cache(
+            f, cache_validation_callback=lambda _: consider_cache_valid
+        ),
+    ):
+        result = func.check_call_in_cache(2)
+        assert isinstance(result, bool)
+        assert not result
+        assert func(2) == 5
+        result = func.check_call_in_cache(2)
+        assert isinstance(result, bool)
+        assert result == consider_cache_valid
+        func.clear()
+
+    func = NotMemorizedFunc(f)
+    assert not func.check_call_in_cache(2)
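+
+
+def _sketch_check_call_in_cache(tmp_path):
+    # Usage sketch (hypothetical helper, underscore-prefixed so pytest skips
+    # it): check_call_in_cache answers "is this exact call already cached?"
+    # without triggering any computation.
+    from joblib import Memory
+
+    square = Memory(location=str(tmp_path), verbose=0).cache(lambda x: x * x)
+    assert not square.check_call_in_cache(3)
+    square(3)
+    assert square.check_call_in_cache(3)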
+
+
+def test_call_and_shelve(tmpdir):
+    # Test MemorizedFunc outputting a reference to the cache.
+
+    for func, Result in zip(
+        (
+            MemorizedFunc(f, tmpdir.strpath),
+            NotMemorizedFunc(f),
+            Memory(location=tmpdir.strpath, verbose=0).cache(f),
+            Memory(location=None).cache(f),
+        ),
+        (MemorizedResult, NotMemorizedResult, MemorizedResult, NotMemorizedResult),
+    ):
+        assert func(2) == 5
+        result = func.call_and_shelve(2)
+        assert isinstance(result, Result)
+        assert result.get() == 5
+
+        result.clear()
+        with raises(KeyError):
+            result.get()
+        result.clear()  # Do nothing if there is no cache.
+
+
+def test_call_and_shelve_lazily_load_stored_result(tmpdir):
+    """Check that call_and_shelve only loads stored data if needed."""
+    test_access_time_file = tmpdir.join("test_access")
+    test_access_time_file.write("test_access")
+    test_access_time = os.stat(test_access_time_file.strpath).st_atime
+    # Check that the file system's access time resolution is finer than
+    # the wait timings used in this test.
+    time.sleep(0.5)
+    assert test_access_time_file.read() == "test_access"
+
+    if test_access_time == os.stat(test_access_time_file.strpath).st_atime:
+        # Skip this test when access time cannot be retrieved with enough
+        # precision from the file system (e.g. NTFS on windows).
+        pytest.skip("filesystem does not support fine-grained access time attribute")
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func = memory.cache(f)
+    args_id = func._get_args_id(2)
+    result_path = os.path.join(
+        memory.store_backend.location, func.func_id, args_id, "output.pkl"
+    )
+    assert func(2) == 5
+    first_access_time = os.stat(result_path).st_atime
+    time.sleep(1)
+
+    # Should not access the stored data
+    result = func.call_and_shelve(2)
+    assert isinstance(result, MemorizedResult)
+    assert os.stat(result_path).st_atime == first_access_time
+    time.sleep(1)
+
+    # Read the stored data => last access time is greater than first_access
+    assert result.get() == 5
+    assert os.stat(result_path).st_atime > first_access_time
+
+
+def test_memorized_pickling(tmpdir):
+    for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)):
+        filename = tmpdir.join("pickling_test.dat").strpath
+        result = func.call_and_shelve(2)
+        with open(filename, "wb") as fp:
+            pickle.dump(result, fp)
+        with open(filename, "rb") as fp:
+            result2 = pickle.load(fp)
+        assert result2.get() == result.get()
+        os.remove(filename)
+
+
+def test_memorized_repr(tmpdir):
+    func = MemorizedFunc(f, tmpdir.strpath)
+    result = func.call_and_shelve(2)
+
+    func2 = MemorizedFunc(f, tmpdir.strpath)
+    result2 = func2.call_and_shelve(2)
+    assert result.get() == result2.get()
+    assert repr(func) == repr(func2)
+
+    # Smoke test with NotMemorizedFunc
+    func = NotMemorizedFunc(f)
+    repr(func)
+    repr(func.call_and_shelve(2))
+
+    # Smoke test for message output (increase code coverage)
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time())
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=11)
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time())
+    result = func.call_and_shelve(11)
+    result.get()
+
+    func = MemorizedFunc(f, tmpdir.strpath, verbose=5)
+    result = func.call_and_shelve(11)
+    result.get()
+
+
+def test_memory_file_modification(capsys, tmpdir, monkeypatch):
+    # Test that modifying a Python file after loading it does not lead to
+    # recomputation
+    dir_name = tmpdir.mkdir("tmp_import").strpath
+    filename = os.path.join(dir_name, "tmp_joblib_.py")
+    content = "def f(x):\n    print(x)\n    return x\n"
+    with open(filename, "w") as module_file:
+        module_file.write(content)
+
+    # Load the module:
+    monkeypatch.syspath_prepend(dir_name)
+    import tmp_joblib_ as tmp
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    f = memory.cache(tmp.f)
+    # First call f a few times
+    f(1)
+    f(2)
+    f(1)
+
+    # Now modify the module where f is stored without modifying f
+    with open(filename, "w") as module_file:
+        module_file.write("\n\n" + content)
+
+    # And call f a couple more times
+    f(1)
+    f(1)
+
+    # Flush the .pyc files
+    shutil.rmtree(dir_name)
+    os.mkdir(dir_name)
+    # Now modify the module where f is stored, modifying f
+    content = 'def f(x):\n    print("x=%s" % x)\n    return x\n'
+    with open(filename, "w") as module_file:
+        module_file.write(content)
+
+    # And call f more times prior to reloading: the cache should not be
+    # invalidated at this point as the active function definition has not
+    # changed in memory yet.
+    f(1)
+    f(1)
+
+    # Now reload
+    sys.stdout.write("Reloading\n")
+    sys.modules.pop("tmp_joblib_")
+    import tmp_joblib_ as tmp
+
+    f = memory.cache(tmp.f)
+
+    # And call f more times
+    f(1)
+    f(1)
+
+    out, err = capsys.readouterr()
+    assert out == "1\n2\nReloading\nx=1\n"
+
+
+def _function_to_cache(a, b):
+    # Just a placeholder function to be mutated by tests
+    pass
+
+
+def _sum(a, b):
+    return a + b
+
+
+def _product(a, b):
+    return a * b
+
+
+def test_memory_in_memory_function_code_change(tmpdir):
+    _function_to_cache.__code__ = _sum.__code__
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    f = memory.cache(_function_to_cache)
+
+    assert f(1, 2) == 3
+    assert f(1, 2) == 3
+
+    with warns(JobLibCollisionWarning):
+        # Check that inline function modification triggers a cache invalidation
+        _function_to_cache.__code__ = _product.__code__
+        assert f(1, 2) == 2
+        assert f(1, 2) == 2
+
+
+def test_clear_memory_with_none_location():
+    memory = Memory(location=None)
+    memory.clear()
+
+
+def func_with_kwonly_args(a, b, *, kw1="kw1", kw2="kw2"):
+    return a, b, kw1, kw2
+
+
+def func_with_signature(a: int, b: float) -> float:
+    return a + b
+
+
+def test_memory_func_with_kwonly_args(tmpdir):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func_cached = memory.cache(func_with_kwonly_args)
+
+    assert func_cached(1, 2, kw1=3) == (1, 2, 3, "kw2")
+
+    # Make sure that providing a keyword-only argument by
+    # position raises an exception
+    with raises(ValueError) as excinfo:
+        func_cached(1, 2, 3, kw2=4)
+    excinfo.match("Keyword-only parameter 'kw1' was passed as positional parameter")
+
+    # A keyword-only parameter passed by position should still raise
+    # ValueError after a cached call
+    func_cached(1, 2, kw1=3, kw2=4)
+
+    with raises(ValueError) as excinfo:
+        func_cached(1, 2, 3, kw2=4)
+    excinfo.match("Keyword-only parameter 'kw1' was passed as positional parameter")
+
+    # Test 'ignore' parameter
+    func_cached = memory.cache(func_with_kwonly_args, ignore=["kw2"])
+    assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4)
+    assert func_cached(1, 2, kw1=3, kw2="ignored") == (1, 2, 3, 4)
+
+
+def test_memory_func_with_signature(tmpdir):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    func_cached = memory.cache(func_with_signature)
+
+    assert func_cached(1, 2.0) == 3.0
+
+
+def _setup_toy_cache(tmpdir, num_inputs=10):
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache()
+    def get_1000_bytes(arg):
+        return "a" * 1000
+
+    inputs = list(range(num_inputs))
+    for arg in inputs:
+        get_1000_bytes(arg)
+
+    func_id = _build_func_identifier(get_1000_bytes)
+    hash_dirnames = [get_1000_bytes._get_args_id(arg) for arg in inputs]
+
+    full_hashdirs = [
+        os.path.join(get_1000_bytes.store_backend.location, func_id, dirname)
+        for dirname in hash_dirnames
+    ]
+    return memory, full_hashdirs, get_1000_bytes
+
+
+def test__get_items(tmpdir):
+    memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    hash_dirs = [ci.path for ci in items]
+    assert set(hash_dirs) == set(expected_hash_dirs)
+
+    def get_files_size(directory):
+        full_paths = [os.path.join(directory, fn) for fn in os.listdir(directory)]
+        return sum(os.path.getsize(fp) for fp in full_paths)
+
+    expected_hash_cache_sizes = [get_files_size(hash_dir) for hash_dir in hash_dirs]
+    hash_cache_sizes = [ci.size for ci in items]
+    assert hash_cache_sizes == expected_hash_cache_sizes
+
+    output_filenames = [os.path.join(hash_dir, "output.pkl") for hash_dir in hash_dirs]
+
+    expected_last_accesses = [
+        datetime.datetime.fromtimestamp(os.path.getatime(fn)) for fn in output_filenames
+    ]
+    last_accesses = [ci.last_access for ci in items]
+    assert last_accesses == expected_last_accesses
+
+
+def test__get_items_to_delete(tmpdir):
+    # Test an empty cache
+    memory, _, _ = _setup_toy_cache(tmpdir, num_inputs=0)
+    items_to_delete = memory.store_backend._get_items_to_delete("1K")
+    assert items_to_delete == []
+
+    memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir)
+    items = memory.store_backend.get_items()
+    # bytes_limit set to keep only one cache item (each hash cache
+    # folder is about 1000 bytes + metadata)
+    items_to_delete = memory.store_backend._get_items_to_delete("2K")
+    nb_hashes = len(expected_hash_cachedirs)
+    assert set.issubset(set(items_to_delete), set(items))
+    assert len(items_to_delete) == nb_hashes - 1
+
+    # Sanity check that bytes_limit=2048 is the same as bytes_limit='2K'
+    items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048)
+    assert sorted(items_to_delete) == sorted(items_to_delete_2048b)
+
+    # bytes_limit greater than the size of the cache
+    items_to_delete_empty = memory.store_backend._get_items_to_delete("1M")
+    assert items_to_delete_empty == []
+
+    # All the cache items need to be deleted
+    bytes_limit_too_small = 500
+    items_to_delete_500b = memory.store_backend._get_items_to_delete(
+        bytes_limit_too_small
+    )
+    assert set(items_to_delete_500b) == set(items)
+
+    # Test the LRU property: surviving cache items should all have a more
+    # recent last_access than the ones that have been deleted
+    items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000)
+    surviving_items = set(items).difference(items_to_delete_6000b)
+
+    assert max(ci.last_access for ci in items_to_delete_6000b) <= min(
+        ci.last_access for ci in surviving_items
+    )
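+
+
+def _sketch_memstr_to_bytes():
+    # Illustration (editorial sketch, assuming the joblib.disk helper): the
+    # size strings accepted by _get_items_to_delete and reduce_size use
+    # binary units, so "2K" above means 2048 bytes.
+    from joblib.disk import memstr_to_bytes
+
+    assert memstr_to_bytes("2K") == 2048
+    assert memstr_to_bytes("1M") == 1024**2
+    assert memstr_to_bytes("1G") == 1024**3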
+
+
+def test_memory_reduce_size_bytes_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default memory.bytes_limit is None and reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if bytes_limit greater than the size of
+    # the cache
+    memory.reduce_size(bytes_limit="1M")
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # bytes_limit is set so that only two cache items are kept
+    memory.reduce_size(bytes_limit="3K")
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # bytes_limit set so that no cache item is kept
+    bytes_limit_too_small = 500
+    memory.reduce_size(bytes_limit=bytes_limit_too_small)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_items_limit(tmpdir):
+    memory, _, _ = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if items_limit is greater than the number
+    # of items in the cache
+    memory.reduce_size(items_limit=10)
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # items_limit is set so that only two cache items are kept
+    memory.reduce_size(items_limit=2)
+    cache_items = memory.store_backend.get_items()
+    assert set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # items_limit set so that no cache item is kept
+    memory.reduce_size(items_limit=0)
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_reduce_size_age_limit(tmpdir):
+    import datetime
+    import time
+
+    memory, _, put_cache = _setup_toy_cache(tmpdir)
+    ref_cache_items = memory.store_backend.get_items()
+
+    # By default reduce_size is a noop
+    memory.reduce_size()
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # No cache items deleted if age_limit is large enough.
+    memory.reduce_size(age_limit=datetime.timedelta(days=1))
+    cache_items = memory.store_backend.get_items()
+    assert sorted(ref_cache_items) == sorted(cache_items)
+
+    # age_limit is set so that only two cache items are kept
+    time.sleep(1)
+    put_cache(-1)
+    put_cache(-2)
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=1))
+    cache_items = memory.store_backend.get_items()
+    assert not set.issubset(set(cache_items), set(ref_cache_items))
+    assert len(cache_items) == 2
+
+    # Ensure age_limit is forced to be positive
+    with pytest.raises(ValueError, match="has to be a positive"):
+        memory.reduce_size(age_limit=datetime.timedelta(seconds=-1))
+
+    # age_limit set so that no cache item is kept
+    time.sleep(0.001)  # make sure the age is different
+    memory.reduce_size(age_limit=datetime.timedelta(seconds=0))
+    cache_items = memory.store_backend.get_items()
+    assert cache_items == []
+
+
+def test_memory_clear(tmpdir):
+    memory, _, g = _setup_toy_cache(tmpdir)
+    memory.clear()
+
+    assert os.listdir(memory.store_backend.location) == []
+
+    # Check that the cache for the function's hash is also reset.
+    assert not g._check_previous_func_code(stacklevel=4)
+
+
+def fast_func_with_complex_output():
+    complex_obj = ["a" * 1000] * 1000
+    return complex_obj
+
+
+def fast_func_with_conditional_complex_output(complex_output=True):
+    complex_obj = {str(i): i for i in range(int(1e5))}
+    return complex_obj if complex_output else "simple output"
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd):
+    # Test the race condition where multiple processes are writing into
+    # the same output.pkl. See
+    # https://github.com/joblib/joblib/issues/490 for more details.
+    memory = Memory(location=tmpdir.strpath)
+    func_cached = memory.cache(fast_func_with_complex_output)
+
+    Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3))
+
+    stdout, stderr = capfd.readouterr()
+
+    # Checking both stdout and stderr (ongoing PR #434 may change
+    # logging destination) to make sure there is no exception while
+    # loading the results
+    exception_msg = "Exception while loading results"
+    assert exception_msg not in stdout
+    assert exception_msg not in stderr
+
+
+@with_multiprocessing
+def test_cached_function_race_condition_when_persisting_output_2(tmpdir, capfd):
+    # Test the race condition in the first attempt at solving
+    # https://github.com/joblib/joblib/issues/490. The race condition
+    # was due to the delay between seeing the cache directory created
+    # (interpreted as the result being cached) and the output.pkl being
+    # pickled.
+    memory = Memory(location=tmpdir.strpath)
+    func_cached = memory.cache(fast_func_with_conditional_complex_output)
+
+    Parallel(n_jobs=2)(
+        delayed(func_cached)(True if i % 2 == 0 else False) for i in range(3)
+    )
+
+    stdout, stderr = capfd.readouterr()
+
+    # Checking both stdout and stderr (ongoing PR #434 may change
+    # logging destination) to make sure there is no exception while
+    # loading the results
+    exception_msg = "Exception while loading results"
+    assert exception_msg not in stdout
+    assert exception_msg not in stderr
+
+
+def test_memory_recomputes_after_an_error_while_loading_results(tmpdir, monkeypatch):
+    memory = Memory(location=tmpdir.strpath)
+
+    def func(arg):
+        # This makes sure that the timestamps returned by two calls of
+        # func are different. This is needed on Windows where
+        # time.time resolution may not be accurate enough
+        time.sleep(0.01)
+        return arg, time.time()
+
+    cached_func = memory.cache(func)
+    input_arg = "arg"
+    arg, timestamp = cached_func(input_arg)
+
+    # Make sure the function is correctly cached
+    assert arg == input_arg
+
+    # Corrupting output.pkl to make sure that an error happens when
+    # loading the cached result
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch)
+    recomputed_arg, recomputed_timestamp = cached_func(arg)
+    assert len(recorded_warnings) == 1
+    exception_msg = "Exception while loading results"
+    assert exception_msg in recorded_warnings[0]
+    assert recomputed_arg == arg
+    assert recomputed_timestamp > timestamp
+
+    # Corrupting output.pkl again to make sure that an error happens when
+    # loading the cached result
+    corrupt_single_cache_item(memory)
+    reference = cached_func.call_and_shelve(arg)
+    try:
+        reference.get()
+        raise AssertionError(
+            "It should not be possible to load a corrupted MemorizedResult"
+        )
+    except KeyError as e:
+        message = "is corrupted"
+        assert message in str(e.args)
+
+
+class IncompleteStoreBackend(StoreBackendBase):
+    """This backend cannot be instantiated and should raise a TypeError."""
+
+    pass
+
+
+class DummyStoreBackend(StoreBackendBase):
+    """A dummy store backend that does nothing."""
+
+    def _open_item(self, *args, **kwargs):
+        """Open an item on the store."""
+        "Does nothing"
+
+    def _item_exists(self, location):
+        """Check if an item location exists."""
+        "Does nothing"
+
+    def _move_item(self, src, dst):
+        """Move an item from src to dst in the store."""
+        "Does nothing"
+
+    def create_location(self, location):
+        """Create location on store."""
+        "Does nothing"
+
+    def exists(self, obj):
+        """Check if an object exists in the store"""
+        return False
+
+    def clear_location(self, obj):
+        """Clear object on store"""
+        "Does nothing"
+
+    def get_items(self):
+        """Returns the whole list of items available in the cache."""
+        return []
+
+    def configure(self, location, *args, **kwargs):
+        """Configure the store"""
+        "Does nothing"
+
+
+@parametrize("invalid_prefix", [None, dict(), list()])
+def test_register_invalid_store_backends_key(invalid_prefix):
+    # Verify the right exceptions are raised when passing a wrong backend key.
+    with raises(ValueError) as excinfo:
+        register_store_backend(invalid_prefix, None)
+    excinfo.match(r"Store backend name should be a string*")
+
+
+def test_register_invalid_store_backends_object():
+    # Verify the right exceptions are raised when passing a wrong backend
+    # object.
+    with raises(ValueError) as excinfo:
+        register_store_backend("fs", None)
+    excinfo.match(r"Store backend should inherit StoreBackendBase*")
+
+
+def test_memory_default_store_backend():
+    # Test that requesting an unknown backend raises a TypeError
+    with raises(TypeError) as excinfo:
+        Memory(location="/tmp/joblib", backend="unknown")
+    excinfo.match(r"Unknown location*")
+
+
+def test_warning_on_unknown_location_type():
+    class NonSupportedLocationClass:
+        pass
+
+    unsupported_location = NonSupportedLocationClass()
+
+    with warns(UserWarning) as warninfo:
+        _store_backend_factory("local", location=unsupported_location)
+
+    expected_message = (
+        "Instantiating a backend using a "
+        "NonSupportedLocationClass as a location is not "
+        "supported by joblib"
+    )
+    assert expected_message in str(warninfo[0].message)
+
+
+def test_instanciate_incomplete_store_backend():
+    # Verify that registering an external incomplete store backend raises an
+    # exception when one tries to instantiate it.
+ backend_name = "isb" + register_store_backend(backend_name, IncompleteStoreBackend) + assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items() + with raises(TypeError) as excinfo: + _store_backend_factory(backend_name, "fake_location") + excinfo.match( + r"Can't instantiate abstract class IncompleteStoreBackend " + "(without an implementation for|with) abstract methods*" + ) + + +def test_dummy_store_backend(): + # Verify that registering an external store backend works. + + backend_name = "dsb" + register_store_backend(backend_name, DummyStoreBackend) + assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items() + + backend_obj = _store_backend_factory(backend_name, "dummy_location") + assert isinstance(backend_obj, DummyStoreBackend) + + +def test_instanciate_store_backend_with_pathlib_path(): + # Instantiate a FileSystemStoreBackend using a pathlib.Path object + path = pathlib.Path("some_folder") + backend_obj = _store_backend_factory("local", path) + try: + assert backend_obj.location == "some_folder" + finally: # remove cache folder after test + shutil.rmtree("some_folder", ignore_errors=True) + + +def test_filesystem_store_backend_repr(tmpdir): + # Verify string representation of a filesystem store backend. + + repr_pattern = 'FileSystemStoreBackend(location="{location}")' + backend = FileSystemStoreBackend() + assert backend.location is None + + repr(backend) # Should not raise an exception + + assert str(backend) == repr_pattern.format(location=None) + + # backend location is passed explicitly via the configure method (called + # by the internal _store_backend_factory function) + backend.configure(tmpdir.strpath) + + assert str(backend) == repr_pattern.format(location=tmpdir.strpath) + + repr(backend) # Should not raise an exception + + +def test_memory_objects_repr(tmpdir): + # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. + + def my_func(a, b): + return a + b + + memory = Memory(location=tmpdir.strpath, verbose=0) + memorized_func = memory.cache(my_func) + + memorized_func_repr = "MemorizedFunc(func={func}, location={location})" + + assert str(memorized_func) == memorized_func_repr.format( + func=my_func, location=memory.store_backend.location + ) + + memorized_result = memorized_func.call_and_shelve(42, 42) + + memorized_result_repr = ( + 'MemorizedResult(location="{location}", func="{func}", args_id="{args_id}")' + ) + + assert str(memorized_result) == memorized_result_repr.format( + location=memory.store_backend.location, + func=memorized_result.func_id, + args_id=memorized_result.args_id, + ) + + assert str(memory) == "Memory(location={location})".format( + location=memory.store_backend.location + ) + + +def test_memorized_result_pickle(tmpdir): + # Verify a MemoryResult object can be pickled/depickled. 
+    # test introduced following issue
+    # https://github.com/joblib/joblib/issues/747
+
+    memory = Memory(location=tmpdir.strpath)
+
+    @memory.cache
+    def g(x):
+        return x**2
+
+    memorized_result = g.call_and_shelve(4)
+    memorized_result_pickle = pickle.dumps(memorized_result)
+    memorized_result_loads = pickle.loads(memorized_result_pickle)
+
+    assert (
+        memorized_result.store_backend.location
+        == memorized_result_loads.store_backend.location
+    )
+    assert memorized_result.func == memorized_result_loads.func
+    assert memorized_result.args_id == memorized_result_loads.args_id
+    assert str(memorized_result) == str(memorized_result_loads)
+
+
+def compare(left, right, ignored_attrs=None):
+    if ignored_attrs is None:
+        ignored_attrs = []
+
+    left_vars = vars(left)
+    right_vars = vars(right)
+    assert set(left_vars.keys()) == set(right_vars.keys())
+    for attr in left_vars.keys():
+        if attr in ignored_attrs:
+            continue
+        assert left_vars[attr] == right_vars[attr]
+
+
+@pytest.mark.parametrize(
+    "memory_kwargs",
+    [
+        {"compress": 3, "verbose": 2},
+        {"mmap_mode": "r", "verbose": 5, "backend_options": {"parameter": "unused"}},
+    ],
+)
+def test_memory_pickle_dump_load(tmpdir, memory_kwargs):
+    memory = Memory(location=tmpdir.strpath, **memory_kwargs)
+
+    memory_reloaded = pickle.loads(pickle.dumps(memory))
+
+    # Compare the Memory instance before and after the pickle roundtrip
+    compare(memory.store_backend, memory_reloaded.store_backend)
+    compare(
+        memory,
+        memory_reloaded,
+        ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]),
+    )
+    assert hash(memory) == hash(memory_reloaded)
+
+    func_cached = memory.cache(f)
+
+    func_cached_reloaded = pickle.loads(pickle.dumps(func_cached))
+
+    # Compare the MemorizedFunc instance before/after the pickle roundtrip
+    compare(func_cached.store_backend, func_cached_reloaded.store_backend)
+    compare(
+        func_cached,
+        func_cached_reloaded,
+        ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]),
+    )
+    assert hash(func_cached) == hash(func_cached_reloaded)
+
+    # Compare the MemorizedResult instance before/after the pickle roundtrip
+    memorized_result = func_cached.call_and_shelve(1)
+    memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result))
+
+    compare(memorized_result.store_backend, memorized_result_reloaded.store_backend)
+    compare(
+        memorized_result,
+        memorized_result_reloaded,
+        ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]),
+    )
+    assert hash(memorized_result) == hash(memorized_result_reloaded)
+
+
+def test_info_log(tmpdir, caplog):
+    caplog.set_level(logging.INFO)
+    x = 3
+
+    memory = Memory(location=tmpdir.strpath, verbose=20)
+
+    @memory.cache
+    def f(x):
+        return x**2
+
+    _ = f(x)
+    assert "Querying" in caplog.text
+    caplog.clear()
+
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+
+    @memory.cache
+    def f(x):
+        return x**2
+
+    _ = f(x)
+    assert "Querying" not in caplog.text
+    caplog.clear()
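+
+
+def _sketch_cache_validation_callback(tmp_path):
+    # Usage sketch (hypothetical helper, not collected by pytest): the
+    # callback receives the metadata of a cached call and returns True to
+    # keep the entry, or a falsy value to force recomputation.
+    from joblib import Memory
+
+    memory = Memory(location=str(tmp_path), verbose=0)
+
+    @memory.cache(cache_validation_callback=lambda metadata: True)
+    def add(a, b):
+        return a + b
+
+    assert add(1, 2) == 3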
+
+
+class TestCacheValidationCallback:
+    "Tests on the parameter `cache_validation_callback`"
+
+    def foo(self, x, d, delay=None):
+        d["run"] = True
+        if delay is not None:
+            time.sleep(delay)
+        return x * 2
+
+    def test_invalid_cache_validation_callback(self, memory):
+        "Test invalid values for `cache_validation_callback`"
+        match = "cache_validation_callback needs to be callable. Got True."
+        with pytest.raises(ValueError, match=match):
+            memory.cache(cache_validation_callback=True)
+
+    @pytest.mark.parametrize("consider_cache_valid", [True, False])
+    def test_constant_cache_validation_callback(self, memory, consider_cache_valid):
+        "Test constant cache validation callbacks"
+        f = memory.cache(
+            self.foo,
+            cache_validation_callback=lambda _: consider_cache_valid,
+            ignore=["d"],
+        )
+
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1) == 4
+        assert f(2, d2) == 4
+
+        assert d1["run"]
+        assert d2["run"] != consider_cache_valid
+
+    def test_memory_only_cache_long_run(self, memory):
+        "Test cache validity based on run duration."
+
+        def cache_validation_callback(metadata):
+            duration = metadata["duration"]
+            if duration > 0.1:
+                return True
+
+        f = memory.cache(
+            self.foo, cache_validation_callback=cache_validation_callback, ignore=["d"]
+        )
+
+        # Short runs are not cached
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1, delay=0) == 4
+        assert f(2, d2, delay=0) == 4
+        assert d1["run"]
+        assert d2["run"]
+
+        # Longer runs are cached
+        d1, d2 = {"run": False}, {"run": False}
+        assert f(2, d1, delay=0.2) == 4
+        assert f(2, d2, delay=0.2) == 4
+        assert d1["run"]
+        assert not d2["run"]
+
+    def test_memory_expires_after(self, memory):
+        "Test expiry of old cached results"
+
+        f = memory.cache(
+            self.foo, cache_validation_callback=expires_after(seconds=0.3), ignore=["d"]
+        )
+
+        d1, d2, d3 = {"run": False}, {"run": False}, {"run": False}
+        assert f(2, d1) == 4
+        assert f(2, d2) == 4
+        time.sleep(0.5)
+        assert f(2, d3) == 4
+
+        assert d1["run"]
+        assert not d2["run"]
+        assert d3["run"]
+
+
+class TestMemorizedFunc:
+    "Tests for the MemorizedFunc and NotMemorizedFunc classes"
+
+    @staticmethod
+    def f(x, counter):
+        counter[x] = counter.get(x, 0) + 1
+        return counter[x]
+
+    def test_call_method_memorized(self, memory):
+        "Test calling the function"
+
+        f = memory.cache(self.f, ignore=["counter"])
+
+        counter = {}
+        assert f(2, counter) == 1
+        assert f(2, counter) == 1
+
+        x, meta = f.call(2, counter)
+        assert x == 2, "f has not been called properly"
+        assert isinstance(meta, dict), (
+            "Metadata are not returned by MemorizedFunc.call."
+        )
+
+    def test_call_method_not_memorized(self, memory):
+        "Test calling the function"
+
+        f = NotMemorizedFunc(self.f)
+
+        counter = {}
+        assert f(2, counter) == 1
+        assert f(2, counter) == 2
+
+        x, meta = f.call(2, counter)
+        assert x == 3, "f has not been called properly"
+        assert isinstance(meta, dict), (
+            "Metadata are not returned by NotMemorizedFunc.call."
+        )
+
+
+@with_numpy
+@parametrize(
+    "location",
+    [
+        "test_cache_dir",
+        pathlib.Path("test_cache_dir"),
+        pathlib.Path("test_cache_dir").resolve(),
+    ],
+)
+def test_memory_creates_gitignore(location):
+    """Test that using the memory object automatically creates a `.gitignore` file
+    within the new cache directory."""
+
+    mem = Memory(location)
+    arr = np.asarray([[1, 2, 3], [4, 5, 6]])
+    costly_operation = mem.cache(np.square)
+    costly_operation(arr)
+
+    location = pathlib.Path(location)
+
+    try:
+        path_to_gitignore_file = os.path.join(location, ".gitignore")
+        gitignore_file_content = "# Created by joblib automatically.\n*\n"
+        with open(path_to_gitignore_file) as f:
+            assert gitignore_file_content == f.read()
+
+    finally:  # remove the cache folder after the test
+        shutil.rmtree(location, ignore_errors=True)
diff --git a/lib/python3.10/site-packages/joblib/test/test_memory_async.py b/lib/python3.10/site-packages/joblib/test/test_memory_async.py
new file mode 100644
index 0000000000000000000000000000000000000000..a22f3066985cbd12b87b9ffa9b7f2bb85e04a3bc
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/test/test_memory_async.py
@@ -0,0 +1,180 @@
+import asyncio
+import gc
+import shutil
+
+import pytest
+
+from joblib.memory import (
+    AsyncMemorizedFunc,
+    AsyncNotMemorizedFunc,
+    MemorizedResult,
+    Memory,
+    NotMemorizedResult,
+)
+from joblib.test.common import np, with_numpy
+from joblib.testing import raises
+
+from .test_memory import corrupt_single_cache_item, monkeypatch_cached_func_warn
+
+
+async def check_identity_lazy_async(func, accumulator, location):
+    """Similar to check_identity_lazy, but for coroutine functions."""
+    memory = Memory(location=location, verbose=0)
+    func = memory.cache(func)
+    for i in range(3):
+        for _ in range(2):
+            value = await func(i)
+            assert value == i
+            assert len(accumulator) == i + 1
+
+
+@pytest.mark.asyncio
+async def test_memory_integration_async(tmpdir):
+    accumulator = list()
+
+    async def f(n):
+        await asyncio.sleep(0.1)
+        accumulator.append(1)
+        return n
+
+    await check_identity_lazy_async(f, accumulator, tmpdir.strpath)
+
+    # Now test clearing
+    for compress in (False, True):
+        for mmap_mode in ("r", None):
+            memory = Memory(
+                location=tmpdir.strpath,
+                verbose=10,
+                mmap_mode=mmap_mode,
+                compress=compress,
+            )
+            # First clear the cache directory, to check that our code can
+            # handle that
+            # NOTE: this line would raise an exception, as the database
+            # file is still open; we ignore the error since we want to
+            # test what happens if the directory disappears
+            shutil.rmtree(tmpdir.strpath, ignore_errors=True)
+            g = memory.cache(f)
+            await g(1)
+            g.clear(warn=False)
+            current_accumulator = len(accumulator)
+            out = await g(1)
+
+            assert len(accumulator) == current_accumulator + 1
+            # Also, check that Memory.eval works similarly
+            evaled = await memory.eval(f, 1)
+            assert evaled == out
+            assert len(accumulator) == current_accumulator + 1
+
+    # Now do a smoke test with a function defined in __main__, as the name
+    # mangling rules are more complex
+    f.__module__ = "__main__"
+    memory = Memory(location=tmpdir.strpath, verbose=0)
+    await memory.cache(f)(1)
+
+
+@pytest.mark.asyncio
+async def test_no_memory_async():
+    accumulator = list()
+
+    async def ff(x):
+        await asyncio.sleep(0.1)
+        accumulator.append(1)
+        return x
+
+    memory = Memory(location=None, verbose=0)
+    gg = memory.cache(ff)
+    for _ in range(4):
+        current_accumulator = len(accumulator)
+        await gg(1)
+        assert len(accumulator) == current_accumulator + 1
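+
+
+async def _sketch_async_cache(tmp_path):
+    # Editorial sketch (hypothetical helper, not collected by pytest):
+    # Memory.cache detects coroutine functions and wraps them in an
+    # AsyncMemorizedFunc, so cached calls must be awaited.
+    memory = Memory(location=str(tmp_path), verbose=0)
+
+    async def double(x):
+        return 2 * x
+
+    cached = memory.cache(double)
+    assert isinstance(cached, AsyncMemorizedFunc)
+    return await cached(21)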
+
+
+@with_numpy
+@pytest.mark.asyncio
+async def test_memory_numpy_check_mmap_mode_async(tmpdir, monkeypatch):
+    """Check that mmap_mode is respected even at the first call"""
+
+    memory = Memory(location=tmpdir.strpath, mmap_mode="r", verbose=0)
+
+    @memory.cache()
+    async def twice(a):
+        return a * 2
+
+    a = np.ones(3)
+    b = await twice(a)
+    c = await twice(a)
+
+    assert isinstance(c, np.memmap)
+    assert c.mode == "r"
+
+    assert isinstance(b, np.memmap)
+    assert b.mode == "r"
+
+    # Corrupt the file. Deleting the b and c mmaps first is necessary
+    # to be able to edit the file.
+    del b
+    del c
+    gc.collect()
+    corrupt_single_cache_item(memory)
+
+    # Make sure that corrupting the file causes recomputation and that
+    # a warning is issued.
+    recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
+    d = await twice(a)
+    assert len(recorded_warnings) == 1
+    exception_msg = "Exception while loading results"
+    assert exception_msg in recorded_warnings[0]
+    # Assert that the recomputation returns a mmap
+    assert isinstance(d, np.memmap)
+    assert d.mode == "r"
+
+
+@pytest.mark.asyncio
+async def test_call_and_shelve_async(tmpdir):
+    async def f(x, y=1):
+        await asyncio.sleep(0.1)
+        return x**2 + y
+
+    # Test MemorizedFunc outputting a reference to the cache.
+    for func, Result in zip(
+        (
+            AsyncMemorizedFunc(f, tmpdir.strpath),
+            AsyncNotMemorizedFunc(f),
+            Memory(location=tmpdir.strpath, verbose=0).cache(f),
+            Memory(location=None).cache(f),
+        ),
+        (
+            MemorizedResult,
+            NotMemorizedResult,
+            MemorizedResult,
+            NotMemorizedResult,
+        ),
+    ):
+        for _ in range(2):
+            result = await func.call_and_shelve(2)
+            assert isinstance(result, Result)
+            assert result.get() == 5
+
+            result.clear()
+            with raises(KeyError):
+                result.get()
+            result.clear()  # Do nothing if there is no cache.
+
+
+@pytest.mark.asyncio
+async def test_memorized_func_call_async(memory):
+    async def ff(x, counter):
+        await asyncio.sleep(0.1)
+        counter[x] = counter.get(x, 0) + 1
+        return counter[x]
+
+    gg = memory.cache(ff, ignore=["counter"])
+
+    counter = {}
+    assert await gg(2, counter) == 1
+    assert await gg(2, counter) == 1
+
+    x, meta = await gg.call(2, counter)
+    assert x == 2, "f has not been called properly"
+    assert isinstance(meta, dict), "Metadata are not returned by MemorizedFunc.call."
diff --git a/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py b/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..55395c97feb64110e01631e9591d113a092ce992
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/test/test_missing_multiprocessing.py
@@ -0,0 +1,36 @@
+"""
+Pyodide and other single-threaded Python builds will be missing the
+_multiprocessing module. Test that joblib still works in this environment.
+"""
+
+import os
+import subprocess
+import sys
+
+
+def test_missing_multiprocessing(tmp_path):
+    """
+    Test that import joblib works even if _multiprocessing is missing.
+
+    pytest has already imported everything from joblib. The most reasonable
+    way to test importing joblib with a modified environment is to invoke a
+    separate Python process. This also ensures that we don't break other
+    tests by importing a bad `_multiprocessing` module.
+    """
+    (tmp_path / "_multiprocessing.py").write_text(
+        'raise ImportError("No _multiprocessing module!")'
+    )
+    env = dict(os.environ)
+    # For the subprocess, use the current sys.path with our custom version of
+    # multiprocessing inserted.
+ env["PYTHONPATH"] = ":".join([str(tmp_path)] + sys.path) + subprocess.check_call( + [ + sys.executable, + "-c", + "import joblib, math; " + "joblib.Parallel(n_jobs=1)(" + "joblib.delayed(math.sqrt)(i**2) for i in range(10))", + ], + env=env, + ) diff --git a/lib/python3.10/site-packages/joblib/test/test_module.py b/lib/python3.10/site-packages/joblib/test/test_module.py new file mode 100644 index 0000000000000000000000000000000000000000..66863e2d3165e783d2e8085e4b52e67b5409df95 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_module.py @@ -0,0 +1,55 @@ +import sys + +import joblib +from joblib.test.common import with_multiprocessing +from joblib.testing import check_subprocess_call + + +def test_version(): + assert hasattr(joblib, "__version__"), ( + "There are no __version__ argument on the joblib module" + ) + + +@with_multiprocessing +def test_no_start_method_side_effect_on_import(): + # check that importing joblib does not implicitly set the global + # start_method for multiprocessing. + code = """if True: + import joblib + import multiprocessing as mp + # The following line would raise RuntimeError if the + # start_method is already set. + mp.set_start_method("loky") + """ + check_subprocess_call([sys.executable, "-c", code]) + + +@with_multiprocessing +def test_no_semaphore_tracker_on_import(): + # check that importing joblib does not implicitly spawn a resource tracker + # or a semaphore tracker + code = """if True: + import joblib + from multiprocessing import semaphore_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "multiprocessing.semaphore_tracker has been spawned on import" + assert semaphore_tracker._semaphore_tracker._fd is None, msg""" + if sys.version_info >= (3, 8): + # semaphore_tracker was renamed in Python 3.8: + code = code.replace("semaphore_tracker", "resource_tracker") + check_subprocess_call([sys.executable, "-c", code]) + + +@with_multiprocessing +def test_no_resource_tracker_on_import(): + code = """if True: + import joblib + from joblib.externals.loky.backend import resource_tracker + # The following line would raise RuntimeError if the + # start_method is already set. + msg = "loky.resource_tracker has been spawned on import" + assert resource_tracker._resource_tracker._fd is None, msg + """ + check_subprocess_call([sys.executable, "-c", code]) diff --git a/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..ed320497b42eec7a5d1c09c530ff33b28d944653 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle.py @@ -0,0 +1,1225 @@ +"""Test the numpy pickler as a replacement of the standard pickler.""" + +import bz2 +import copy +import gzip +import io +import mmap +import os +import pickle +import random +import re +import socket +import sys +import warnings +import zlib +from contextlib import closing +from pathlib import Path + +try: + import lzma +except ImportError: + lzma = None + +import pytest + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. 
+from joblib import numpy_pickle, register_compressor
+from joblib.compressor import (
+    _COMPRESSORS,
+    _LZ4_PREFIX,
+    LZ4_NOT_INSTALLED_ERROR,
+    BinaryZlibFile,
+    CompressorWrapper,
+)
+from joblib.numpy_pickle_utils import (
+    _IO_BUFFER_SIZE,
+    _detect_compressor,
+    _ensure_native_byte_order,
+    _is_numpy_array_byte_order_mismatch,
+)
+from joblib.test import data
+from joblib.test.common import (
+    memory_used,
+    np,
+    with_lz4,
+    with_memory_profiler,
+    with_numpy,
+    without_lz4,
+)
+from joblib.testing import parametrize, raises, warns
+
+###############################################################################
+# Define a list of standard types.
+# Borrowed from dill, initial author: Michael McKerns:
+# http://dev.danse.us/trac/pathos/browser/dill/dill_test2.py
+
+typelist = []
+
+# testing types
+_none = None
+typelist.append(_none)
+_type = type
+typelist.append(_type)
+_bool = bool(1)
+typelist.append(_bool)
+_int = int(1)
+typelist.append(_int)
+_float = float(1)
+typelist.append(_float)
+_complex = complex(1)
+typelist.append(_complex)
+_string = str(1)
+typelist.append(_string)
+_tuple = ()
+typelist.append(_tuple)
+_list = []
+typelist.append(_list)
+_dict = {}
+typelist.append(_dict)
+_builtin = len
+typelist.append(_builtin)
+
+
+def _function(x):
+    yield x
+
+
+class _class:
+    def _method(self):
+        pass
+
+
+class _newclass(object):
+    def _method(self):
+        pass
+
+
+typelist.append(_function)
+typelist.append(_class)
+typelist.append(_newclass)
+_instance = _class()
+typelist.append(_instance)
+_object = _newclass()
+typelist.append(_object)
+
+
+###############################################################################
+# Tests
+
+
+@parametrize("compress", [0, 1])
+@parametrize("member", typelist)
+def test_standard_types(tmpdir, compress, member):
+    # Test pickling and saving with standard types.
+    filename = tmpdir.join("test.pkl").strpath
+    numpy_pickle.dump(member, filename, compress=compress)
+    _member = numpy_pickle.load(filename)
+    # We compare the pickled instance to the reloaded one only if it
+    # can be compared to a copied one
+    if member == copy.deepcopy(member):
+        assert member == _member
+
+
+def test_value_error():
+    # Test inverting the input arguments to dump
+    with raises(ValueError):
+        numpy_pickle.dump("foo", dict())
+
+
+@parametrize("wrong_compress", [-1, 10, dict()])
+def test_compress_level_error(wrong_compress):
+    # Verify that passing an invalid compress argument raises an error.
+    exception_msg = 'Non valid compress level given: "{0}"'.format(wrong_compress)
+    with raises(ValueError) as excinfo:
+        numpy_pickle.dump("dummy", "foo", compress=wrong_compress)
+    excinfo.match(exception_msg)
+
+
+@with_numpy
+@parametrize("compress", [False, True, 0, 3, "zlib"])
+def test_numpy_persistence(tmpdir, compress):
+    filename = tmpdir.join("test.pkl").strpath
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample((10, 2))
+    # We use 'a.T' to have a non C-contiguous array.
+    for index, obj in enumerate(((a,), (a.T,), (a, a), [a, a, a])):
+        filenames = numpy_pickle.dump(obj, filename, compress=compress)
+
+        # All is cached in one file
+        assert len(filenames) == 1
+        # Check that only one file was created
+        assert filenames[0] == filename
+        # Check that this file does exist
+        assert os.path.exists(filenames[0])
+
+        # Unpickle the object
+        obj_ = numpy_pickle.load(filename)
+        # Check that the items are indeed arrays
+        for item in obj_:
+            assert isinstance(item, np.ndarray)
+        # And finally, check that all the values are equal.
+        np.testing.assert_array_equal(np.array(obj), np.array(obj_))
+
+    # Now test with an array subclass
+    obj = np.memmap(filename + "mmap", mode="w+", shape=4, dtype=np.float64)
+    filenames = numpy_pickle.dump(obj, filename, compress=compress)
+    # All is cached in one file
+    assert len(filenames) == 1
+
+    obj_ = numpy_pickle.load(filename)
+    if type(obj) is not np.memmap and hasattr(obj, "__array_prepare__"):
+        # We don't reconstruct memmaps
+        assert isinstance(obj_, type(obj))
+
+    np.testing.assert_array_equal(obj_, obj)
+
+    # Test with an object containing multiple numpy arrays
+    obj = ComplexTestObject()
+    filenames = numpy_pickle.dump(obj, filename, compress=compress)
+    # All is cached in one file
+    assert len(filenames) == 1
+
+    obj_loaded = numpy_pickle.load(filename)
+    assert isinstance(obj_loaded, type(obj))
+    np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
+    np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
+    np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)
+
+
+@with_numpy
+def test_numpy_persistence_bufferred_array_compression(tmpdir):
+    big_array = np.ones((_IO_BUFFER_SIZE + 100), dtype=np.uint8)
+    filename = tmpdir.join("test.pkl").strpath
+    numpy_pickle.dump(big_array, filename, compress=True)
+    arr_reloaded = numpy_pickle.load(filename)
+
+    np.testing.assert_array_equal(big_array, arr_reloaded)
+
+
+@with_numpy
+def test_memmap_persistence(tmpdir):
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    filename = tmpdir.join("test1.pkl").strpath
+    numpy_pickle.dump(a, filename)
+    b = numpy_pickle.load(filename, mmap_mode="r")
+
+    assert isinstance(b, np.memmap)
+
+    # Test with an object containing multiple numpy arrays
+    filename = tmpdir.join("test2.pkl").strpath
+    obj = ComplexTestObject()
+    numpy_pickle.dump(obj, filename)
+    obj_loaded = numpy_pickle.load(filename, mmap_mode="r")
+    assert isinstance(obj_loaded, type(obj))
+    assert isinstance(obj_loaded.array_float, np.memmap)
+    assert not obj_loaded.array_float.flags.writeable
+    assert isinstance(obj_loaded.array_int, np.memmap)
+    assert not obj_loaded.array_int.flags.writeable
+    # Memory map not allowed for numpy object arrays
+    assert not isinstance(obj_loaded.array_obj, np.memmap)
+    np.testing.assert_array_equal(obj_loaded.array_float, obj.array_float)
+    np.testing.assert_array_equal(obj_loaded.array_int, obj.array_int)
+    np.testing.assert_array_equal(obj_loaded.array_obj, obj.array_obj)
+
+    # Test that we can write into memmapped arrays
+    obj_loaded = numpy_pickle.load(filename, mmap_mode="r+")
+    assert obj_loaded.array_float.flags.writeable
+    obj_loaded.array_float[0:10] = 10.0
+    assert obj_loaded.array_int.flags.writeable
+    obj_loaded.array_int[0:10] = 10
+
+    obj_reloaded = numpy_pickle.load(filename, mmap_mode="r")
+    np.testing.assert_array_equal(obj_reloaded.array_float, obj_loaded.array_float)
+    np.testing.assert_array_equal(obj_reloaded.array_int, obj_loaded.array_int)
+
+    # Test that w+ mode is caught and the mode is switched to r+
+    numpy_pickle.load(filename, mmap_mode="w+")
+    assert obj_loaded.array_int.flags.writeable
+    assert obj_loaded.array_int.mode == "r+"
+    assert obj_loaded.array_float.flags.writeable
+    assert obj_loaded.array_float.mode == "r+"
+
+
+@with_numpy
+def test_memmap_persistence_mixed_dtypes(tmpdir):
+    # Loading data structures that have sub-arrays with dtype=object
+    # should not prevent memmapping on fixed size dtype sub-arrays.
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    b = np.array([1, "b"], dtype=object)
+    construct = (a, b)
+    filename = tmpdir.join("test.pkl").strpath
+    numpy_pickle.dump(construct, filename)
+    a_clone, b_clone = numpy_pickle.load(filename, mmap_mode="r")
+
+    # the floating point array has been memory mapped
+    assert isinstance(a_clone, np.memmap)
+
+    # the object-dtype array has been loaded in memory
+    assert not isinstance(b_clone, np.memmap)
+
+
+@with_numpy
+def test_masked_array_persistence(tmpdir):
+    # The special-case pickler fails because saving a masked_array is not
+    # implemented; it just delegates to the standard pickler.
+    rnd = np.random.RandomState(0)
+    a = rnd.random_sample(10)
+    a = np.ma.masked_greater(a, 0.5)
+    filename = tmpdir.join("test.pkl").strpath
+    numpy_pickle.dump(a, filename)
+    b = numpy_pickle.load(filename, mmap_mode="r")
+    assert isinstance(b, np.ma.masked_array)
+
+
+@with_numpy
+def test_compress_mmap_mode_warning(tmpdir):
+    # Test the warning in case of compress + mmap_mode
+    rnd = np.random.RandomState(0)
+    obj = rnd.random_sample(10)
+    this_filename = tmpdir.join("test.pkl").strpath
+    numpy_pickle.dump(obj, this_filename, compress=1)
+    with warns(UserWarning) as warninfo:
+        reloaded_obj = numpy_pickle.load(this_filename, mmap_mode="r+")
+    debug_msg = "\n".join([str(w) for w in warninfo])
+    warninfo = [w.message for w in warninfo]
+    assert not isinstance(reloaded_obj, np.memmap)
+    np.testing.assert_array_equal(obj, reloaded_obj)
+    assert len(warninfo) == 1, debug_msg
+    assert (
+        str(warninfo[0]) == 'mmap_mode "r+" is not compatible with compressed '
+        f'file {this_filename}. "r+" flag will be ignored.'
+    )
+
+
+@with_numpy
+@with_memory_profiler
+@parametrize("compress", [True, False])
+def test_memory_usage(tmpdir, compress):
+    # Verify memory stays within expected bounds.
+    filename = tmpdir.join("test.pkl").strpath
+    small_array = np.ones((10, 10))
+    big_array = np.ones(shape=100 * int(1e6), dtype=np.uint8)
+
+    for obj in (small_array, big_array):
+        size = obj.nbytes / 1e6
+        obj_filename = filename + str(np.random.randint(0, 1000))
+        mem_used = memory_used(numpy_pickle.dump, obj, obj_filename, compress=compress)
+
+        # The memory used to dump the object shouldn't exceed the buffer
+        # size used to write array chunks (16MB).
+        write_buf_size = _IO_BUFFER_SIZE + 16 * 1024**2 / 1e6
+        assert mem_used <= write_buf_size
+
+        mem_used = memory_used(numpy_pickle.load, obj_filename)
+        # The memory used should be less than the array size + the buffer
+        # size used to read the array chunk by chunk.
+ # memory used should be less than array size + buffer size used to + # read the array chunk by chunk. + read_buf_size = 32 + _IO_BUFFER_SIZE # MiB + assert mem_used < size + read_buf_size + + +@with_numpy +def test_compressed_pickle_dump_and_load(tmpdir): + expected_list = [ + np.arange(5, dtype=np.dtype("<i8")), + np.arange(5, dtype=np.dtype("<f8")), + np.array([1, "abc", {"a": 1, "b": 2}], dtype="O"), + np.arange(256, dtype=np.uint8).tobytes(), + "C'est l'\xe9t\xe9 !", + ] + + fname = tmpdir.join("temp.pkl.gz").strpath + + dumped_filenames = numpy_pickle.dump(expected_list, fname, compress=1) + assert len(dumped_filenames) == 1 + result_list = numpy_pickle.load(fname) + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + + +@with_numpy +def test_memmap_load(tmpdir): + little_endian_dtype = np.dtype("<i8") + big_endian_dtype = np.dtype(">i8") + all_dtypes = (little_endian_dtype, big_endian_dtype) + + le_array = np.arange(5, dtype=little_endian_dtype) + be_array = np.arange(5, dtype=big_endian_dtype) + + fname = tmpdir.join("temp.pkl").strpath + + numpy_pickle.dump([le_array, be_array], fname) + + le_array_native_load, be_array_native_load = numpy_pickle.load( + fname, ensure_native_byte_order=True + ) + + assert le_array_native_load.dtype == be_array_native_load.dtype + assert le_array_native_load.dtype in all_dtypes + + le_array_nonnative_load, be_array_nonnative_load = numpy_pickle.load( + fname, ensure_native_byte_order=False + ) + + assert le_array_nonnative_load.dtype == le_array.dtype + assert be_array_nonnative_load.dtype == be_array.dtype + + +def test_invalid_parameters_raise(): + expected_msg = ( + "Native byte ordering can only be enforced if 'mmap_mode' parameter " + "is set to None, but got 'mmap_mode=r+' instead." + ) + + with raises(ValueError, match=re.escape(expected_msg)): + numpy_pickle.load( + "/path/to/some/dump.pkl", ensure_native_byte_order=True, mmap_mode="r+" + ) + + +def _check_pickle(filename, expected_list, mmap_mode=None): + """Helper function to test joblib pickle content. + + Note: currently only pickles containing an iterable are supported + by this function. + """ + version_match = re.match(r".+py(\d)(\d).+", filename) + py_version_used_for_writing = int(version_match.group(1)) + + py_version_to_default_pickle_protocol = {2: 2, 3: 3} + pickle_reading_protocol = py_version_to_default_pickle_protocol.get(3, 4) + pickle_writing_protocol = py_version_to_default_pickle_protocol.get( + py_version_used_for_writing, 4 + ) + if pickle_reading_protocol >= pickle_writing_protocol: + try: + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter("always") + result_list = numpy_pickle.load(filename, mmap_mode=mmap_mode) + filename_base = os.path.basename(filename) + expected_nb_deprecation_warnings = ( + 1 if ("_0.9" in filename_base or "_0.8.4" in filename_base) else 0 + ) + + expected_nb_user_warnings = ( + 3 + if (re.search("_0.1.+.pkl$", filename_base) and mmap_mode is not None) + else 0 + ) + expected_nb_warnings = ( + expected_nb_deprecation_warnings + expected_nb_user_warnings + ) + assert len(warninfo) == expected_nb_warnings, ( + "Did not get the expected number of warnings. Expected "
Expected " + f"{expected_nb_warnings} but got warnings: " + f"{[w.message for w in warninfo]}" + ) + + deprecation_warnings = [ + w for w in warninfo if issubclass(w.category, DeprecationWarning) + ] + user_warnings = [w for w in warninfo if issubclass(w.category, UserWarning)] + for w in deprecation_warnings: + assert ( + str(w.message) + == "The file '{0}' has been generated with a joblib " + "version less than 0.10. Please regenerate this " + "pickle file.".format(filename) + ) + + for w in user_warnings: + escaped_filename = re.escape(filename) + assert re.search( + f"memmapped.+{escaped_filename}.+segmentation fault", str(w.message) + ) + + for result, expected in zip(result_list, expected_list): + if isinstance(expected, np.ndarray): + expected = _ensure_native_byte_order(expected) + assert result.dtype == expected.dtype + np.testing.assert_equal(result, expected) + else: + assert result == expected + except Exception as exc: + # When trying to read with python 3 a pickle generated + # with python 2 we expect a user-friendly error + if py_version_used_for_writing == 2: + assert isinstance(exc, ValueError) + message = ( + "You may be trying to read with " + "python 3 a joblib pickle generated with python 2." + ) + assert message in str(exc) + elif filename.endswith(".lz4") and with_lz4.args[0]: + assert isinstance(exc, ValueError) + assert LZ4_NOT_INSTALLED_ERROR in str(exc) + else: + raise + else: + # Pickle protocol used for writing is too high. We expect a + # "unsupported pickle protocol" error message + try: + numpy_pickle.load(filename) + raise AssertionError( + "Numpy pickle loading should have raised a ValueError exception" + ) + except ValueError as e: + message = "unsupported pickle protocol: {0}".format(pickle_writing_protocol) + assert message in str(e.args) + + +@with_numpy +def test_joblib_pickle_across_python_versions(): + # We need to be specific about dtypes in particular endianness + # because the pickles can be generated on one architecture and + # the tests run on another one. See + # https://github.com/joblib/joblib/issues/279. + expected_list = [ + np.arange(5, dtype=np.dtype("i8"), ("", ">f8")]), + np.arange(3, dtype=np.dtype(">i8")), + np.arange(3, dtype=np.dtype(">f8")), + ] + + # Verify the byteorder mismatch is correctly detected. + for array in be_arrays: + if sys.byteorder == "big": + assert not _is_numpy_array_byte_order_mismatch(array) + else: + assert _is_numpy_array_byte_order_mismatch(array) + converted = _ensure_native_byte_order(array) + if converted.dtype.fields: + for f in converted.dtype.fields.values(): + f[0].byteorder == "=" + else: + assert converted.dtype.byteorder == "=" + + # List of numpy arrays with little endian byteorder. + le_arrays = [ + np.array([(1, 2.0), (3, 4.0)], dtype=[("", " size + np.testing.assert_array_equal(obj, memmaps) + + +def test_register_compressor(tmpdir): + # Check that registering compressor file works. 
+ compressor_name = "test-name" + compressor_prefix = "test-prefix" + + class BinaryCompressorTestFile(io.BufferedIOBase): + pass + + class BinaryCompressorTestWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__( + self, obj=BinaryCompressorTestFile, prefix=compressor_prefix + ) + + register_compressor(compressor_name, BinaryCompressorTestWrapper()) + + assert _COMPRESSORS[compressor_name].fileobj_factory == BinaryCompressorTestFile + assert _COMPRESSORS[compressor_name].prefix == compressor_prefix + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@parametrize("invalid_name", [1, (), {}]) +def test_register_compressor_invalid_name(invalid_name): + # Test that registering an invalid compressor name is not allowed. + with raises(ValueError) as excinfo: + register_compressor(invalid_name, None) + excinfo.match("Compressor name should be a string") + + +def test_register_compressor_invalid_fileobj(): + # Test that registering an invalid file object is not allowed. + + class InvalidFileObject: + pass + + class InvalidFileObjectWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__(self, obj=InvalidFileObject, prefix=b"prefix") + + with raises(ValueError) as excinfo: + register_compressor("invalid", InvalidFileObjectWrapper()) + + excinfo.match( + "Compressor 'fileobj_factory' attribute should implement " + "the file object interface" + ) + + +class AnotherZlibCompressorWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__(self, obj=BinaryZlibFile, prefix=b"prefix") + + +class StandardLibGzipCompressorWrapper(CompressorWrapper): + def __init__(self): + CompressorWrapper.__init__(self, obj=gzip.GzipFile, prefix=b"prefix") + + +def test_register_compressor_already_registered(): + # Test registration of existing compressor files. + compressor_name = "test-name" + + # register a test compressor + register_compressor(compressor_name, AnotherZlibCompressorWrapper()) + + with raises(ValueError) as excinfo: + register_compressor(compressor_name, StandardLibGzipCompressorWrapper()) + excinfo.match("Compressor '{}' already registered.".format(compressor_name)) + + register_compressor(compressor_name, StandardLibGzipCompressorWrapper(), force=True) + + assert compressor_name in _COMPRESSORS + assert _COMPRESSORS[compressor_name].fileobj_factory == gzip.GzipFile + + # Remove this dummy compressor file from extra compressors because other + # tests might fail because of this + _COMPRESSORS.pop(compressor_name) + + +@with_lz4 +def test_lz4_compression(tmpdir): + # Check that lz4 can be used when dependency is available. + import lz4.frame + + compressor = "lz4" + assert compressor in _COMPRESSORS + assert _COMPRESSORS[compressor].fileobj_factory == lz4.frame.LZ4FrameFile + + fname = tmpdir.join("test.pkl").strpath + data = "test data" + numpy_pickle.dump(data, fname, compress=compressor) + + with open(fname, "rb") as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + # Test that LZ4 is applied based on file extension + numpy_pickle.dump(data, fname + ".lz4") + with open(fname, "rb") as f: + assert f.read(len(_LZ4_PREFIX)) == _LZ4_PREFIX + assert numpy_pickle.load(fname) == data + + +@without_lz4 +def test_lz4_compression_without_lz4(tmpdir): + # Check that lz4 cannot be used when dependency is not available. 
+ fname = tmpdir.join("test.nolz4").strpath + data = "test data" + msg = LZ4_NOT_INSTALLED_ERROR + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname, compress="lz4") + excinfo.match(msg) + + with raises(ValueError) as excinfo: + numpy_pickle.dump(data, fname + ".lz4") + excinfo.match(msg) + + +protocols = [pickle.DEFAULT_PROTOCOL] +if pickle.HIGHEST_PROTOCOL != pickle.DEFAULT_PROTOCOL: + protocols.append(pickle.HIGHEST_PROTOCOL) + + +@with_numpy +@parametrize("protocol", protocols) +def test_memmap_alignment_padding(tmpdir, protocol): + # Test that memmaped arrays returned by numpy.load are correctly aligned + fname = tmpdir.join("test.mmap").strpath + + a = np.random.randn(2) + numpy_pickle.dump(a, fname, protocol=protocol) + memmap = numpy_pickle.load(fname, mmap_mode="r") + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(a, memmap) + assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0 + assert memmap.flags.aligned + + array_list = [ + np.random.randn(2), + np.random.randn(2), + np.random.randn(2), + np.random.randn(2), + ] + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join("test1.mmap").strpath + numpy_pickle.dump(array_list, fname, protocol=protocol) + l_reloaded = numpy_pickle.load(fname, mmap_mode="r") + + for idx, memmap in enumerate(l_reloaded): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_list[idx], memmap) + assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0 + assert memmap.flags.aligned + + array_dict = { + "a0": np.arange(2, dtype=np.uint8), + "a1": np.arange(3, dtype=np.uint8), + "a2": np.arange(5, dtype=np.uint8), + "a3": np.arange(7, dtype=np.uint8), + "a4": np.arange(11, dtype=np.uint8), + "a5": np.arange(13, dtype=np.uint8), + "a6": np.arange(17, dtype=np.uint8), + "a7": np.arange(19, dtype=np.uint8), + "a8": np.arange(23, dtype=np.uint8), + } + + # On Windows OSError 22 if reusing the same path for memmap ... + fname = tmpdir.join("test2.mmap").strpath + numpy_pickle.dump(array_dict, fname, protocol=protocol) + d_reloaded = numpy_pickle.load(fname, mmap_mode="r") + + for key, memmap in d_reloaded.items(): + assert isinstance(memmap, np.memmap) + np.testing.assert_array_equal(array_dict[key], memmap) + assert memmap.ctypes.data % numpy_pickle.NUMPY_ARRAY_ALIGNMENT_BYTES == 0 + assert memmap.flags.aligned diff --git a/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..98aab72fb88627291c64328288e296e97b51d5ad --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_compat.py @@ -0,0 +1,16 @@ +"""Test the old numpy pickler, compatibility version.""" + +# numpy_pickle is not a drop-in replacement of pickle, as it takes +# filenames instead of open files as arguments. +from joblib import numpy_pickle_compat + + +def test_z_file(tmpdir): + # Test saving and loading data with Zfiles. 
+ filename = tmpdir.join("test.pkl").strpath + data = numpy_pickle_compat.asbytes("Foo, \n Bar, baz, \n\nfoobar") + with open(filename, "wb") as f: + numpy_pickle_compat.write_zfile(f, data) + with open(filename, "rb") as f: + data_read = numpy_pickle_compat.read_zfile(f) + assert data == data_read diff --git a/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3e3c88a905ed265275b052147575c9fb32875568 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_numpy_pickle_utils.py @@ -0,0 +1,9 @@ +from joblib.compressor import BinaryZlibFile +from joblib.testing import parametrize + + +@parametrize("filename", ["test", "test"]) # testing str and unicode names +def test_binary_zlib_file(tmpdir, filename): + """Testing creation of files depending on the type of the filenames.""" + binary_file = BinaryZlibFile(tmpdir.join(filename).strpath, mode="wb") + binary_file.close() diff --git a/lib/python3.10/site-packages/joblib/test/test_parallel.py b/lib/python3.10/site-packages/joblib/test/test_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..db6218f981c3ca6b9a643a12dc5a8aeef50cfc9f --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_parallel.py @@ -0,0 +1,2250 @@ +""" +Test the parallel module. +""" + +# Author: Gael Varoquaux +# Copyright (c) 2010-2011 Gael Varoquaux +# License: BSD Style, 3 clauses. + +import mmap +import os +import re +import sys +import threading +import time +import warnings +import weakref +from contextlib import nullcontext +from math import sqrt +from multiprocessing import TimeoutError +from pickle import PicklingError +from time import sleep +from traceback import format_exception + +import pytest + +import joblib +from joblib import dump, load, parallel +from joblib._multiprocessing_helpers import mp +from joblib.test.common import ( + IS_GIL_DISABLED, + np, + with_multiprocessing, + with_numpy, +) +from joblib.testing import check_subprocess_call, parametrize, raises, skipif, warns + +if mp is not None: + # Loky is not available if multiprocessing is not + from joblib.externals.loky import get_reusable_executor + +from queue import Queue + +try: + import posix +except ImportError: + posix = None + +try: + from ._openmp_test_helper.parallel_sum import parallel_sum +except ImportError: + parallel_sum = None + +try: + import distributed +except ImportError: + distributed = None + +from joblib._parallel_backends import ( + LokyBackend, + MultiprocessingBackend, + ParallelBackendBase, + SequentialBackend, + ThreadingBackend, +) +from joblib.parallel import ( + BACKENDS, + Parallel, + cpu_count, + delayed, + effective_n_jobs, + mp, + parallel_backend, + parallel_config, + register_parallel_backend, +) + +RETURN_GENERATOR_BACKENDS = BACKENDS.copy() +RETURN_GENERATOR_BACKENDS.pop("multiprocessing", None) + +ALL_VALID_BACKENDS = [None] + sorted(BACKENDS.keys()) +# Add instances of backend classes deriving from ParallelBackendBase +ALL_VALID_BACKENDS += [BACKENDS[backend_str]() for backend_str in BACKENDS] +if mp is None: + PROCESS_BACKENDS = [] +else: + PROCESS_BACKENDS = ["multiprocessing", "loky"] +PARALLEL_BACKENDS = PROCESS_BACKENDS + ["threading"] + +if hasattr(mp, "get_context"): + # Custom multiprocessing context in Python 3.4+ + ALL_VALID_BACKENDS.append(mp.get_context("spawn")) + + +def get_default_backend_instance(): + # The default backend can be changed before 
running the tests through + # the JOBLIB_DEFAULT_PARALLEL_BACKEND environment variable so we need to use + # parallel.DEFAULT_BACKEND here and not + # from joblib.parallel import DEFAULT_BACKEND + return BACKENDS[parallel.DEFAULT_BACKEND] + + +def get_workers(backend): + return getattr(backend, "_pool", getattr(backend, "_workers", None)) + + +def division(x, y): + return x / y + + +def square(x): + return x**2 + + +class MyExceptionWithFinickyInit(Exception): + """An exception class with a non-trivial __init__""" + + def __init__(self, a, b, c, d): + pass + + +def exception_raiser(x, custom_exception=False): + if x == 7: + raise ( + MyExceptionWithFinickyInit("a", "b", "c", "d") + if custom_exception + else ValueError + ) + return x + + +def interrupt_raiser(x): + time.sleep(0.05) + raise KeyboardInterrupt + + +def f(x, y=0, z=0): + """A module-level function so that it can be spawned with + multiprocessing. + """ + return x**2 + y + z + + +def _active_backend_type(): + return type(parallel.get_active_backend()[0]) + + +def parallel_func(inner_n_jobs, backend): + return Parallel(n_jobs=inner_n_jobs, backend=backend)( + delayed(square)(i) for i in range(3) + ) + + +############################################################################### +def test_cpu_count(): + assert cpu_count() > 0 + + +def test_effective_n_jobs(): + assert effective_n_jobs() > 0 + + +@parametrize("context", [parallel_config, parallel_backend]) +@pytest.mark.parametrize( + "backend_n_jobs, expected_n_jobs", + [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)], + ids=["positive-int", "negative-int", "None"], +) +@with_multiprocessing +def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs): + # check the number of effective jobs when `n_jobs=None` + # non-regression test for https://github.com/joblib/joblib/issues/984 + with context("threading", n_jobs=backend_n_jobs): + # when using a backend, the default number of jobs will be the one + # set in the backend + assert effective_n_jobs(n_jobs=None) == expected_n_jobs + # without any backend, None will default to a single job + assert effective_n_jobs(n_jobs=None) == 1 + + +############################################################################### +# Test parallel + + +@parametrize("backend", ALL_VALID_BACKENDS) +@parametrize("n_jobs", [1, 2, -1, -2]) +@parametrize("verbose", [2, 11, 100]) +def test_simple_parallel(backend, n_jobs, verbose): + assert [square(x) for x in range(5)] == Parallel( + n_jobs=n_jobs, backend=backend, verbose=verbose + )(delayed(square)(x) for x in range(5)) + + +@parametrize("backend", ALL_VALID_BACKENDS) +@parametrize("n_jobs", [1, 2]) +def test_parallel_pretty_print(backend, n_jobs): + n_tasks = 100 + pattern = re.compile(r"(Done\s+\d+ out of \d+ \|)") + + class ParallelLog(Parallel): + messages = [] + + def _print(self, msg): + self.messages.append(msg) + + executor = ParallelLog(n_jobs=n_jobs, backend=backend, verbose=10000) + executor([delayed(f)(i) for i in range(n_tasks)]) + lens = set() + for message in executor.messages: + if s := pattern.search(message): + a, b = s.span() + lens.add(b - a) + assert len(lens) == 1
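The core pattern all of the following tests drive, as a minimal sketch: Parallel consumes an iterable of delayed(...) calls and returns results in submission order, whatever backend and n_jobs are in effect.

    from joblib import Parallel, delayed

    def sq(x):
        return x ** 2

    assert Parallel(n_jobs=2)(delayed(sq)(i) for i in range(5)) == [0, 1, 4, 9, 16]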
+@parametrize("backend", ALL_VALID_BACKENDS) +def test_main_thread_renamed_no_warning(backend, monkeypatch): + # Check that no default backend relies on the name of the main thread: + # https://github.com/joblib/joblib/issues/180#issuecomment-253266247 + # Some programs use a different name for the main thread. This is the case + # for uWSGI apps for instance. + monkeypatch.setattr( + target=threading.current_thread(), + name="name", + value="some_new_name_for_the_main_thread", + ) + + with warnings.catch_warnings(record=True) as warninfo: + results = Parallel(n_jobs=2, backend=backend)( + delayed(square)(x) for x in range(3) + ) + assert results == [0, 1, 4] + + # Due to the default parameters of LokyBackend, there is a chance that + # warninfo catches warnings from worker timeouts. We remove them if they + # exist. We also remove DeprecationWarnings, which could lead to false + # negatives. + warninfo = [ + w + for w in warninfo + if "worker timeout" not in str(w.message) + and not isinstance(w.message, DeprecationWarning) + ] + + # Under Python 3.13, if backend='multiprocessing' you will get a + # warning saying that forking a multi-threaded process is not a good idea; + # we ignore these warnings in this test + if backend in [None, "multiprocessing"] or isinstance( + backend, MultiprocessingBackend + ): + message_part = "multi-threaded, use of fork() may lead to deadlocks" + warninfo = [w for w in warninfo if message_part not in str(w.message)] + + # The multiprocessing backend will raise a warning when detecting that it + # is started from the non-main thread. Let's check that there is no false + # positive because of the name change. + assert len(warninfo) == 0 + + +def _assert_warning_nested(backend, inner_n_jobs, expected): + with warnings.catch_warnings(record=True) as warninfo: + warnings.simplefilter("always") + parallel_func(backend=backend, inner_n_jobs=inner_n_jobs) + + warninfo = [w.message for w in warninfo] + if expected: + if warninfo: + warnings_are_correct = all( + "backed parallel loops cannot" in each.args[0] for each in warninfo + ) + # With free-threaded Python, when the outer backend is threading, + # we might see more than one warning + warnings_have_the_right_length = ( + len(warninfo) >= 1 if IS_GIL_DISABLED else len(warninfo) == 1 + ) + return warnings_are_correct and warnings_have_the_right_length + + return False + else: + assert not warninfo + return True + + +@with_multiprocessing +@parametrize( + "parent_backend,child_backend,expected", + [ + ("loky", "multiprocessing", True), + ("loky", "loky", False), + ("multiprocessing", "multiprocessing", True), + ("multiprocessing", "loky", True), + ("threading", "multiprocessing", True), + ("threading", "loky", True), + ], +) +def test_nested_parallel_warnings(parent_backend, child_backend, expected): + # no warnings if inner_n_jobs=1 + Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=1, expected=False + ) + for _ in range(5) + ) + + # warnings if inner_n_jobs != 1 and expected + res = Parallel(n_jobs=2, backend=parent_backend)( + delayed(_assert_warning_nested)( + backend=child_backend, inner_n_jobs=2, expected=expected + ) + for _ in range(5) + ) + + # warning handling is not thread safe. One thread might see multiple + # warnings or no warning at all.
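The nesting scenario probed here, in isolation (a hedged sketch; it assumes a multiprocessing-capable platform): a process-based inner Parallel inside a process-based outer one cannot fork again, so joblib warns and degrades the inner loop to sequential execution while still returning correct results.

    from joblib import Parallel, delayed

    def inner():
        # expected to emit the "...-backed parallel loops cannot be nested"
        # warning and fall back to n_jobs=1 inside each worker
        return Parallel(n_jobs=2, backend="multiprocessing")(
            delayed(abs)(-i) for i in range(3)
        )

    outer = Parallel(n_jobs=2, backend="loky")(delayed(inner)() for _ in range(2))
    assert outer == [[0, 1, 2], [0, 1, 2]]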
+ if parent_backend == "threading": + assert any(res) + else: + assert all(res) + + +@with_multiprocessing +@parametrize("backend", ["loky", "multiprocessing", "threading"]) +def test_background_thread_parallelism(backend): + is_run_parallel = [False] + + def background_thread(is_run_parallel): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=2)(delayed(sleep)(0.1) for _ in range(4)) + print(len(warninfo)) + is_run_parallel[0] = len(warninfo) == 0 + + t = threading.Thread(target=background_thread, args=(is_run_parallel,)) + t.start() + t.join() + assert is_run_parallel[0] + + +def nested_loop(backend): + Parallel(n_jobs=2, backend=backend)(delayed(square)(0.01) for _ in range(2)) + + +@parametrize("child_backend", BACKENDS) +@parametrize("parent_backend", BACKENDS) +def test_nested_loop(parent_backend, child_backend): + Parallel(n_jobs=2, backend=parent_backend)( + delayed(nested_loop)(child_backend) for _ in range(2) + ) + + +def raise_exception(backend): + raise ValueError + + +@with_multiprocessing +def test_nested_loop_with_exception_with_loky(): + with raises(ValueError): + with Parallel(n_jobs=2, backend="loky") as parallel: + parallel([delayed(nested_loop)("loky"), delayed(raise_exception)("loky")]) + + +def test_mutate_input_with_threads(): + """Input is mutable when using the threading backend""" + q = Queue(maxsize=5) + Parallel(n_jobs=2, backend="threading")(delayed(q.put)(1) for _ in range(5)) + assert q.full() + + +@parametrize("n_jobs", [1, 2, 3]) +def test_parallel_kwargs(n_jobs): + """Check the keyword argument processing of pmap.""" + lst = range(10) + assert [f(x, y=1) for x in lst] == Parallel(n_jobs=n_jobs)( + delayed(f)(x, y=1) for x in lst + ) + + +@parametrize("backend", PARALLEL_BACKENDS) +def test_parallel_as_context_manager(backend): + lst = range(10) + expected = [f(x, y=1) for x in lst] + + with Parallel(n_jobs=4, backend=backend) as p: + # Internally a pool instance has been eagerly created and is managed + # via the context manager protocol + managed_backend = p._backend + + # We make call with the managed parallel object several times inside + # the managed block: + assert expected == p(delayed(f)(x, y=1) for x in lst) + assert expected == p(delayed(f)(x, y=1) for x in lst) + + # Those calls have all used the same pool instance: + if mp is not None: + assert get_workers(managed_backend) is get_workers(p._backend) + + # As soon as we exit the context manager block, the pool is terminated and + # no longer referenced from the parallel object: + if mp is not None: + assert get_workers(p._backend) is None + + # It's still possible to use the parallel instance in non-managed mode: + assert expected == p(delayed(f)(x, y=1) for x in lst) + if mp is not None: + assert get_workers(p._backend) is None + + +@with_multiprocessing +def test_parallel_pickling(): + """Check that pmap captures the errors when it is passed an object + that cannot be pickled. 
+ """ + + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError("123") + + with raises(PicklingError, match=r"the task to send"): + Parallel(n_jobs=2, backend="loky")( + delayed(id)(UnpicklableObject()) for _ in range(10) + ) + + +@with_numpy +@with_multiprocessing +@parametrize("byteorder", ["<", ">", "="]) +@parametrize("max_nbytes", [1, "1M"]) +def test_parallel_byteorder_corruption(byteorder, max_nbytes): + def inspect_byteorder(x): + return x, x.dtype.byteorder + + x = np.arange(6).reshape((2, 3)).view(f"{byteorder}i4") + + initial_np_byteorder = x.dtype.byteorder + + result = Parallel(n_jobs=2, backend="loky", max_nbytes=max_nbytes)( + delayed(inspect_byteorder)(x) for _ in range(3) + ) + + for x_returned, byteorder_in_worker in result: + assert byteorder_in_worker == initial_np_byteorder + assert byteorder_in_worker == x_returned.dtype.byteorder + np.testing.assert_array_equal(x, x_returned) + + +@parametrize("backend", PARALLEL_BACKENDS) +def test_parallel_timeout_success(backend): + # Check that timeout isn't thrown when function is fast enough + assert ( + len( + Parallel(n_jobs=2, backend=backend, timeout=30)( + delayed(sleep)(0.001) for x in range(10) + ) + ) + == 10 + ) + + +@with_multiprocessing +@parametrize("backend", PARALLEL_BACKENDS) +def test_parallel_timeout_fail(backend): + # Check that timeout properly fails when function is too slow + with raises(TimeoutError): + Parallel(n_jobs=2, backend=backend, timeout=0.01)( + delayed(sleep)(10) for x in range(10) + ) + + +@with_multiprocessing +@parametrize("backend", set(RETURN_GENERATOR_BACKENDS) - {"sequential"}) +@parametrize("return_as", ["generator", "generator_unordered"]) +def test_parallel_timeout_fail_with_generator(backend, return_as): + # Check that timeout properly fails when function is too slow with + # return_as=generator + with raises(TimeoutError): + list( + Parallel(n_jobs=2, backend=backend, return_as=return_as, timeout=0.1)( + delayed(sleep)(10) for x in range(10) + ) + ) + + # Fast tasks and high timeout should not raise + list( + Parallel(n_jobs=2, backend=backend, return_as=return_as, timeout=10)( + delayed(sleep)(0.01) for x in range(10) + ) + ) + + +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS) +def test_error_capture(backend): + # Check that error are captured, and that correct exceptions + # are raised. 
+ if mp is not None: + with raises(ZeroDivisionError): + Parallel(n_jobs=2, backend=backend)( + [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))] + ) + + with raises(KeyboardInterrupt): + Parallel(n_jobs=2, backend=backend)( + [delayed(interrupt_raiser)(x) for x in (1, 0)] + ) + + # Try again with the context manager API + with Parallel(n_jobs=2, backend=backend) as parallel: + assert get_workers(parallel._backend) is not None + original_workers = get_workers(parallel._backend) + + with raises(ZeroDivisionError): + parallel([delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) + + # The managed pool should still be available and be in a working + # state despite the previously raised (and caught) exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert [f(x, y=1) for x in range(10)] == parallel( + delayed(f)(x, y=1) for x in range(10) + ) + + original_workers = get_workers(parallel._backend) + with raises(KeyboardInterrupt): + parallel([delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # The pool should still be available despite the exception + assert get_workers(parallel._backend) is not None + + # The pool should have been interrupted and restarted: + assert get_workers(parallel._backend) is not original_workers + + assert [f(x, y=1) for x in range(10)] == parallel( + delayed(f)(x, y=1) for x in range(10) + ), ( + parallel._iterating, + parallel.n_completed_tasks, + parallel.n_dispatched_tasks, + parallel._aborting, + ) + + # Check that the inner pool has been terminated when exiting the + # context manager + assert get_workers(parallel._backend) is None + else: + with raises(KeyboardInterrupt): + Parallel(n_jobs=2)([delayed(interrupt_raiser)(x) for x in (1, 0)]) + + # wrapped exceptions should inherit from the class of the original + # exception to make it easy to catch them + with raises(ZeroDivisionError): + Parallel(n_jobs=2)([delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) + + with raises(MyExceptionWithFinickyInit): + Parallel(n_jobs=2, verbose=0)( + (delayed(exception_raiser)(i, custom_exception=True) for i in range(30)) + ) + + +@with_multiprocessing +@parametrize("backend", BACKENDS) +def test_error_in_task_iterator(backend): + def my_generator(raise_at=0): + for i in range(20): + if i == raise_at: + raise ValueError("Iterator Raising Error") + yield i + + with Parallel(n_jobs=2, backend=backend) as p: + # The error is raised in the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=0)) + + # The error is raised when dispatching a new task after the + # pre-dispatch (likely to happen in a different thread) + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=5)) + + # Same, but raises long after the pre-dispatch phase + with raises(ValueError, match="Iterator Raising Error"): + p(delayed(square)(i) for i in my_generator(raise_at=19)) + + +def consumer(queue, item): + queue.append("Consumed %s" % item) + + +@parametrize("backend", BACKENDS) +@parametrize( + "batch_size, expected_queue", + [ + ( + 1, + [ + "Produced 0", + "Consumed 0", + "Produced 1", + "Consumed 1", + "Produced 2", + "Consumed 2", + "Produced 3", + "Consumed 3", + "Produced 4", + "Consumed 4", + "Produced 5", + "Consumed 5", + ], + ), + ( + 4, + [ # First Batch + "Produced 0", + "Produced 1", + "Produced 2", + 
"Produced 3", + "Consumed 0", + "Consumed 1", + "Consumed 2", + "Consumed 3", + # Second batch + "Produced 4", + "Produced 5", + "Consumed 4", + "Consumed 5", + ], + ), + ], +) +def test_dispatch_one_job(backend, batch_size, expected_queue): + """Test that with only one job, Parallel does act as a iterator.""" + queue = list() + + def producer(): + for i in range(6): + queue.append("Produced %i" % i) + yield i + + Parallel(n_jobs=1, batch_size=batch_size, backend=backend)( + delayed(consumer)(queue, x) for x in producer() + ) + assert queue == expected_queue + assert len(queue) == 12 + + +@with_multiprocessing +@parametrize("backend", PARALLEL_BACKENDS) +def test_dispatch_multiprocessing(backend): + """Check that using pre_dispatch Parallel does indeed dispatch items + lazily. + """ + manager = mp.Manager() + queue = manager.list() + + def producer(): + for i in range(6): + queue.append("Produced %i" % i) + yield i + + Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)( + delayed(consumer)(queue, "any") for _ in producer() + ) + + queue_contents = list(queue) + assert queue_contents[0] == "Produced 0" + + # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only + # after any of the first 3 jobs have completed. + first_consumption_index = queue_contents[:4].index("Consumed any") + assert first_consumption_index > -1 + + produced_3_index = queue_contents.index("Produced 3") # 4th task produced + assert produced_3_index > first_consumption_index + + assert len(queue) == 12 + + +def test_batching_auto_threading(): + # batching='auto' with the threading backend leaves the effective batch + # size to 1 (no batching) as it has been found to never be beneficial with + # this low-overhead backend. + + with Parallel(n_jobs=2, batch_size="auto", backend="threading") as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + assert p._backend.compute_batch_size() == 1 + + +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS) +def test_batching_auto_subprocesses(backend): + with Parallel(n_jobs=2, batch_size="auto", backend=backend) as p: + p(delayed(id)(i) for i in range(5000)) # many very fast tasks + + # It should be strictly larger than 1 but as we don't want heisen + # failures on clogged CI worker environment be safe and only check that + # it's a strictly positive number. + assert p._backend.compute_batch_size() > 0 + + +def test_exception_dispatch(): + """Make sure that exception raised during dispatch are indeed captured""" + with raises(ValueError): + Parallel(n_jobs=2, pre_dispatch=16, verbose=0)( + delayed(exception_raiser)(i) for i in range(30) + ) + + +def nested_function_inner(i): + Parallel(n_jobs=2)(delayed(exception_raiser)(j) for j in range(30)) + + +def nested_function_outer(i): + Parallel(n_jobs=2)(delayed(nested_function_inner)(j) for j in range(30)) + + +@with_multiprocessing +@parametrize("backend", PARALLEL_BACKENDS) +@pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255") +def test_nested_exception_dispatch(backend): + """Ensure errors for nested joblib cases gets propagated + + We rely on the Python 3 built-in __cause__ system that already + report this kind of information to the user. 
+ """ + with raises(ValueError) as excinfo: + Parallel(n_jobs=2, backend=backend)( + delayed(nested_function_outer)(i) for i in range(30) + ) + + # Check that important information such as function names are visible + # in the final error message reported to the user + report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb) + report = "".join(report_lines) + assert "nested_function_outer" in report + assert "nested_function_inner" in report + assert "exception_raiser" in report + + assert type(excinfo.value) is ValueError + + +class FakeParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def configure(self, n_jobs=1, parallel=None, **backend_args): + self.n_jobs = self.effective_n_jobs(n_jobs) + self.parallel = parallel + return n_jobs + + def effective_n_jobs(self, n_jobs=1): + if n_jobs < 0: + n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1) + return n_jobs + + +def test_invalid_backend(): + with raises(ValueError, match="Invalid backend:"): + Parallel(backend="unit-testing") + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend="unit-testing"): + pass + + with raises(ValueError, match="Invalid backend:"): + with parallel_config(backend="unit-testing"): + pass + + +@parametrize("backend", ALL_VALID_BACKENDS) +def test_invalid_njobs(backend): + with raises(ValueError) as excinfo: + Parallel(n_jobs=0, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs=0.5, backend=backend)._initialize_backend() + assert "n_jobs == 0 in Parallel has no meaning" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="2.3", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + with raises(ValueError) as excinfo: + Parallel(n_jobs="invalid_str", backend=backend)._initialize_backend() + assert "n_jobs could not be converted to int" in str(excinfo.value) + + +@with_multiprocessing +@parametrize("backend", PARALLEL_BACKENDS) +@parametrize("n_jobs", ["2", 2.3, 2]) +def test_njobs_converted_to_int(backend, n_jobs): + p = Parallel(n_jobs=n_jobs, backend=backend) + assert p._effective_n_jobs() == 2 + + res = p(delayed(square)(i) for i in range(10)) + assert all(r == square(i) for i, r in enumerate(res)) + + +def test_register_parallel_backend(): + try: + register_parallel_backend("test_backend", FakeParallelBackend) + assert "test_backend" in BACKENDS + assert BACKENDS["test_backend"] == FakeParallelBackend + finally: + del BACKENDS["test_backend"] + + +def test_overwrite_default_backend(): + default_backend_orig = parallel.DEFAULT_BACKEND + assert _active_backend_type() == get_default_backend_instance() + try: + register_parallel_backend("threading", BACKENDS["threading"], make_default=True) + assert _active_backend_type() == ThreadingBackend + finally: + # Restore the global default manually + parallel.DEFAULT_BACKEND = default_backend_orig + assert _active_backend_type() == get_default_backend_instance() + + +@skipif(mp is not None, reason="Only without multiprocessing") +def test_backend_no_multiprocessing(): + with warns(UserWarning, match="joblib backend '.*' is not available on.*"): + Parallel(backend="loky")(delayed(square)(i) for i in range(3)) + + # The below should now work without problems + with parallel_config(backend="loky"): + Parallel()(delayed(square)(i) for i in range(3)) + + +def 
+ with context(backend_name, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert active_n_jobs == 3 + assert effective_n_jobs(3) == 3 + p = Parallel() + assert p.n_jobs == 3 + if backend_name == "multiprocessing": + assert type(active_backend) is MultiprocessingBackend + assert type(p._backend) is MultiprocessingBackend + elif backend_name == "loky": + assert type(active_backend) is LokyBackend + assert type(p._backend) is LokyBackend + elif backend_name == "threading": + assert type(active_backend) is ThreadingBackend + assert type(p._backend) is ThreadingBackend + elif backend_name.startswith("test_"): + assert type(active_backend) is FakeParallelBackend + assert type(p._backend) is FakeParallelBackend + + +all_backends_for_context_manager = PARALLEL_BACKENDS[:] +all_backends_for_context_manager.extend(["test_backend_%d" % i for i in range(3)]) + + +@with_multiprocessing +@parametrize("backend", all_backends_for_context_manager) +@parametrize("context", [parallel_backend, parallel_config]) +def test_backend_context_manager(monkeypatch, backend, context): + if backend not in BACKENDS: + monkeypatch.setitem(BACKENDS, backend, FakeParallelBackend) + + assert _active_backend_type() == get_default_backend_instance() + # check that it is possible to switch parallel backends sequentially + check_backend_context_manager(context, backend) + + # The default backend is restored + assert _active_backend_type() == get_default_backend_instance() + + # Check that context manager switching is thread safe: + Parallel(n_jobs=2, backend="threading")( + delayed(check_backend_context_manager)(context, b) + for b in all_backends_for_context_manager + if not b.startswith("test_") + ) + + # The default backend is again restored + assert _active_backend_type() == get_default_backend_instance() + + +class ParameterizedParallelBackend(SequentialBackend): + """Pretends to run concurrently while running sequentially.""" + + def __init__(self, param=None): + if param is None: + raise ValueError("param should not be None") + self.param = param + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_parameterized_backend_context_manager(monkeypatch, context): + monkeypatch.setitem(BACKENDS, "param_backend", ParameterizedParallelBackend) + assert _active_backend_type() == get_default_backend_instance() + + with context("param_backend", param=42, n_jobs=3): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 42 + assert active_n_jobs == 3 + p = Parallel() + assert p.n_jobs == 3 + assert p._backend is active_backend + results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == get_default_backend_instance() + + +@parametrize("context", [parallel_config, parallel_backend]) +def test_directly_parameterized_backend_context_manager(context): + assert _active_backend_type() == get_default_backend_instance() + + # Check that it's possible to pass a backend instance directly, + # without registration + with context(ParameterizedParallelBackend(param=43), n_jobs=5): + active_backend, active_n_jobs = parallel.get_active_backend() + assert type(active_backend) is ParameterizedParallelBackend + assert active_backend.param == 43 + assert active_n_jobs == 5 + p = Parallel() + assert p.n_jobs == 5 + assert p._backend is active_backend
+ results = p(delayed(sqrt)(i) for i in range(5)) + assert results == [sqrt(i) for i in range(5)] + + # The default backend is again restored + assert _active_backend_type() == get_default_backend_instance() + + +def sleep_and_return_pid(): + sleep(0.1) + return os.getpid() + + +def get_nested_pids(): + assert _active_backend_type() == ThreadingBackend + # Assert that the nested backend does not change the default number of + # jobs used in Parallel + assert Parallel()._effective_n_jobs() == 1 + + # Assert that the tasks are running only on one process + return Parallel(n_jobs=2)(delayed(sleep_and_return_pid)() for _ in range(2)) + + +class MyBackend(joblib._parallel_backends.LokyBackend): + """Backend to test backward compatibility with older backends""" + + def get_nested_backend( + self, + ): + # Older backends only return a backend, without n_jobs indications. + return super(MyBackend, self).get_nested_backend()[0] + + +register_parallel_backend("back_compat_backend", MyBackend) + + +@with_multiprocessing +@parametrize("backend", ["threading", "loky", "multiprocessing", "back_compat_backend"]) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_context_manager(context, backend): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + with context(backend): + pid_groups = Parallel(n_jobs=2)(delayed(get_nested_pids)() for _ in range(10)) + for pid_group in pid_groups: + assert len(set(pid_group)) == 1 + + +@with_multiprocessing +@parametrize("n_jobs", [2, -1, None]) +@parametrize("backend", PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_nested_backend_in_sequential(backend, n_jobs, context): + # Check that by default, nested parallel calls will always use the + # ThreadingBackend + + def check_nested_backend(expected_backend_type, expected_n_job): + # Assert that the sequential backend at top level does not change the + # backend for nested calls.
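A compact sketch of the context-manager behaviour these tests cover: inside a parallel_config block, a bare Parallel() picks up the configured backend and n_jobs without explicit arguments.

    from joblib import Parallel, delayed, parallel_config

    with parallel_config(backend="threading", n_jobs=2):
        out = Parallel()(delayed(len)("x" * i) for i in range(4))
    assert out == [0, 1, 2, 3]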
+ assert _active_backend_type() == BACKENDS[expected_backend_type] + + # Assert that the nested backend in SequentialBackend does not change + # the default number of jobs used in Parallel + expected_n_job = effective_n_jobs(expected_n_job) + assert Parallel()._effective_n_jobs() == expected_n_job + + Parallel(n_jobs=1)( + delayed(check_nested_backend)(parallel.DEFAULT_BACKEND, 1) for _ in range(10) + ) + + with context(backend, n_jobs=n_jobs): + Parallel(n_jobs=1)( + delayed(check_nested_backend)(backend, n_jobs) for _ in range(10) + ) + + +def check_nesting_level(context, inner_backend, expected_level): + with context(inner_backend) as ctx: + if context is parallel_config: + backend = ctx["backend"] + if context is parallel_backend: + backend = ctx[0] + assert backend.nesting_level == expected_level + + +@with_multiprocessing +@parametrize("outer_backend", PARALLEL_BACKENDS) +@parametrize("inner_backend", PARALLEL_BACKENDS) +@parametrize("context", [parallel_config, parallel_backend]) +def test_backend_nesting_level(context, outer_backend, inner_backend): + # Check that the nesting level for the backend is correctly set + check_nesting_level(context, outer_backend, 0) + + Parallel(n_jobs=2, backend=outer_backend)( + delayed(check_nesting_level)(context, inner_backend, 1) for _ in range(10) + ) + + with context(inner_backend, n_jobs=2): + Parallel()( + delayed(check_nesting_level)(context, inner_backend, 1) for _ in range(10) + ) + + +@with_multiprocessing +@parametrize("context", [parallel_config, parallel_backend]) +@parametrize("with_retrieve_callback", [True, False]) +def test_retrieval_context(context, with_retrieve_callback): + import contextlib + + class MyBackend(ThreadingBackend): + i = 0 + supports_retrieve_callback = with_retrieve_callback + + @contextlib.contextmanager + def retrieval_context(self): + self.i += 1 + yield + + register_parallel_backend("retrieval", MyBackend) + + def nested_call(n): + return Parallel(n_jobs=2)(delayed(id)(i) for i in range(n)) + + with context("retrieval") as ctx: + Parallel(n_jobs=2)(delayed(nested_call)(i) for i in range(5)) + if context is parallel_config: + assert ctx["backend"].i == 1 + if context is parallel_backend: + assert ctx[0].i == 1 + + +############################################################################### +# Test helpers + + +@parametrize("batch_size", [0, -1, 1.42]) +def test_invalid_batch_size(batch_size): + with raises(ValueError): + Parallel(batch_size=batch_size) + + +@parametrize( + "n_tasks, n_jobs, pre_dispatch, batch_size", + [ + (2, 2, "all", "auto"), + (2, 2, "n_jobs", "auto"), + (10, 2, "n_jobs", "auto"), + (517, 2, "n_jobs", "auto"), + (10, 2, "n_jobs", "auto"), + (10, 4, "n_jobs", "auto"), + (200, 12, "n_jobs", "auto"), + (25, 12, "2 * n_jobs", 1), + (250, 12, "all", 1), + (250, 12, "2 * n_jobs", 7), + (200, 12, "2 * n_jobs", "auto"), + ], +) +def test_dispatch_race_condition(n_tasks, n_jobs, pre_dispatch, batch_size): + # Check that using (async-)dispatch does not yield a race condition on the + # iterable generator that is not thread-safe natively. 
+ # This is a non-regression test for the "Pool seems closed" class of error + params = {"n_jobs": n_jobs, "pre_dispatch": pre_dispatch, "batch_size": batch_size} + expected = [square(i) for i in range(n_tasks)] + results = Parallel(**params)(delayed(square)(i) for i in range(n_tasks)) + assert results == expected + + +@with_multiprocessing +def test_default_mp_context(): + mp_start_method = mp.get_start_method() + p = Parallel(n_jobs=2, backend="multiprocessing") + context = p._backend_kwargs.get("context") + start_method = context.get_start_method() + assert start_method == mp_start_method + + +@with_numpy +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS) +def test_no_blas_crash_or_freeze_with_subprocesses(backend): + if backend == "multiprocessing": + # Use the spawn backend that is both robust and available on all + # platforms + backend = mp.get_context("spawn") + + # Check that on recent Python versions, the 'spawn' start method can make + # it possible to use multiprocessing in conjunction with any BLAS + # implementation that happens to be used by numpy without causing a freeze + # or a crash + rng = np.random.RandomState(42) + + # call BLAS DGEMM to force the initialization of the internal thread-pool + # in the main process + a = rng.randn(1000, 1000) + np.dot(a, a.T) + + # check that the internal BLAS thread-pool is not in an inconsistent state + # in the worker processes managed by multiprocessing + Parallel(n_jobs=2, backend=backend)(delayed(np.dot)(a, a.T) for i in range(2)) + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN = """\ +from joblib import Parallel, delayed + +def square(x): + return x ** 2 + +backend = "{}" +if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + +print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) +""" + + +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS) +def test_parallel_with_interactively_defined_functions(backend): + # When using the "-c" flag, interactive functions defined in __main__ + # should work with any backend. + if backend == "multiprocessing" and mp.get_start_method() != "fork": + pytest.skip( + "Require fork start method to use interactively defined " + "functions with multiprocessing." + ) + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_NO_MAIN.format(backend) + check_subprocess_call( + [sys.executable, "-c", code], timeout=10, stdout_regex=r"\[0, 1, 4, 9, 16\]" + ) + + +UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. 
This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed + +def run(f, x): + return f(x) + +{define_func} + +if __name__ == "__main__": + backend = "{backend}" + if backend == "spawn": + from multiprocessing import get_context + backend = get_context(backend) + + callable_position = "{callable_position}" + if callable_position == "delayed": + print(Parallel(n_jobs=2, backend=backend)( + delayed(square)(i) for i in range(5))) + elif callable_position == "args": + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(square, i) for i in range(5))) + else: + print(Parallel(n_jobs=2, backend=backend)( + delayed(run)(f=square, x=i) for i in range(5))) +""" + +SQUARE_MAIN = """\ +def square(x): + return x ** 2 +""" +SQUARE_LOCAL = """\ +def gen_square(): + def square(x): + return x ** 2 + return square +square = gen_square() +""" +SQUARE_LAMBDA = """\ +square = lambda x: x ** 2 +""" + + +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS + ([] if mp is None else ["spawn"])) +@parametrize("define_func", [SQUARE_MAIN, SQUARE_LOCAL, SQUARE_LAMBDA]) +@parametrize("callable_position", ["delayed", "args", "kwargs"]) +def test_parallel_with_unpicklable_functions_in_args( + backend, define_func, callable_position, tmpdir +): + if backend in ["multiprocessing", "spawn"] and ( + define_func != SQUARE_MAIN or sys.platform == "win32" + ): + pytest.skip("Not picklable with pickle") + code = UNPICKLABLE_CALLABLE_SCRIPT_TEMPLATE_MAIN.format( + define_func=define_func, + backend=backend, + callable_position=callable_position, + joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)), + ) + code_file = tmpdir.join("unpicklable_func_script.py") + code_file.write(code) + check_subprocess_call( + [sys.executable, code_file.strpath], + timeout=10, + stdout_regex=r"\[0, 1, 4, 9, 16\]", + ) + + +INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT = """\ +import sys +import faulthandler +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed +from functools import partial + +class MyClass: + '''Class defined in the __main__ namespace''' + def __init__(self, value): + self.value = value + + +def square(x, ignored=None, ignored2=None): + '''Function defined in the __main__ namespace''' + return x.value ** 2 + + +square2 = partial(square, ignored2='something') + +# Here, we do not need the `if __name__ == "__main__":` safeguard when +# using the default `loky` backend (even on Windows). + +# To make debugging easier +faulthandler.dump_traceback_later(30, exit=True) + +# The following baroque function call is meant to check that joblib +# introspection rightfully uses cloudpickle instead of the (faster) pickle +# module of the standard library when necessary. In particular cloudpickle is +# necessary for functions and instances of classes interactively defined in the +# __main__ module. 
+ +print(Parallel(backend="loky", n_jobs=2)( + delayed(square2)(MyClass(i), ignored=[dict(a=MyClass(1))]) + for i in range(5) +)) +""".format(joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_functions_loky(tmpdir): + # loky accepts interactive functions defined in __main__ and does not + # require if __name__ == '__main__' even when the __main__ module is + # defined by the result of the execution of a filesystem script. + script = tmpdir.join("joblib_interactively_defined_function.py") + script.write(INTERACTIVE_DEFINED_FUNCTION_AND_CLASS_SCRIPT_CONTENT) + check_subprocess_call( + [sys.executable, script.strpath], + stdout_regex=r"\[0, 1, 4, 9, 16\]", + timeout=None, # rely on faulthandler to kill the process + ) + + +INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT = """\ +import sys +# Make sure that joblib is importable in the subprocess launching this +# script. This is needed in case we run the tests from the joblib root +# folder without having installed joblib +sys.path.insert(0, {joblib_root_folder!r}) + +from joblib import Parallel, delayed, hash +import multiprocessing as mp +mp.util.log_to_stderr(5) + +class MyList(list): + '''MyList is interactively defined but MyList.append is a built-in''' + def __hash__(self): + # XXX: workaround limitation in cloudpickle + return hash(self).__hash__() + +l = MyList() + +print(Parallel(backend="loky", n_jobs=2)( + delayed(l.append)(i) for i in range(3) +)) +""".format(joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__))) + + +@with_multiprocessing +def test_parallel_with_interactively_defined_bound_method_loky(tmpdir): + script = tmpdir.join("joblib_interactive_bound_method_script.py") + script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT) + check_subprocess_call( + [sys.executable, script.strpath], + stdout_regex=r"\[None, None, None\]", + stderr_regex=r"LokyProcess", + timeout=15, + ) + + +def test_parallel_with_exhausted_iterator(): + exhausted_iterator = iter([]) + assert Parallel(n_jobs=2)(exhausted_iterator) == [] + + +def check_memmap(a): + if not isinstance(a, np.memmap): + raise TypeError("Expected np.memmap instance, got %r" % type(a)) + return a.copy() # return a regular array instead of a memmap + + +@with_numpy +@with_multiprocessing +@parametrize("backend", PROCESS_BACKENDS) +def test_auto_memmap_on_arrays_from_generator(backend): + # Non-regression test for a problem with a bad interaction between the + # GC collecting arrays recently created during iteration inside the + # parallel dispatch loop and the auto-memmap feature of Parallel. + # See: https://github.com/joblib/joblib/pull/294 + def generate_arrays(n): + for i in range(n): + yield np.ones(10, dtype=np.float32) * i + + # Use max_nbytes=1 to force the use of memory-mapping even for small + # arrays + results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100) + ) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + # Second call to force loky to adapt the executor by growing the number + # of worker processes. This is a non-regression test for: + # https://github.com/joblib/joblib/issues/629. 
+ results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)( + delayed(check_memmap)(a) for a in generate_arrays(100) + ) + for result, expected in zip(results, generate_arrays(len(results))): + np.testing.assert_array_equal(expected, result) + + +def identity(arg): + return arg + + +@with_numpy +@with_multiprocessing +def test_memmap_with_big_offset(tmpdir): + fname = tmpdir.join("test.mmap").strpath + size = mmap.ALLOCATIONGRANULARITY + obj = [np.zeros(size, dtype="uint8"), np.ones(size, dtype="uint8")] + dump(obj, fname) + memmap = load(fname, mmap_mode="r") + (result,) = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0]) + assert isinstance(memmap[1], np.memmap) + assert memmap[1].offset > size + np.testing.assert_array_equal(obj, result) + + +def test_warning_about_timeout_not_supported_by_backend(): + with warnings.catch_warnings(record=True) as warninfo: + Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50)) + assert len(warninfo) == 1 + w = warninfo[0] + assert isinstance(w.message, UserWarning) + assert str(w.message) == ( + "The backend class 'SequentialBackend' does not support timeout. " + "You have set 'timeout=1' in Parallel but the 'timeout' parameter " + "will not be used." + ) + + +def set_list_value(input_list, index, value): + input_list[index] = value + return value + + +@pytest.mark.parametrize("n_jobs", [1, 2, 4]) +def test_parallel_return_order_with_return_as_generator_parameter(n_jobs): + # This test inserts values in a list in some expected order + # in sequential computing, and then checks that this order has been + # respected by Parallel output generator. + input_list = [0] * 5 + result = Parallel(n_jobs=n_jobs, return_as="generator", backend="threading")( + delayed(set_list_value)(input_list, i, i) for i in range(5) + ) + + # Ensure that all the tasks are completed before checking the result + result = list(result) + + assert all(v == r for v, r in zip(input_list, result)) + + +def _sqrt_with_delay(e, delay): + if delay: + sleep(30) + return sqrt(e) + + +# Use a private function so it can also be called for the dask backend in +# test_dask.py without triggering the test twice. +# We isolate the test with the dask backend to simplify optional deps +# management and leaking environment variables. +def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs): + # This test submits 10 tasks, but the second task is super slow. This test + # checks that the 9 other tasks return before the slow task is done, when + # `return_as` parameter is set to `'generator_unordered'` + result = Parallel(n_jobs=n_jobs, return_as="generator_unordered", backend=backend)( + delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10) + ) + + quickly_returned = sorted(next(result) for _ in range(9)) + + expected_quickly_returned = [0] + list(range(2, 10)) + + assert all(v == r for v, r in zip(expected_quickly_returned, quickly_returned)) + + del result + + +@pytest.mark.parametrize("n_jobs", [2, 4]) +# NB: for this test to work, the backend must be allowed to process tasks +# concurrently, so at least two jobs with a non-sequential backend are +# mandatory. 
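The generator output modes used above, sketched briefly: return_as="generator" yields results in submission order as they become available, while "generator_unordered" yields them in completion order.

    from joblib import Parallel, delayed

    gen = Parallel(n_jobs=2, return_as="generator")(
        delayed(abs)(-i) for i in range(5)
    )
    assert list(gen) == [0, 1, 2, 3, 4]  # lazily produced, in order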
+@with_multiprocessing
+@parametrize("backend", set(RETURN_GENERATOR_BACKENDS) - {"sequential"})
+def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
+    _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs)
+
+
+@parametrize("backend", ALL_VALID_BACKENDS)
+@parametrize("n_jobs", [1, 2, -2, -1])
+def test_abort_backend(n_jobs, backend):
+    delays = ["a"] + [10] * 100
+    with raises(TypeError):
+        t_start = time.time()
+        Parallel(n_jobs=n_jobs, backend=backend)(delayed(time.sleep)(i) for i in delays)
+    dt = time.time() - t_start
+    assert dt < 20
+
+
+def get_large_object(arg):
+    result = np.ones(int(5 * 1e5), dtype=bool)
+    result[0] = False
+    return result
+
+
+# Use a private function so it can also be called for the dask backend in
+# test_dask.py without triggering the test twice.
+# We isolate the test with the dask backend to simplify optional deps
+# management and to avoid leaking environment variables.
+def _test_deadlock_with_generator(backend, return_as, n_jobs):
+    # Non-regression test for a race condition in the backends when the pickler
+    # is delayed by a large object.
+    with Parallel(n_jobs=n_jobs, backend=backend, return_as=return_as) as parallel:
+        result = parallel(delayed(get_large_object)(i) for i in range(10))
+        next(result)
+        next(result)
+    del result
+
+
+@with_numpy
+@parametrize("backend", RETURN_GENERATOR_BACKENDS)
+@parametrize("return_as", ["generator", "generator_unordered"])
+@parametrize("n_jobs", [1, 2, -2, -1])
+def test_deadlock_with_generator(backend, return_as, n_jobs):
+    _test_deadlock_with_generator(backend, return_as, n_jobs)
+
+
+@parametrize("backend", RETURN_GENERATOR_BACKENDS)
+@parametrize("return_as", ["generator", "generator_unordered"])
+@parametrize("n_jobs", [1, 2, -2, -1])
+def test_multiple_generator_call(backend, return_as, n_jobs):
+    # Non-regression test that ensures the dispatch of the tasks starts
+    # immediately when Parallel.__call__ is called. This test relies on the
+    # assumption that only one generator can be submitted at a time.
+    with raises(RuntimeError, match="This Parallel instance is already running"):
+        parallel = Parallel(n_jobs, backend=backend, return_as=return_as)
+        g = parallel(delayed(sleep)(1) for _ in range(10))  # noqa: F841
+        t_start = time.time()
+        gen2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841
+
+    # Make sure that the error is raised quickly
+    assert time.time() - t_start < 2, (
+        "The error should be raised immediately when submitting a new task "
+        "but it took more than 2s."
+    )
+
+    del g
+
+
+@parametrize("backend", RETURN_GENERATOR_BACKENDS)
+@parametrize("return_as", ["generator", "generator_unordered"])
+@parametrize("n_jobs", [1, 2, -2, -1])
+def test_multiple_generator_call_managed(backend, return_as, n_jobs):
+    # Non-regression test that ensures the dispatch of the tasks starts
+    # immediately when Parallel.__call__ is called. This test relies on the
+    # assumption that only one generator can be submitted at a time.
+    with Parallel(n_jobs, backend=backend, return_as=return_as) as parallel:
+        g = parallel(delayed(sleep)(10) for _ in range(10))  # noqa: F841
+        t_start = time.time()
+        with raises(RuntimeError, match="This Parallel instance is already running"):
+            g2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841
+
+        # Make sure that the error is raised quickly
+        assert time.time() - t_start < 2, (
+            "The error should be raised immediately when submitting a new task "
+            "but it took more than 2s."
+        )
+
+    del g
+
+
+@parametrize("backend", RETURN_GENERATOR_BACKENDS)
+@parametrize("return_as_1", ["generator", "generator_unordered"])
+@parametrize("return_as_2", ["generator", "generator_unordered"])
+@parametrize("n_jobs", [1, 2, -2, -1])
+def test_multiple_generator_call_separated(backend, return_as_1, return_as_2, n_jobs):
+    # Check that with two separate Parallel instances, the results of both
+    # calls are correctly returned.
+    g = Parallel(n_jobs, backend=backend, return_as=return_as_1)(
+        delayed(sqrt)(i**2) for i in range(10)
+    )
+    g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)(
+        delayed(sqrt)(i**2) for i in range(10, 20)
+    )
+
+    if return_as_1 == "generator_unordered":
+        g = sorted(g)
+
+    if return_as_2 == "generator_unordered":
+        g2 = sorted(g2)
+
+    assert all(res == i for res, i in zip(g, range(10)))
+    assert all(res == i for res, i in zip(g2, range(10, 20)))
+
+
+@parametrize(
+    "backend, error",
+    [
+        ("loky", True),
+        ("threading", False),
+        ("sequential", False),
+    ],
+)
+@parametrize("return_as_1", ["generator", "generator_unordered"])
+@parametrize("return_as_2", ["generator", "generator_unordered"])
+def test_multiple_generator_call_separated_gc(backend, return_as_1, return_as_2, error):
+    if (backend == "loky") and (mp is None):
+        pytest.skip("Requires multiprocessing")
+
+    # Check that in loky, only one call can be run at a time with
+    # a single executor.
+    parallel = Parallel(2, backend=backend, return_as=return_as_1)
+    g = parallel(delayed(sleep)(10) for i in range(10))
+    g_wr = weakref.finalize(g, lambda: print("Generator collected"))
+    ctx = (
+        raises(RuntimeError, match="The executor underlying Parallel")
+        if error
+        else nullcontext()
+    )
+    with ctx:
+        # For loky, this call will raise an error as the gc of the previous
+        # generator will shut down the shared executor.
+        # For the other backends, as the worker pools are not shared between
+        # the two calls, this should proceed correctly.
+        t_start = time.time()
+        g = Parallel(2, backend=backend, return_as=return_as_2)(
+            delayed(sqrt)(i**2) for i in range(10, 20)
+        )
+
+        if return_as_2 == "generator_unordered":
+            g = sorted(g)
+
+        assert all(res == i for res, i in zip(g, range(10, 20)))
+
+        assert time.time() - t_start < 5
+
+    # Make sure that the computations are stopped for the gc'ed generator
+    retry = 0
+    while g_wr.alive and retry < 3:
+        retry += 1
+        time.sleep(0.5)
+    assert time.time() - t_start < 5
+
+    if parallel._effective_n_jobs() != 1:
+        # check that the first parallel object is aborting (the final _aborted
+        # state might be delayed).
+        assert parallel._aborting
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("backend", PROCESS_BACKENDS)
+def test_memmapping_leaks(backend, tmpdir):
+    # Non-regression test for memmapping backends. Ensure that the data
+    # does not stay too long in memory
+    tmpdir = tmpdir.strpath
+
+    # Use max_nbytes=1 to force the use of memory-mapping even for small
+    # arrays
+    with Parallel(n_jobs=2, max_nbytes=1, backend=backend, temp_folder=tmpdir) as p:
+        p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+        # The memmap folder should not be empty while inside the context scope
+        assert len(os.listdir(tmpdir)) > 0
+
+    # Make sure that the shared memory is cleaned at the end when we exit
+    # the context
+    for _ in range(100):
+        if not os.listdir(tmpdir):
+            break
+        sleep(0.1)
+    else:
+        raise AssertionError("temporary directory of Parallel was not removed")
+
+    # Make sure that the shared memory is cleaned at the end of a call
+    p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
+    p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
+
+    for _ in range(100):
+        if not os.listdir(tmpdir):
+            break
+        sleep(0.1)
+    else:
+        raise AssertionError("temporary directory of Parallel was not removed")
+
+
+@parametrize(
+    "backend", ([None, "threading"] if mp is None else [None, "loky", "threading"])
+)
+def test_lambda_expression(backend):
+    # cloudpickle is used to pickle delayed callables
+    results = Parallel(n_jobs=2, backend=backend)(
+        delayed(lambda x: x**2)(i) for i in range(10)
+    )
+    assert results == [i**2 for i in range(10)]
+
+
+@with_multiprocessing
+@parametrize("backend", PROCESS_BACKENDS)
+def test_backend_batch_statistics_reset(backend):
+    """Test that a parallel backend correctly resets its batch statistics."""
+    n_jobs = 2
+    n_inputs = 500
+    task_time = 2.0 / n_inputs
+
+    p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
+    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+    assert p._backend._effective_batch_size == p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE
+    assert (
+        p._backend._smoothed_batch_duration
+        == p._backend._DEFAULT_SMOOTHED_BATCH_DURATION
+    )
+
+    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
+    assert p._backend._effective_batch_size == p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE
+    assert (
+        p._backend._smoothed_batch_duration
+        == p._backend._DEFAULT_SMOOTHED_BATCH_DURATION
+    )
+
+
+@with_multiprocessing
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_hinting_and_constraints(context):
+    for n_jobs in [1, 2, -1]:
+        assert type(Parallel(n_jobs=n_jobs)._backend) is get_default_backend_instance()
+
+        p = Parallel(n_jobs=n_jobs, prefer="threads")
+        assert type(p._backend) is ThreadingBackend
+
+        p = Parallel(n_jobs=n_jobs, prefer="processes")
+        assert type(p._backend) is LokyBackend
+
+        p = Parallel(n_jobs=n_jobs, require="sharedmem")
+        assert type(p._backend) is ThreadingBackend
+
+    # Explicit backend selection can override backend hinting although it
+    # is useless to pass a hint when selecting a backend.
+    p = Parallel(n_jobs=2, backend="loky", prefer="threads")
+    assert type(p._backend) is LokyBackend
+
+    with context("loky", n_jobs=2):
+        # Explicit backend selection by the user with the context manager
+        # should be respected when combined with backend hints only.
+        p = Parallel(prefer="threads")
+        assert type(p._backend) is LokyBackend
+        assert p.n_jobs == 2
+
+    with context("loky", n_jobs=2):
+        # Locally hard-coded n_jobs value is respected.
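+        # An n_jobs value passed explicitly to Parallel takes precedence over
+        # the n_jobs=2 set by the surrounding context, while the "loky"
+        # backend selected by the context is kept since "prefer" is a hint.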
+        p = Parallel(n_jobs=3, prefer="threads")
+        assert type(p._backend) is LokyBackend
+        assert p.n_jobs == 3
+
+    with context("loky", n_jobs=2):
+        # Explicit backend selection by the user with the context manager
+        # should be ignored when the Parallel call has hard constraints.
+        # In this case, the default backend that supports shared mem is
+        # used and the default number of processes is used.
+        p = Parallel(require="sharedmem")
+        assert type(p._backend) is ThreadingBackend
+        assert p.n_jobs == 1
+
+    with context("loky", n_jobs=2):
+        p = Parallel(n_jobs=3, require="sharedmem")
+        assert type(p._backend) is ThreadingBackend
+        assert p.n_jobs == 3
+
+
+@parametrize("n_jobs", [1, 2])
+@parametrize("prefer", [None, "processes", "threads"])
+def test_backend_hinting_always_running(n_jobs, prefer):
+    # Check that the backend hinting never results in an error
+    # Non-regression test for https://github.com/joblib/joblib/issues/1720
+    expected_results = [i**2 for i in range(10)]
+
+    results = Parallel(n_jobs=n_jobs, prefer=prefer)(
+        delayed(square)(i) for i in range(10)
+    )
+    assert results == expected_results
+
+    with parallel_config(prefer=prefer, n_jobs=n_jobs):
+        results = Parallel()(delayed(square)(i) for i in range(10))
+    assert results == expected_results
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_backend_hinting_and_constraints_with_custom_backends(capsys, context):
+    # Custom backends can declare that they use threads and have shared memory
+    # semantics:
+    class MyCustomThreadingBackend(ParallelBackendBase):
+        supports_sharedmem = True
+        use_threads = True
+
+        def apply_async(self):
+            pass
+
+        def effective_n_jobs(self, n_jobs):
+            return n_jobs
+
+    with context(MyCustomThreadingBackend()):
+        p = Parallel(n_jobs=2, prefer="processes")  # ignored
+        assert type(p._backend) is MyCustomThreadingBackend
+
+        p = Parallel(n_jobs=2, require="sharedmem")
+        assert type(p._backend) is MyCustomThreadingBackend
+
+    class MyCustomProcessingBackend(ParallelBackendBase):
+        supports_sharedmem = False
+        use_threads = False
+
+        def apply_async(self):
+            pass
+
+        def effective_n_jobs(self, n_jobs):
+            return n_jobs
+
+    with context(MyCustomProcessingBackend()):
+        p = Parallel(n_jobs=2, prefer="processes")
+        assert type(p._backend) is MyCustomProcessingBackend
+
+        out, err = capsys.readouterr()
+        assert out == ""
+        assert err == ""
+
+        p = Parallel(n_jobs=2, require="sharedmem", verbose=10)
+        assert type(p._backend) is ThreadingBackend
+
+        out, err = capsys.readouterr()
+        expected = (
+            "Using ThreadingBackend as joblib backend "
+            "instead of MyCustomProcessingBackend as the latter "
+            "does not provide shared memory semantics."
+        )
+        assert out.strip() == expected
+        assert err == ""
+
+    with raises(ValueError):
+        Parallel(backend=MyCustomProcessingBackend(), require="sharedmem")
+
+
+def test_invalid_backend_hinting_and_constraints():
+    with raises(ValueError):
+        Parallel(prefer="invalid")
+
+    with raises(ValueError):
+        Parallel(require="invalid")
+
+    with raises(ValueError):
+        # It is inconsistent to prefer process-based parallelism while
+        # requiring shared memory semantics.
+        Parallel(prefer="processes", require="sharedmem")
+
+    if mp is not None:
+        # It is inconsistent to ask explicitly for process-based
+        # parallelism while requiring shared memory semantics.
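+        # Both process-based backends should be rejected in the same way, as
+        # neither loky nor multiprocessing workers share memory with the
+        # parent process.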
+        with raises(ValueError):
+            Parallel(backend="loky", require="sharedmem")
+        with raises(ValueError):
+            Parallel(backend="multiprocessing", require="sharedmem")
+
+
+def _recursive_backend_info(limit=3, **kwargs):
+    """Perform nested parallel calls and introspect the backend on the way"""
+
+    with Parallel(n_jobs=2) as p:
+        this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
+        if limit == 0:
+            return this_level
+        results = p(
+            delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
+            for i in range(1)
+        )
+        return this_level + results[0]
+
+
+@with_multiprocessing
+@parametrize("backend", ["loky", "threading"])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_nested_parallelism_limit(context, backend):
+    with context(backend, n_jobs=2):
+        backend_types_and_levels = _recursive_backend_info()
+
+    top_level_backend_type = backend.title() + "Backend"
+    expected_types_and_levels = [
+        (top_level_backend_type, 0),
+        ("ThreadingBackend", 1),
+        ("SequentialBackend", 2),
+        ("SequentialBackend", 2),
+    ]
+    assert backend_types_and_levels == expected_types_and_levels
+
+
+def _recursive_parallel(nesting_limit=None):
+    """A horrible function that does recursive parallel calls"""
+    return Parallel()(delayed(_recursive_parallel)() for i in range(2))
+
+
+@pytest.mark.no_cover
+@parametrize("context", [parallel_config, parallel_backend])
+@parametrize("backend", (["threading"] if mp is None else ["loky", "threading"]))
+def test_thread_bomb_mitigation(context, backend):
+    # Test that recursive parallelism raises a RecursionError rather than
+    # saturating the operating system resources by creating an unbounded
+    # number of threads.
+    with context(backend, n_jobs=2):
+        with raises(BaseException) as excinfo:
+            _recursive_parallel()
+    exc = excinfo.value
+    if backend == "loky":
+        # Local import because loky may not be importable for lack of
+        # multiprocessing
+        from joblib.externals.loky.process_executor import TerminatedWorkerError  # noqa
+
+        if isinstance(exc, (TerminatedWorkerError, PicklingError)):
+            # The recursion exception can itself cause an error when
+            # pickling it to be sent back to the parent process. In this
+            # case the worker crashes but the original traceback is still
+            # printed on stderr. This could be improved but does not seem
+            # simple to do, and it is not critical for users (as long
+            # as there is no process or thread bomb happening).
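+            # In that case, record the outcome as an expected failure instead
+            # of asserting on the exception type below.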
+            pytest.xfail("Loky worker crash when serializing RecursionError")
+
+    assert isinstance(exc, RecursionError)
+
+
+def _run_parallel_sum():
+    env_vars = {}
+    for var in [
+        "OMP_NUM_THREADS",
+        "OPENBLAS_NUM_THREADS",
+        "MKL_NUM_THREADS",
+        "VECLIB_MAXIMUM_THREADS",
+        "NUMEXPR_NUM_THREADS",
+        "NUMBA_NUM_THREADS",
+        "ENABLE_IPC",
+    ]:
+        env_vars[var] = os.environ.get(var)
+    return env_vars, parallel_sum(100)
+
+
+@parametrize("backend", ([None, "loky"] if mp is not None else [None]))
+@skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
+def test_parallel_thread_limit(backend):
+    results = Parallel(n_jobs=2, backend=backend)(
+        delayed(_run_parallel_sum)() for _ in range(2)
+    )
+    expected_num_threads = max(cpu_count() // 2, 1)
+    for worker_env_vars, omp_num_threads in results:
+        assert omp_num_threads == expected_num_threads
+        for name, value in worker_env_vars.items():
+            if name.endswith("_THREADS"):
+                assert value == str(expected_num_threads)
+            else:
+                assert name == "ENABLE_IPC"
+                assert value == "1"
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+@skipif(distributed is not None, reason="This test requires dask to not be installed")
+def test_dask_backend_when_dask_not_installed(context):
+    with raises(ValueError, match="Please install dask"):
+        context("dask")
+
+
+@parametrize("context", [parallel_config, parallel_backend])
+def test_zero_worker_backend(context):
+    # joblib.Parallel should reject parallel backends that have no worker
+    # with an explicit error message.
+    class ZeroWorkerBackend(ThreadingBackend):
+        def configure(self, *args, **kwargs):
+            return 0
+
+        def apply_async(self, func, callback=None):  # pragma: no cover
+            raise TimeoutError("No worker available")
+
+        def effective_n_jobs(self, n_jobs):  # pragma: no cover
+            return 0
+
+    expected_msg = "ZeroWorkerBackend has no active worker"
+    with context(ZeroWorkerBackend()):
+        with pytest.raises(RuntimeError, match=expected_msg):
+            Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
+
+
+def test_globals_update_at_each_parallel_call():
+    # This is a non-regression test related to joblib issues #836 and #833.
+    # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where
+    # changes to global variables in a parent process between two calls to
+    # joblib.Parallel would not be propagated into the workers.
+    global MY_GLOBAL_VARIABLE
+    MY_GLOBAL_VARIABLE = "original value"
+
+    def check_globals():
+        global MY_GLOBAL_VARIABLE
+        return MY_GLOBAL_VARIABLE
+
+    assert check_globals() == "original value"
+
+    workers_global_variable = Parallel(n_jobs=2)(
+        delayed(check_globals)() for i in range(2)
+    )
+    assert set(workers_global_variable) == {"original value"}
+
+    # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
+    # propagated into the workers environment
+    MY_GLOBAL_VARIABLE = "changed value"
+    assert check_globals() == "changed value"
+
+    workers_global_variable = Parallel(n_jobs=2)(
+        delayed(check_globals)() for i in range(2)
+    )
+    assert set(workers_global_variable) == {"changed value"}
+
+
+##############################################################################
+# Test environment variables in the child env, in particular for limiting
+# the maximal number of threads in C-library threadpools.
+#
+
+
+def _check_numpy_threadpool_limits():
+    import numpy as np
+
+    # Let's call BLAS on a matrix-matrix multiplication with dimensions large
+    # enough to ensure that the threadpool managed by the underlying BLAS
+    # implementation is actually used so as to force its initialization.
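+    # A 100x100 product is small, but it is typically enough to make BLAS
+    # spin up its thread pool, which threadpoolctl can then introspect.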
+    a = np.random.randn(100, 100)
+    np.dot(a, a)
+    threadpoolctl = pytest.importorskip("threadpoolctl")
+    return threadpoolctl.threadpool_info()
+
+
+def _parent_max_num_threads_for(child_module, parent_info):
+    for parent_module in parent_info:
+        if parent_module["filepath"] == child_module["filepath"]:
+            return parent_module["num_threads"]
+    raise ValueError(
+        "An unexpected module was loaded in child:\n{}".format(child_module)
+    )
+
+
+def check_child_num_threads(workers_info, parent_info, num_threads):
+    # Check that the number of threads reported in workers_info is consistent
+    # with the expectation. We need to be careful to handle the cases where
+    # the requested number of threads is below max_num_threads for the library.
+    for child_threadpool_info in workers_info:
+        for child_module in child_threadpool_info:
+            parent_max_num_threads = _parent_max_num_threads_for(
+                child_module, parent_info
+            )
+            expected = {min(num_threads, parent_max_num_threads), num_threads}
+            assert child_module["num_threads"] in expected
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("n_jobs", [2, 4, -2, -1])
+def test_threadpool_limitation_in_child_loky(n_jobs):
+    # Check that the protection against oversubscription in workers is working
+    # using threadpoolctl functionalities.
+
+    # Skip this test if numpy is not linked to a BLAS library
+    parent_info = _check_numpy_threadpool_limits()
+    if len(parent_info) == 0:
+        pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+    workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
+        delayed(_check_numpy_threadpool_limits)() for i in range(2)
+    )
+
+    n_jobs = effective_n_jobs(n_jobs)
+    if n_jobs == 1:
+        expected_child_num_threads = parent_info[0]["num_threads"]
+    else:
+        expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+
+    check_child_num_threads(
+        workers_threadpool_infos, parent_info, expected_child_num_threads
+    )
+
+
+@with_numpy
+@with_multiprocessing
+@parametrize("inner_max_num_threads", [1, 2, 4, None])
+@parametrize("n_jobs", [2, -1])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_context(context, n_jobs, inner_max_num_threads):
+    # Check that the protection against oversubscription in workers is working
+    # using threadpoolctl functionalities.
+
+    # Skip this test if numpy is not linked to a BLAS library
+    parent_info = _check_numpy_threadpool_limits()
+    if len(parent_info) == 0:
+        pytest.skip(reason="Need a version of numpy linked to BLAS")
+
+    with context("loky", inner_max_num_threads=inner_max_num_threads):
+        workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
+            delayed(_check_numpy_threadpool_limits)() for i in range(2)
+        )
+
+    n_jobs = effective_n_jobs(n_jobs)
+    if n_jobs == 1:
+        expected_child_num_threads = parent_info[0]["num_threads"]
+    elif inner_max_num_threads is None:
+        expected_child_num_threads = max(cpu_count() // n_jobs, 1)
+    else:
+        expected_child_num_threads = inner_max_num_threads
+
+    check_child_num_threads(
+        workers_threadpool_infos, parent_info, expected_child_num_threads
+    )
+
+
+@with_multiprocessing
+@parametrize("n_jobs", [2, -1])
+@parametrize("var_name", ["OPENBLAS_NUM_THREADS", "MKL_NUM_THREADS", "OMP_NUM_THREADS"])
+@parametrize("context", [parallel_config, parallel_backend])
+def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
+    # Check that environment variables set by the user on the main process
+    # always take priority.
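+    # The inner_max_num_threads value from the joblib context should still
+    # override the inherited environment variable inside the workers, as
+    # checked at the end of this test.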
+
+    # Skip this test if the process is run sequentially
+    if effective_n_jobs(n_jobs) == 1:
+        pytest.skip("Skip test when n_jobs == 1")
+
+    # Clean up the existing executor because we change the environment of the
+    # parent at runtime, and loky intentionally does not detect such changes.
+    get_reusable_executor(reuse=True).shutdown()
+
+    def _get_env(var_name):
+        return os.environ.get(var_name)
+
+    original_var_value = os.environ.get(var_name)
+    try:
+        os.environ[var_name] = "4"
+        results = Parallel(n_jobs=n_jobs)(delayed(_get_env)(var_name) for i in range(2))
+        assert results == ["4", "4"]
+
+        with context("loky", inner_max_num_threads=1):
+            results = Parallel(n_jobs=n_jobs)(
+                delayed(_get_env)(var_name) for i in range(2)
+            )
+        assert results == ["1", "1"]
+
+    finally:
+        if original_var_value is None:
+            del os.environ[var_name]
+        else:
+            os.environ[var_name] = original_var_value
+
+
+@with_multiprocessing
+@parametrize("n_jobs", [2, 4, -1])
+def test_loky_reuse_workers(n_jobs):
+    # Non-regression test for issue #967 where the workers are not reused when
+    # calling multiple Parallel loops.
+
+    def parallel_call(n_jobs):
+        x = range(10)
+        Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))
+
+    # Run a parallel loop and get the workers used for computations
+    parallel_call(n_jobs)
+    first_executor = get_reusable_executor(reuse=True)
+
+    # Ensure that the workers are reused for the next calls, as the executor is
+    # not restarted.
+    for _ in range(10):
+        parallel_call(n_jobs)
+        executor = get_reusable_executor(reuse=True)
+        assert executor == first_executor
+
+
+def _set_initialized(status):
+    status[os.getpid()] = "initialized"
+
+
+def _check_status(status, n_jobs, wait_workers=False):
+    pid = os.getpid()
+    state = status.get(pid, None)
+    assert state in ("initialized", "started"), (
+        f"worker should have been in initialized state, got {state}"
+    )
+    if not wait_workers:
+        return
+
+    status[pid] = "started"
+    # wait up to 30 seconds for the workers to be initialized
+    deadline = time.time() + 30
+    n_started = len([pid for pid, v in status.items() if v == "started"])
+    while time.time() < deadline and n_started < n_jobs:
+        time.sleep(0.1)
+        n_started = len([pid for pid, v in status.items() if v == "started"])
+
+    if time.time() >= deadline:
+        raise TimeoutError("Waited more than 30s to start all the workers")
+
+    return pid
+
+
+@with_multiprocessing
+@parametrize("n_jobs", [2, 4])
+@parametrize("backend", PROCESS_BACKENDS)
+@parametrize("context", [parallel_config, parallel_backend])
+def test_initializer_context(n_jobs, backend, context):
+    manager = mp.Manager()
+    status = manager.dict()
+
+    # pass the initializer to the backend context
+    with context(
+        backend=backend,
+        n_jobs=n_jobs,
+        initializer=_set_initialized,
+        initargs=(status,),
+    ):
+        # check_status checks that the initializer is correctly called
+        Parallel()(delayed(_check_status)(status, n_jobs) for i in range(100))
+
+
+@with_multiprocessing
+@parametrize("n_jobs", [2, 4])
+@parametrize("backend", PROCESS_BACKENDS)
+def test_initializer_parallel(n_jobs, backend):
+    manager = mp.Manager()
+    status = manager.dict()
+
+    # pass the initializer directly to the Parallel call
+    # check_status checks that the initializer is called in all tasks
+    Parallel(
+        backend=backend,
+        n_jobs=n_jobs,
+        initializer=_set_initialized,
+        initargs=(status,),
+    )(delayed(_check_status)(status, n_jobs) for i in range(100))
+
+
+@with_multiprocessing
+@pytest.mark.parametrize("n_jobs", [2, 4])
+def test_initializer_reused(n_jobs):
+    # Check that it is possible to pass initializer config via the `Parallel`
+    # call directly and that the workers are reused when the arguments are
+    # the same.
+    n_repetitions = 3
+    manager = mp.Manager()
+    status = manager.dict()
+
+    pids = set()
+    for i in range(n_repetitions):
+        results = Parallel(
+            backend="loky",
+            n_jobs=n_jobs,
+            initializer=_set_initialized,
+            initargs=(status,),
+        )(
+            delayed(_check_status)(status, n_jobs, wait_workers=True)
+            for i in range(n_jobs)
+        )
+        pids = pids.union(set(results))
+    assert len(pids) == n_jobs, (
+        "The workers should be reused when the initializer is the same"
+    )
+
+
+@with_multiprocessing
+@pytest.mark.parametrize("n_jobs", [2, 4])
+def test_initializer_not_reused(n_jobs):
+    # Check that when changing the initializer arguments, each parallel call
+    # uses its own initializer args, independently of the previous calls,
+    # hence the loky workers are not reused.
+    n_repetitions = 3
+    manager = mp.Manager()
+
+    pids = set()
+    for i in range(n_repetitions):
+        status = manager.dict()
+        results = Parallel(
+            backend="loky",
+            n_jobs=n_jobs,
+            initializer=_set_initialized,
+            initargs=(status,),
+        )(
+            delayed(_check_status)(status, n_jobs, wait_workers=True)
+            for i in range(n_jobs)
+        )
+        pids = pids.union(set(results))
+    assert len(pids) == n_repetitions * n_jobs, (
+        "The workers should not be reused when the initializer arguments change"
+    )
diff --git a/lib/python3.10/site-packages/joblib/test/test_store_backends.py b/lib/python3.10/site-packages/joblib/test/test_store_backends.py
new file mode 100644
index 0000000000000000000000000000000000000000..d480455b50aa41113096ecb5455a69d0d0fc612e
--- /dev/null
+++ b/lib/python3.10/site-packages/joblib/test/test_store_backends.py
@@ -0,0 +1,94 @@
+try:
+    # Python 2.7: use the C pickle to speed up
+    # test_concurrency_safe_write which pickles big python objects
+    import cPickle as cpickle
+except ImportError:
+    import pickle as cpickle
+import functools
+import time
+from pickle import PicklingError
+
+import pytest
+
+from joblib import Parallel, delayed
+from joblib._store_backends import (
+    CacheWarning,
+    FileSystemStoreBackend,
+    concurrency_safe_write,
+)
+from joblib.backports import concurrency_safe_rename
+from joblib.test.common import with_multiprocessing
+from joblib.testing import parametrize, timeout
+
+
+def write_func(output, filename):
+    with open(filename, "wb") as f:
+        cpickle.dump(output, f)
+
+
+def load_func(expected, filename):
+    for i in range(10):
+        try:
+            with open(filename, "rb") as f:
+                reloaded = cpickle.load(f)
+            break
+        except (OSError, IOError):
+            # On Windows you can have WindowsError ([Error 5] Access
+            # is denied or [Error 13] Permission denied) when reading the file,
+            # probably because a writer process has a lock on the file
+            time.sleep(0.1)
+    else:
+        raise
+    assert expected == reloaded
+
+
+def concurrency_safe_write_rename(to_write, filename, write_func):
+    temporary_filename = concurrency_safe_write(to_write, filename, write_func)
+    concurrency_safe_rename(temporary_filename, filename)
+
+
+@timeout(0)  # No timeout as this test can be long
+@with_multiprocessing
+@parametrize("backend", ["multiprocessing", "loky", "threading"])
+def test_concurrency_safe_write(tmpdir, backend):
+    # Add one item to cache
+    filename = tmpdir.join("test.pkl").strpath
+
+    obj = {str(i): i for i in range(int(1e5))}
+    funcs = [
+        functools.partial(concurrency_safe_write_rename, write_func=write_func)
+        if i % 3 != 2
+        else load_func
+        for i
in range(12) + ] + Parallel(n_jobs=2, backend=backend)(delayed(func)(obj, filename) for func in funcs) + + +def test_warning_on_dump_failure(tmpdir): + # Check that a warning is raised when the dump fails for any reason but + # a PicklingError. + class UnpicklableObject(object): + def __reduce__(self): + raise RuntimeError("some exception") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join("test_warning_on_pickling_error").strpath + backend.compress = None + + with pytest.warns(CacheWarning, match="some exception"): + backend.dump_item("testpath", UnpicklableObject()) + + +def test_warning_on_pickling_error(tmpdir): + # This is separate from test_warning_on_dump_failure because in the + # future we will turn this into an exception. + class UnpicklableObject(object): + def __reduce__(self): + raise PicklingError("not picklable") + + backend = FileSystemStoreBackend() + backend.location = tmpdir.join("test_warning_on_pickling_error").strpath + backend.compress = None + + with pytest.warns(FutureWarning, match="not picklable"): + backend.dump_item("testpath", UnpicklableObject()) diff --git a/lib/python3.10/site-packages/joblib/test/test_testing.py b/lib/python3.10/site-packages/joblib/test/test_testing.py new file mode 100644 index 0000000000000000000000000000000000000000..f9c8d93d1cc148f7e4cb0a804a98f811e5d1ecf1 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_testing.py @@ -0,0 +1,87 @@ +import re +import sys + +from joblib.testing import check_subprocess_call, raises + + +def test_check_subprocess_call(): + code = "\n".join( + ["result = 1 + 2 * 3", "print(result)", "my_list = [1, 2, 3]", "print(my_list)"] + ) + + check_subprocess_call([sys.executable, "-c", code]) + + # Now checking stdout with a regex + check_subprocess_call( + [sys.executable, "-c", code], + # Regex needed for platform-specific line endings + stdout_regex=r"7\s{1,2}\[1, 2, 3\]", + ) + + +def test_check_subprocess_call_non_matching_regex(): + code = "42" + non_matching_pattern = "_no_way_this_matches_anything_" + + with raises(ValueError) as excinfo: + check_subprocess_call( + [sys.executable, "-c", code], stdout_regex=non_matching_pattern + ) + excinfo.match("Unexpected stdout.+{}".format(non_matching_pattern)) + + +def test_check_subprocess_call_wrong_command(): + wrong_command = "_a_command_that_does_not_exist_" + with raises(OSError): + check_subprocess_call([wrong_command]) + + +def test_check_subprocess_call_non_zero_return_code(): + code_with_non_zero_exit = "\n".join( + [ + "import sys", + 'print("writing on stdout")', + 'sys.stderr.write("writing on stderr")', + "sys.exit(123)", + ] + ) + + pattern = re.compile( + "Non-zero return code: 123.+" + "Stdout:\nwriting on stdout.+" + "Stderr:\nwriting on stderr", + re.DOTALL, + ) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, "-c", code_with_non_zero_exit]) + excinfo.match(pattern) + + +def test_check_subprocess_call_timeout(): + code_timing_out = "\n".join( + [ + "import time", + "import sys", + 'print("before sleep on stdout")', + "sys.stdout.flush()", + 'sys.stderr.write("before sleep on stderr")', + "sys.stderr.flush()", + # We need to sleep for at least 2 * timeout seconds in case the SIGKILL + # is triggered. 
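+            # Sleeping for 10s is far longer than the 1s timeout used below,
+            # so the child process is reliably killed while still sleeping.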
+ "time.sleep(10)", + 'print("process should have be killed before")', + "sys.stdout.flush()", + ] + ) + + pattern = re.compile( + "Non-zero return code:.+" + "Stdout:\nbefore sleep on stdout\\s+" + "Stderr:\nbefore sleep on stderr", + re.DOTALL, + ) + + with raises(ValueError) as excinfo: + check_subprocess_call([sys.executable, "-c", code_timing_out], timeout=1) + excinfo.match(pattern) diff --git a/lib/python3.10/site-packages/joblib/test/test_utils.py b/lib/python3.10/site-packages/joblib/test/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..37ac47c344a2f80f0da43c8a0b91f6c4870923fc --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/test_utils.py @@ -0,0 +1,25 @@ +import pytest + +from joblib._utils import eval_expr + + +@pytest.mark.parametrize( + "expr", + ["exec('import os')", "print(1)", "import os", "1+1; import os", "1^1"], +) +def test_eval_expr_invalid(expr): + with pytest.raises(ValueError, match="is not a valid or supported arithmetic"): + eval_expr(expr) + + +@pytest.mark.parametrize( + "expr, result", + [ + ("2*6", 12), + ("2**6", 64), + ("1 + 2*3**(4) / (6 + -7)", -161.0), + ("(20 // 3) % 5", 1), + ], +) +def test_eval_expr_valid(expr, result): + assert eval_expr(expr) == result diff --git a/lib/python3.10/site-packages/joblib/test/testutils.py b/lib/python3.10/site-packages/joblib/test/testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..765b9a157c06b428f35e57666d4df962910d0de3 --- /dev/null +++ b/lib/python3.10/site-packages/joblib/test/testutils.py @@ -0,0 +1,9 @@ +def return_slice_of_data(arr, start_idx, end_idx): + return arr[start_idx:end_idx] + + +def print_filename_and_raise(arr): + from joblib._memmapping_reducer import _get_backing_memmap + + print(_get_backing_memmap(arr).filename) + raise ValueError diff --git a/lib/python3.10/site-packages/jsonschema-4.25.0.dist-info/licenses/COPYING b/lib/python3.10/site-packages/jsonschema-4.25.0.dist-info/licenses/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..af9cfbdb134f42e5205ecbad597421d778826481 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema-4.25.0.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2013 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45fa7681b8204a5084681353aa60c90b45109058 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e21a1526cd38b34535d24eb0fc72d009c212583e Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_format.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..923eb47ecdfb1e9967864edd2c9321de51ce8eb8 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_format.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_keywords.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_keywords.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6925d37122ec55922559c355177af9dab338b855 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_keywords.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4332ac53ccc48e261b20e35bab00b9d30a27c573 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_legacy_keywords.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_types.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..078b05e968caec6f6f49b7989089b35feddbc2a1 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_types.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_typing.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..331e1d846b068e948ed3081ddda0f5aec93ec191 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_typing.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/_utils.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..320f161def7601114418b458d1697d0ce89ac5ad Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/cli.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1532b9da6cedeccb651a2d1e90906165076b605e Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/cli.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jsonschema/__pycache__/exceptions.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f56f5d1a1c15ddeb3737630c774c52e81050500e Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/exceptions.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/protocols.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/protocols.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fd46d9e1b26e6b01e800ac843089e31da49a894 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/protocols.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/__pycache__/validators.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/__pycache__/validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57c0fc8051ed3a9fb2074c4c0a48895ee37942a6 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/__pycache__/validators.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__init__.py b/lib/python3.10/site-packages/jsonschema/benchmarks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3dcc689930da95198c251ce806637d6413c8b1e --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/__init__.py @@ -0,0 +1,5 @@ +""" +Benchmarks for validation. + +This package is *not* public API. +""" diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d3a7a07fe129f186ac7d083576d8699423766e0 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..410a129995da9650da52586424a9bfb6791dabcf Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b71ef7ebac5a4a104746d54566f5002d3d9ea70 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d32a9e207941f185638fe9e427b30d71192ebf3 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..378479dbbdd1631985a6697bc595e2dd502ba935 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5207d8906f82b41e0e7f2db4ed5c40f8ef07b0b0 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/nested_schemas.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20a17db2a7f75164c3e8ec578f0fe40cc11b96f1 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a23b47d46c817069236511f254748cd0efae9c57 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df385edc79e4ddfe157fe2f5f2fa633ee2923e02 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1817ee366b78594885148aa22a57664b6e49d284 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec52a094afddf3da5b06426e601b94bfd3dc7014 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/validator_creation.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/const_vs_enum.py b/lib/python3.10/site-packages/jsonschema/benchmarks/const_vs_enum.py new file mode 100644 index 0000000000000000000000000000000000000000..c6fecd10f6d8b845c675be9c19e9b504b08d30b9 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/const_vs_enum.py @@ -0,0 +1,30 @@ +""" +A benchmark for comparing equivalent validation of `const` and `enum`. 
+""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +value = [37] * 100 +const_schema = {"const": list(value)} +enum_schema = {"enum": [list(value)]} + +valid = list(value) +invalid = [*valid, 73] + +const = Draft202012Validator(const_schema) +enum = Draft202012Validator(enum_schema) + +assert const.is_valid(valid) +assert enum.is_valid(valid) +assert not const.is_valid(invalid) +assert not enum.is_valid(invalid) + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("const valid", lambda: const.is_valid(valid)) + runner.bench_func("const invalid", lambda: const.is_valid(invalid)) + runner.bench_func("enum valid", lambda: enum.is_valid(valid)) + runner.bench_func("enum invalid", lambda: enum.is_valid(invalid)) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/contains.py b/lib/python3.10/site-packages/jsonschema/benchmarks/contains.py new file mode 100644 index 0000000000000000000000000000000000000000..739cd044cceb807b4029dca9447e954214a24809 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/contains.py @@ -0,0 +1,28 @@ +""" +A benchmark for validation of the `contains` keyword. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +schema = { + "type": "array", + "contains": {"const": 37}, +} +validator = Draft202012Validator(schema) + +size = 1000 +beginning = [37] + [0] * (size - 1) +middle = [0] * (size // 2) + [37] + [0] * (size // 2) +end = [0] * (size - 1) + [37] +invalid = [0] * size + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("baseline", lambda: validator.is_valid([])) + runner.bench_func("beginning", lambda: validator.is_valid(beginning)) + runner.bench_func("middle", lambda: validator.is_valid(middle)) + runner.bench_func("end", lambda: validator.is_valid(end)) + runner.bench_func("invalid", lambda: validator.is_valid(invalid)) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/issue232.py b/lib/python3.10/site-packages/jsonschema/benchmarks/issue232.py new file mode 100644 index 0000000000000000000000000000000000000000..efd07154822e4b0609900482eb26636fc3c100eb --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/issue232.py @@ -0,0 +1,25 @@ +""" +A performance benchmark using the example from issue #232. + +See https://github.com/python-jsonschema/jsonschema/pull/232. 
+""" +from pathlib import Path + +from pyperf import Runner +from referencing import Registry + +from jsonschema.tests._suite import Version +import jsonschema + +issue232 = Version( + path=Path(__file__).parent / "issue232", + remotes=Registry(), + name="issue232", +) + + +if __name__ == "__main__": + issue232.benchmark( + runner=Runner(), + Validator=jsonschema.Draft4Validator, + ) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/issue232/issue.json b/lib/python3.10/site-packages/jsonschema/benchmarks/issue232/issue.json new file mode 100644 index 0000000000000000000000000000000000000000..804c340845fa0694e8db1083cfe7c290c04f36b1 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/issue232/issue.json @@ -0,0 +1,2653 @@ +[ + { + "description": "Petstore", + "schema": { + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } + }, + "tests": [ + { + "description": "Example petsore", + "data": { + "swagger": "2.0", + "info": { + "description": "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). 
For this sample, you can use the api key `special-key` to test the authorization filters.", + "version": "1.0.0", + "title": "Swagger Petstore", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "email": "apiteam@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + } + }, + "host": "petstore.swagger.io", + "basePath": "/v2", + "tags": [ + { + "name": "pet", + "description": "Everything about your Pets", + "externalDocs": { + "description": "Find out more", + "url": "http://swagger.io" + } + }, + { + "name": "store", + "description": "Access to Petstore orders" + }, + { + "name": "user", + "description": "Operations about user", + "externalDocs": { + "description": "Find out more about our store", + "url": "http://swagger.io" + } + } + ], + "schemes": [ + "http" + ], + "paths": { + "/pet": { + "post": { + "tags": [ + "pet" + ], + "summary": "Add a new pet to the store", + "description": "", + "operationId": "addPet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "put": { + "tags": [ + "pet" + ], + "summary": "Update an existing pet", + "description": "", + "operationId": "updatePet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + }, + "405": { + "description": "Validation exception" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByStatus": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by status", + "description": "Multiple status values can be provided with comma separated strings", + "operationId": "findPetsByStatus", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "status", + "in": "query", + "description": "Status values that need to be considered for filter", + "required": true, + "type": "array", + "items": { + "type": "string", + "enum": [ + "available", + "pending", + "sold" + ], + "default": "available" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid status value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByTags": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by tags", + "description": "Multiple tags can be provided with comma separated strings.
Use tag1, tag2, tag3 for testing.", + "operationId": "findPetsByTags", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "tags", + "in": "query", + "description": "Tags to filter by", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid tag value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ], + "deprecated": true + } + }, + "/pet/{petId}": { + "get": { + "tags": [ + "pet" + ], + "summary": "Find pet by ID", + "description": "Returns a single pet", + "operationId": "getPetById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to return", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "api_key": [] + } + ] + }, + "post": { + "tags": [ + "pet" + ], + "summary": "Updates a pet in the store with form data", + "description": "", + "operationId": "updatePetWithForm", + "consumes": [ + "application/x-www-form-urlencoded" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be updated", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "name", + "in": "formData", + "description": "Updated name of the pet", + "required": false, + "type": "string" + }, + { + "name": "status", + "in": "formData", + "description": "Updated status of the pet", + "required": false, + "type": "string" + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "delete": { + "tags": [ + "pet" + ], + "summary": "Deletes a pet", + "description": "", + "operationId": "deletePet", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "api_key", + "in": "header", + "required": false, + "type": "string" + }, + { + "name": "petId", + "in": "path", + "description": "Pet id to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/{petId}/uploadImage": { + "post": { + "tags": [ + "pet" + ], + "summary": "uploads an image", + "description": "", + "operationId": "uploadFile", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to update", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "additionalMetadata", + "in": "formData", + "description": "Additional data to pass to server", + "required": false, + "type": "string" + }, + { + "name": "file", + "in": "formData", + "description": "file to upload", + "required": false, + "type": "file" + } + 
], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/ApiResponse" + } + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/store/inventory": { + "get": { + "tags": [ + "store" + ], + "summary": "Returns pet inventories by status", + "description": "Returns a map of status codes to quantities", + "operationId": "getInventory", + "produces": [ + "application/json" + ], + "parameters": [], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int32" + } + } + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/store/order": { + "post": { + "tags": [ + "store" + ], + "summary": "Place an order for a pet", + "description": "", + "operationId": "placeOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "order placed for purchasing the pet", + "required": true, + "schema": { + "$ref": "#/definitions/Order" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid Order" + } + } + } + }, + "/store/order/{orderId}": { + "get": { + "tags": [ + "store" + ], + "summary": "Find purchase order by ID", + "description": "For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions", + "operationId": "getOrderById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "maximum": 10.0, + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + }, + "delete": { + "tags": [ + "store" + ], + "summary": "Delete purchase order by ID", + "description": "For valid response try integer IDs with positive integer value. 
Negative or non-integer values will generate API errors", + "operationId": "deleteOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of the order that needs to be deleted", + "required": true, + "type": "integer", + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + } + }, + "/user": { + "post": { + "tags": [ + "user" + ], + "summary": "Create user", + "description": "This can only be done by the logged in user.", + "operationId": "createUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Created user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithArray": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithArrayInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user objects", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithList": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithListInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user objects", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/login": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs user into the system", + "description": "", + "operationId": "loginUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "query", + "description": "The user name for login", + "required": true, + "type": "string" + }, + { + "name": "password", + "in": "query", + "description": "The password for login in clear text", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "string" + }, + "headers": { + "X-Rate-Limit": { + "type": "integer", + "format": "int32", + "description": "calls per hour allowed by the user" + }, + "X-Expires-After": { + "type": "string", + "format": "date-time", + "description": "date in UTC when token expires" + } + } + }, + "400": { + "description": "Invalid username/password supplied" + } + } + } + }, + "/user/logout": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs out current logged in user session", + "description": "", + "operationId": "logoutUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/{username}": { + "get": { + "tags": [ + "user" + ], + "summary": "Get user by user name", + "description": "", + "operationId":
"getUserByName", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be fetched. Use user1 for testing. ", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/User" + } + }, + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "put": { + "tags": [ + "user" + ], + "summary": "Updated user", + "description": "This can only be done by the logged in user.", + "operationId": "updateUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "name that need to be updated", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "description": "Updated user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "400": { + "description": "Invalid user supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "delete": { + "tags": [ + "user" + ], + "summary": "Delete user", + "description": "This can only be done by the logged in user.", + "operationId": "deleteUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be deleted", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + } + } + }, + "securityDefinitions": { + "petstore_auth": { + "type": "oauth2", + "authorizationUrl": "http://petstore.swagger.io/oauth/dialog", + "flow": "implicit", + "scopes": { + "write:pets": "modify pets in your account", + "read:pets": "read your pets" + } + }, + "api_key": { + "type": "apiKey", + "name": "api_key", + "in": "header" + } + }, + "definitions": { + "Order": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "petId": { + "type": "integer", + "format": "int64" + }, + "quantity": { + "type": "integer", + "format": "int32" + }, + "shipDate": { + "type": "string", + "format": "date-time" + }, + "status": { + "type": "string", + "description": "Order Status", + "enum": [ + "placed", + "approved", + "delivered" + ] + }, + "complete": { + "type": "boolean", + "default": false + } + }, + "xml": { + "name": "Order" + } + }, + "Category": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Category" + } + }, + "User": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "username": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "email": { + "type": "string" + }, + "password": { + "type": "string" + }, + "phone": { + "type": "string" + }, + "userStatus": { + "type": "integer", + "format": "int32", + "description": "User Status" + } + }, + "xml": { + "name": "User" + } + }, + "Tag": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Tag" + } + }, + "Pet": { + "type": "object", + "required": [ + "name", + "photoUrls" + ], + "properties": { + "id": { + "type": 
"integer", + "format": "int64" + }, + "category": { + "$ref": "#/definitions/Category" + }, + "name": { + "type": "string", + "example": "doggie" + }, + "photoUrls": { + "type": "array", + "xml": { + "name": "photoUrl", + "wrapped": true + }, + "items": { + "type": "string" + } + }, + "tags": { + "type": "array", + "xml": { + "name": "tag", + "wrapped": true + }, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "status": { + "type": "string", + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ] + } + }, + "xml": { + "name": "Pet" + } + }, + "ApiResponse": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string" + }, + "message": { + "type": "string" + } + } + } + }, + "externalDocs": { + "description": "Find out more about Swagger", + "url": "http://swagger.io" + } + }, + "valid": true + } + ] + } +] diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/json_schema_test_suite.py b/lib/python3.10/site-packages/jsonschema/benchmarks/json_schema_test_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..905fb6a3b88faf56e3288f7eb5053172f97abe8b --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/json_schema_test_suite.py @@ -0,0 +1,12 @@ +""" +A performance benchmark using the official test suite. + +This benchmarks jsonschema using every valid example in the +JSON-Schema-Test-Suite. It will take some time to complete. +""" +from pyperf import Runner + +from jsonschema.tests._suite import Suite + +if __name__ == "__main__": + Suite().benchmark(runner=Runner()) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/nested_schemas.py b/lib/python3.10/site-packages/jsonschema/benchmarks/nested_schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..b025c47cfd6736b0ea3d36856c1e2c8de35dde79 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/nested_schemas.py @@ -0,0 +1,56 @@ +""" +Validating highly nested schemas shouldn't cause exponential time blowups. + +See https://github.com/python-jsonschema/jsonschema/issues/1097. +""" +from itertools import cycle + +from jsonschema.validators import validator_for + +metaschemaish = { + "$id": "https://example.com/draft/2020-12/schema/strict", + "$schema": "https://json-schema.org/draft/2020-12/schema", + + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": True, + "https://json-schema.org/draft/2020-12/vocab/applicator": True, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": True, + "https://json-schema.org/draft/2020-12/vocab/validation": True, + "https://json-schema.org/draft/2020-12/vocab/meta-data": True, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": True, + "https://json-schema.org/draft/2020-12/vocab/content": True, + }, + "$dynamicAnchor": "meta", + + "$ref": "https://json-schema.org/draft/2020-12/schema", + "unevaluatedProperties": False, +} + + +def nested_schema(levels): + """ + Produce a schema which validates deeply nested objects and arrays. 
+ """ + + names = cycle(["foo", "bar", "baz", "quux", "spam", "eggs"]) + schema = {"type": "object", "properties": {"ham": {"type": "string"}}} + for _, name in zip(range(levels - 1), names): + schema = {"type": "object", "properties": {name: schema}} + return schema + + +validator = validator_for(metaschemaish)(metaschemaish) + +if __name__ == "__main__": + from pyperf import Runner + runner = Runner() + + not_nested = nested_schema(levels=1) + runner.bench_func("not nested", lambda: validator.is_valid(not_nested)) + + for levels in range(1, 11, 3): + schema = nested_schema(levels=levels) + runner.bench_func( + f"nested * {levels}", + lambda schema=schema: validator.is_valid(schema), + ) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/subcomponents.py b/lib/python3.10/site-packages/jsonschema/benchmarks/subcomponents.py new file mode 100644 index 0000000000000000000000000000000000000000..6d78c7be6c88d177b89dfc80e52acae7820156a5 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/subcomponents.py @@ -0,0 +1,42 @@ +""" +A benchmark which tries to compare the possible slow subparts of validation. +""" +from referencing import Registry +from referencing.jsonschema import DRAFT202012 +from rpds import HashTrieMap, HashTrieSet + +from jsonschema import Draft202012Validator + +schema = { + "type": "array", + "minLength": 1, + "maxLength": 1, + "items": {"type": "integer"}, +} + +hmap = HashTrieMap() +hset = HashTrieSet() + +registry = Registry() + +v = Draft202012Validator(schema) + + +def registry_data_structures(): + return hmap.insert("foo", "bar"), hset.insert("foo") + + +def registry_add(): + resource = DRAFT202012.create_resource(schema) + return registry.with_resource(uri="urn:example", resource=resource) + + +if __name__ == "__main__": + from pyperf import Runner + runner = Runner() + + runner.bench_func("HashMap/HashSet insertion", registry_data_structures) + runner.bench_func("Registry insertion", registry_add) + runner.bench_func("Success", lambda: v.is_valid([1])) + runner.bench_func("Failure", lambda: v.is_valid(["foo"])) + runner.bench_func("Metaschema validation", lambda: v.check_schema(schema)) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/unused_registry.py b/lib/python3.10/site-packages/jsonschema/benchmarks/unused_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..7b272c235625378b231d1c113bcb8ef45851cbc7 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/unused_registry.py @@ -0,0 +1,35 @@ +""" +An unused schema registry should not cause slower validation. + +"Unused" here means one where no reference resolution is occurring anyhow. + +See https://github.com/python-jsonschema/jsonschema/issues/1088. 
+""" +from pyperf import Runner +from referencing import Registry +from referencing.jsonschema import DRAFT201909 + +from jsonschema import Draft201909Validator + +registry = Registry().with_resource( + "urn:example:foo", + DRAFT201909.create_resource({}), +) + +schema = {"$ref": "https://json-schema.org/draft/2019-09/schema"} +instance = {"maxLength": 4} + +no_registry = Draft201909Validator(schema) +with_useless_registry = Draft201909Validator(schema, registry=registry) + +if __name__ == "__main__": + runner = Runner() + + runner.bench_func( + "no registry", + lambda: no_registry.is_valid(instance), + ) + runner.bench_func( + "useless registry", + lambda: with_useless_registry.is_valid(instance), + ) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py b/lib/python3.10/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py new file mode 100644 index 0000000000000000000000000000000000000000..f3229c0b8b9341e80ed087297efe31b04aa77fd7 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/useless_applicator_schemas.py @@ -0,0 +1,106 @@ + +""" +A benchmark for validation of applicators containing lots of useless schemas. + +Signals a small possible optimization to remove all such schemas ahead of time. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator as Validator + +NUM_USELESS = 100000 + +subschema = {"const": 37} + +valid = 37 +invalid = 12 + +baseline = Validator(subschema) + + +# These should be indistinguishable from just `subschema` +by_name = { + "single subschema": { + "anyOf": Validator({"anyOf": [subschema]}), + "allOf": Validator({"allOf": [subschema]}), + "oneOf": Validator({"oneOf": [subschema]}), + }, + "redundant subschemas": { + "anyOf": Validator({"anyOf": [subschema] * NUM_USELESS}), + "allOf": Validator({"allOf": [subschema] * NUM_USELESS}), + }, + "useless successful subschemas (beginning)": { + "anyOf": Validator({"anyOf": [subschema, *[True] * NUM_USELESS]}), + "allOf": Validator({"allOf": [subschema, *[True] * NUM_USELESS]}), + }, + "useless successful subschemas (middle)": { + "anyOf": Validator( + { + "anyOf": [ + *[True] * (NUM_USELESS // 2), + subschema, + *[True] * (NUM_USELESS // 2), + ], + }, + ), + "allOf": Validator( + { + "allOf": [ + *[True] * (NUM_USELESS // 2), + subschema, + *[True] * (NUM_USELESS // 2), + ], + }, + ), + }, + "useless successful subschemas (end)": { + "anyOf": Validator({"anyOf": [*[True] * NUM_USELESS, subschema]}), + "allOf": Validator({"allOf": [*[True] * NUM_USELESS, subschema]}), + }, + "useless failing subschemas (beginning)": { + "anyOf": Validator({"anyOf": [subschema, *[False] * NUM_USELESS]}), + "oneOf": Validator({"oneOf": [subschema, *[False] * NUM_USELESS]}), + }, + "useless failing subschemas (middle)": { + "anyOf": Validator( + { + "anyOf": [ + *[False] * (NUM_USELESS // 2), + subschema, + *[False] * (NUM_USELESS // 2), + ], + }, + ), + "oneOf": Validator( + { + "oneOf": [ + *[False] * (NUM_USELESS // 2), + subschema, + *[False] * (NUM_USELESS // 2), + ], + }, + ), + }, + "useless failing subschemas (end)": { + "anyOf": Validator({"anyOf": [*[False] * NUM_USELESS, subschema]}), + "oneOf": Validator({"oneOf": [*[False] * NUM_USELESS, subschema]}), + }, +} + +if __name__ == "__main__": + runner = Runner() + + runner.bench_func("baseline valid", lambda: baseline.is_valid(valid)) + runner.bench_func("baseline invalid", lambda: baseline.is_valid(invalid)) + + for group, applicators in by_name.items(): + for applicator, validator in 
applicators.items(): + runner.bench_func( + f"{group}: {applicator} valid", + lambda validator=validator: validator.is_valid(valid), + ) + runner.bench_func( + f"{group}: {applicator} invalid", + lambda validator=validator: validator.is_valid(invalid), + ) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/useless_keywords.py b/lib/python3.10/site-packages/jsonschema/benchmarks/useless_keywords.py new file mode 100644 index 0000000000000000000000000000000000000000..50f435989dd14d34d804508067e91e5fae4a275c --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/useless_keywords.py @@ -0,0 +1,32 @@ +""" +A benchmark for validation of schemas containing lots of useless keywords. + +Checks we filter them out once, ahead of time. +""" + +from pyperf import Runner + +from jsonschema import Draft202012Validator + +NUM_USELESS = 100000 +schema = dict( + [ + ("not", {"const": 42}), + *((str(i), i) for i in range(NUM_USELESS)), + ("type", "integer"), + *((str(i), i) for i in range(NUM_USELESS, 2 * NUM_USELESS)), # padding so "minimum" sits at the end + ("minimum", 37), + ], +) +validator = Draft202012Validator(schema) + +valid = 3737 +invalid = 12 + + +if __name__ == "__main__": + runner = Runner() + runner.bench_func("beginning of schema", lambda: validator.is_valid(42)) + runner.bench_func("middle of schema", lambda: validator.is_valid("foo")) + runner.bench_func("end of schema", lambda: validator.is_valid(12)) + runner.bench_func("valid", lambda: validator.is_valid(3737)) diff --git a/lib/python3.10/site-packages/jsonschema/benchmarks/validator_creation.py b/lib/python3.10/site-packages/jsonschema/benchmarks/validator_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..4baeb3a31641a027496732a6f10e200346551209 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/benchmarks/validator_creation.py @@ -0,0 +1,14 @@ +from pyperf import Runner + +from jsonschema import Draft202012Validator + +schema = { + "type": "array", + "minLength": 1, + "maxLength": 1, + "items": {"type": "integer"}, +} + + +if __name__ == "__main__": + Runner().bench_func("validator creation", Draft202012Validator, schema) diff --git a/lib/python3.10/site-packages/jsonschema/tests/__init__.py b/lib/python3.10/site-packages/jsonschema/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..beb0e6241d54e491f9d932d1e5871240bd51cef9 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/_suite.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30b0b152f7e4035baae49ebd26309c568f47bd4d Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/_suite.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4415fcc1a47541b29bd3d36fd8f45385e3ea9fa Binary files /dev/null and
b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/fuzz_validate.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60b83db16189cff161d151e9c68df6fb3f67977a Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_cli.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09e76cf8c841af424f847c71d4f74ae8ba3ff880 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_deprecations.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7a11c0ae8650887ba5d68e5768cda40cb962f4c Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_exceptions.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_format.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f6d508dfba3e43b0807182ea73c61d697af9303 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_format.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8157f865d20d9e9c1631df57db71389566da6487 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_jsonschema_test_suite.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_types.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06191e8b6a8b0eec78166f4b08e6eb0da05e3385 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_types.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a198139de7bca36e7e82ccac4d884dad62d175c0 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..670438b9f11f2ca0fcc56e1795f117db8d045a67 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema/tests/__pycache__/test_validators.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jsonschema/tests/_suite.py b/lib/python3.10/site-packages/jsonschema/tests/_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..d61d38277d31d3eba23f6681d92066b8dba3ffc6 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/_suite.py @@ -0,0 +1,285 @@ +""" +Python representations of the JSON Schema Test Suite tests. +""" +from __future__ import annotations + +from contextlib import suppress +from functools import partial +from pathlib import Path +from typing import TYPE_CHECKING, Any +import json +import os +import re +import sys +import unittest + +from attrs import field, frozen +from referencing import Registry +import referencing.jsonschema + +if TYPE_CHECKING: + from collections.abc import Iterable, Mapping, Sequence + + from referencing.jsonschema import Schema + import pyperf + +from jsonschema.validators import _VALIDATORS +import jsonschema + +MAGIC_REMOTE_URL = "http://localhost:1234" + +_DELIMITERS = re.compile(r"[\W\- ]+") + + +def _find_suite(): + root = os.environ.get("JSON_SCHEMA_TEST_SUITE") + if root is not None: + return Path(root) + + root = Path(jsonschema.__file__).parent.parent / "json" + if not root.is_dir(): # pragma: no cover + raise ValueError( + ( + "Can't find the JSON-Schema-Test-Suite directory. " + "Set the 'JSON_SCHEMA_TEST_SUITE' environment " + "variable or run the tests from alongside a checkout " + "of the suite." + ), + ) + return root + + +@frozen +class Suite: + + _root: Path = field(factory=_find_suite) + + + def benchmark(self, runner: pyperf.Runner): # pragma: no cover + for name, Validator in _VALIDATORS.items(): + self.version(name=name).benchmark( + runner=runner, + Validator=Validator, + ) + + def version(self, name) -> Version: + Validator = _VALIDATORS[name] + uri: str = Validator.ID_OF(Validator.META_SCHEMA) # type: ignore[assignment] + specification = referencing.jsonschema.specification_with(uri) + + registry = Registry().with_contents( + remotes_in(root=self._root / "remotes", name=name, uri=uri), + default_specification=specification, + ) + return Version( + name=name, + path=self._root / "tests" / name, + remotes=registry, + ) + + +@frozen +class Version: + + _path: Path + _remotes: referencing.jsonschema.SchemaRegistry + + name: str + + def benchmark(self, **kwargs): # pragma: no cover + for case in self.cases(): + case.benchmark(**kwargs) + + def cases(self) -> Iterable[_Case]: + return self._cases_in(paths=self._path.glob("*.json")) + + def format_cases(self) -> Iterable[_Case]: + return self._cases_in(paths=self._path.glob("optional/format/*.json")) + + def optional_cases_of(self, name: str) -> Iterable[_Case]: + return self._cases_in(paths=[self._path / "optional" / f"{name}.json"]) + + def to_unittest_testcase(self, *groups, **kwargs): + name = kwargs.pop("name", "Test" + self.name.title().replace("-", "")) + methods = { + method.__name__: method + for method in ( + test.to_unittest_method(**kwargs) + for group in groups + for case in group + for test in case.tests + ) + } + cls = type(name, (unittest.TestCase,), methods) + + # We're doing crazy things, so if they go wrong, like a function + # behaving differently on some other interpreter, just make them + # not happen.
+ with suppress(Exception): + cls.__module__ = _someone_save_us_the_module_of_the_caller() + + return cls + + def _cases_in(self, paths: Iterable[Path]) -> Iterable[_Case]: + for path in paths: + for case in json.loads(path.read_text(encoding="utf-8")): + yield _Case.from_dict( + case, + version=self, + subject=path.stem, + remotes=self._remotes, + ) + + +@frozen +class _Case: + + version: Version + + subject: str + description: str + schema: Mapping[str, Any] | bool + tests: list[_Test] + comment: str | None = None + specification: Sequence[dict[str, str]] = () + + @classmethod + def from_dict(cls, data, remotes, **kwargs): + data.update(kwargs) + tests = [ + _Test( + version=data["version"], + subject=data["subject"], + case_description=data["description"], + schema=data["schema"], + remotes=remotes, + **test, + ) for test in data.pop("tests") + ] + return cls(tests=tests, **data) + + def benchmark(self, runner: pyperf.Runner, **kwargs): # pragma: no cover + for test in self.tests: + runner.bench_func( + test.fully_qualified_name, + partial(test.validate_ignoring_errors, **kwargs), + ) + + +def remotes_in( + root: Path, + name: str, + uri: str, +) -> Iterable[tuple[str, Schema]]: + # This messy logic is because the test suite is terrible at indicating + # what remotes are needed for what drafts, and mixes in schemas which + # have no $schema and which are invalid under earlier versions, in with + # other schemas which are needed for tests. + + for each in root.rglob("*.json"): + schema = json.loads(each.read_text()) + + relative = str(each.relative_to(root)).replace("\\", "/") + + if ( + ( # invalid boolean schema + name in {"draft3", "draft4"} + and each.stem == "tree" + ) or + ( # draft/*.json + "$schema" not in schema + and relative.startswith("draft") + and not relative.startswith(name) + ) + ): + continue + yield f"{MAGIC_REMOTE_URL}/{relative}", schema + + +@frozen(repr=False) +class _Test: + + version: Version + + subject: str + case_description: str + description: str + + data: Any + schema: Mapping[str, Any] | bool + + valid: bool + + _remotes: referencing.jsonschema.SchemaRegistry + + comment: str | None = None + + def __repr__(self): # pragma: no cover + return f"<Test {self.fully_qualified_name}>" + + @property + def fully_qualified_name(self): # pragma: no cover + return " > ".join( # noqa: FLY002 + [ + self.version.name, + self.subject, + self.case_description, + self.description, + ], + ) + + def to_unittest_method(self, skip=lambda test: None, **kwargs): + if self.valid: + def fn(this): + self.validate(**kwargs) + else: + def fn(this): + with this.assertRaises(jsonschema.ValidationError): + self.validate(**kwargs) + + fn.__name__ = "_".join( + [ + "test", + _DELIMITERS.sub("_", self.subject), + _DELIMITERS.sub("_", self.case_description), + _DELIMITERS.sub("_", self.description), + ], + ) + reason = skip(self) + if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0": + return fn + elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0": # pragma: no cover # noqa: E501 + return unittest.expectedFailure(fn) + else: + return unittest.skip(reason)(fn) + + def validate(self, Validator, **kwargs): + Validator.check_schema(self.schema) + validator = Validator( + schema=self.schema, + registry=self._remotes, + **kwargs, + ) + if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0": # pragma: no cover + breakpoint() # noqa: T100 + validator.validate(instance=self.data) + + def validate_ignoring_errors(self, Validator): # pragma: no cover + with suppress(jsonschema.ValidationError): +
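+ # Benchmarks only measure timing, so validation failures are deliberately + # swallowed here; contextlib.suppress(...) is shorthand for try/except/pass.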
self.validate(Validator=Validator) + + +def _someone_save_us_the_module_of_the_caller(): + """ + The FQON of the module 2nd stack frames up from here. + + This is intended to allow us to dynamically return test case classes that + are indistinguishable from being defined in the module that wants them. + + Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run + the class that really is running. + + Save us all, this is all so so so so so terrible. + """ + + return sys._getframe(2).f_globals["__name__"] diff --git a/lib/python3.10/site-packages/jsonschema/tests/fuzz_validate.py b/lib/python3.10/site-packages/jsonschema/tests/fuzz_validate.py new file mode 100644 index 0000000000000000000000000000000000000000..c12e88bcfe9bfdc0e0ffaab502789a6b585d4be2 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/fuzz_validate.py @@ -0,0 +1,50 @@ +""" +Fuzzing setup for OSS-Fuzz. + +See https://github.com/google/oss-fuzz/tree/master/projects/jsonschema for the +other half of the setup here. +""" +import sys + +from hypothesis import given, strategies + +import jsonschema + +PRIM = strategies.one_of( + strategies.booleans(), + strategies.integers(), + strategies.floats(allow_nan=False, allow_infinity=False), + strategies.text(), +) +DICT = strategies.recursive( + base=strategies.one_of( + strategies.booleans(), + strategies.dictionaries(strategies.text(), PRIM), + ), + extend=lambda inner: strategies.dictionaries(strategies.text(), inner), +) + + +@given(obj1=DICT, obj2=DICT) +def test_schemas(obj1, obj2): + try: + jsonschema.validate(instance=obj1, schema=obj2) + except jsonschema.exceptions.ValidationError: + pass + except jsonschema.exceptions.SchemaError: + pass + + +def main(): + atheris.instrument_all() + atheris.Setup( + sys.argv, + test_schemas.hypothesis.fuzz_one_input, + enable_python_coverage=True, + ) + atheris.Fuzz() + + +if __name__ == "__main__": + import atheris + main() diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_cli.py b/lib/python3.10/site-packages/jsonschema/tests/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..bed9f3e4c4089a4cc653ffdb02d3648042df4088 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_cli.py @@ -0,0 +1,904 @@ +from contextlib import redirect_stderr, redirect_stdout +from importlib import metadata +from io import StringIO +from json import JSONDecodeError +from pathlib import Path +from textwrap import dedent +from unittest import TestCase +import json +import os +import subprocess +import sys +import tempfile +import warnings + +from jsonschema import Draft4Validator, Draft202012Validator +from jsonschema.exceptions import ( + SchemaError, + ValidationError, + _RefResolutionError, +) +from jsonschema.validators import _LATEST_VERSION, validate + +with warnings.catch_warnings(): + warnings.simplefilter("ignore") + from jsonschema import cli + + +def fake_validator(*errors): + errors = list(reversed(errors)) + + class FakeValidator: + def __init__(self, *args, **kwargs): + pass + + def iter_errors(self, instance): + if errors: + return errors.pop() + return [] # pragma: no cover + + @classmethod + def check_schema(self, schema): + pass + + return FakeValidator + + +def fake_open(all_contents): + def open(path): + contents = all_contents.get(path) + if contents is None: + raise FileNotFoundError(path) + return StringIO(contents) + return open + + +def _message_for(non_json): + try: + json.loads(non_json) + except JSONDecodeError as error: + return str(error) + else: 
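+ # (If json.loads unexpectedly succeeds there is no parse error message to + # capture, so falling through to the branch below is itself a test bug.)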
# pragma: no cover + raise RuntimeError("Tried and failed to capture a JSON dump error.") + + +class TestCLI(TestCase): + def run_cli( + self, argv, files=None, stdin=StringIO(), exit_code=0, **override, + ): + arguments = cli.parse_args(argv) + arguments.update(override) + + self.assertFalse(hasattr(cli, "open")) + cli.open = fake_open(files or {}) + try: + stdout, stderr = StringIO(), StringIO() + actual_exit_code = cli.run( + arguments, + stdin=stdin, + stdout=stdout, + stderr=stderr, + ) + finally: + del cli.open + + self.assertEqual( + actual_exit_code, exit_code, msg=dedent( + f""" + Expected an exit code of {exit_code} != {actual_exit_code}. + + stdout: {stdout.getvalue()} + + stderr: {stderr.getvalue()} + """, + ), + ) + return stdout.getvalue(), stderr.getvalue() + + def assertOutputs(self, stdout="", stderr="", **kwargs): + self.assertEqual( + self.run_cli(**kwargs), + (dedent(stdout), dedent(stderr)), + ) + + def test_invalid_instance(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_pretty_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + I am an error! + ----------------------------- + """, + ) + + def test_invalid_instance_explicit_plain_output(self): + error = ValidationError("I am an error!", instance=12) + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(error.instance), + ), + validator=fake_validator([error]), + + argv=["--output", "plain", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="12: I am an error!\n", + ) + + def test_invalid_instance_multiple_errors(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: First error + 12: Second error + """, + ) + + def test_invalid_instance_multiple_errors_pretty_output(self): + instance = 12 + first = ValidationError("First error", instance=instance) + second = ValidationError("Second error", instance=instance) + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_instance=json.dumps(instance), + ), + validator=fake_validator([first, second]), + + argv=["-i", "some_instance", "--output", "pretty", "some_schema"], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_instance)=== + + First error + ----------------------------- + ===[ValidationError]===(some_instance)=== + + Second error + ----------------------------- + """, + ) + + def test_multiple_invalid_instances(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", 
instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + 12: An error + 12: Another error + foo: BOOM + """, + ) + + def test_multiple_invalid_instances_pretty_output(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--output", "pretty", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[ValidationError]===(some_first_instance)=== + + An error + ----------------------------- + ===[ValidationError]===(some_first_instance)=== + + Another error + ----------------------------- + ===[ValidationError]===(some_second_instance)=== + + BOOM + ----------------------------- + """, + ) + + def test_custom_error_format(self): + first_instance = 12 + first_errors = [ + ValidationError("An error", instance=first_instance), + ValidationError("Another error", instance=first_instance), + ] + second_instance = "foo" + second_errors = [ValidationError("BOOM", instance=second_instance)] + + self.assertOutputs( + files=dict( + some_schema='{"does not": "matter since it is stubbed"}', + some_first_instance=json.dumps(first_instance), + some_second_instance=json.dumps(second_instance), + ), + validator=fake_validator(first_errors, second_errors), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "-i", "some_first_instance", + "-i", "some_second_instance", + "some_schema", + ], + + exit_code=1, + stderr=":An error._-_.12::Another error._-_.12::BOOM._-_.foo:", + ) + + def test_invalid_schema(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_pretty_output(self): + schema = {"type": 12} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_multiple_errors(self): + self.assertOutputs( + files=dict(some_schema='{"type": 12, "items": 57}'), + argv=["some_schema"], + + exit_code=1, + stderr="""\ + 57: 57 is not of type 'object', 'boolean' + """, + ) + + def test_invalid_schema_multiple_errors_pretty_output(self): + schema = {"type": 12, "items": 57} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance="") + 
error = str(e.exception) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_schema_with_invalid_instance(self): + """ + "Validating" an instance that's invalid under an invalid schema + just shows the schema error. + """ + self.assertOutputs( + files=dict( + some_schema='{"type": 12, "minimum": 30}', + some_instance="13", + ), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 12: 12 is not valid under any of the given schemas + """, + ) + + def test_invalid_schema_with_invalid_instance_pretty_output(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError) as e: + validate(schema=schema, instance=instance) + error = str(e.exception) + + self.assertOutputs( + files=dict( + some_schema=json.dumps(schema), + some_instance=json.dumps(instance), + ), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + stderr=( + "===[SchemaError]===(some_schema)===\n\n" + + str(error) + + "\n-----------------------------\n" + ), + ) + + def test_invalid_instance_continues_with_the_rest(self): + self.assertOutputs( + files=dict( + some_schema='{"minimum": 30}', + first_instance="not valid JSON!", + second_instance="12", + ), + argv=[ + "-i", "first_instance", + "-i", "second_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + Failed to parse 'first_instance': {} + 12: 12 is less than the minimum of 30 + """.format(_message_for("not valid JSON!")), + ) + + def test_custom_error_format_applies_to_schema_errors(self): + instance, schema = 13, {"type": 12, "minimum": 30} + + with self.assertRaises(SchemaError): + validate(schema=schema, instance=instance) + + self.assertOutputs( + files=dict(some_schema=json.dumps(schema)), + + argv=[ + "--error-format", ":{error.message}._-_.{error.instance}:", + "some_schema", + ], + + exit_code=1, + stderr=":12 is not valid under any of the given schemas._-_.12:", + ) + + def test_instance_is_invalid_JSON(self): + instance = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema="{}", some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_instance': {_message_for(instance)} + """, + ) + + def test_instance_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict( + some_schema="{}", + some_instance="not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_instance)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_instance_is_invalid_JSON_on_stdin(self): + instance = "not valid JSON!" 
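+ # The CLI has no filename to report for piped input, so the parse error + # asserted below is attributed to <stdin> rather than to a file path.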
+ + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO(instance), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse <stdin>: {_message_for(instance)} + """, + ) + + def test_instance_is_invalid_JSON_on_stdin_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="{}"), + stdin=StringIO("not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(<stdin>)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_schema", stderr) + + def test_schema_is_invalid_JSON(self): + schema = "not valid JSON!" + + self.assertOutputs( + files=dict(some_schema=schema), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_schema': {_message_for(schema)} + """, + ) + + def test_schema_is_invalid_JSON_pretty_output(self): + stdout, stderr = self.run_cli( + files=dict(some_schema="not valid JSON!"), + + argv=["--output", "pretty", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + + def test_schema_and_instance_are_both_invalid_JSON(self): + """ + Only the schema error is reported, as we abort immediately. + """ + schema, instance = "not valid JSON!", "also not valid JSON!" + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + + argv=["some_schema"], + + exit_code=1, + stderr=f"""\ + Failed to parse 'some_schema': {_message_for(schema)} + """, + ) + + def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self): + """ + Only the schema error is reported, as we abort immediately. + """ + stdout, stderr = self.run_cli( + files=dict( + some_schema="not valid JSON!", + some_instance="also not valid JSON!", + ), + + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + + exit_code=1, + ) + self.assertFalse(stdout) + self.assertIn( + "(some_schema)===\n\nTraceback (most recent call last):\n", + stderr, + ) + self.assertNotIn("some_instance", stderr) + + def test_instance_does_not_exist(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=["-i", "nonexisting_instance", "some_schema"], + + exit_code=1, + stderr="""\ + 'nonexisting_instance' does not exist. + """, + ) + + def test_instance_does_not_exist_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "some_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_instance)=== + + 'nonexisting_instance' does not exist. + ----------------------------- + """, + ) + + def test_schema_does_not_exist(self): + self.assertOutputs( + argv=["nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_schema_does_not_exist_pretty_output(self): + self.assertOutputs( + argv=["--output", "pretty", "nonexisting_schema"], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist.
+ ----------------------------- + """, + ) + + def test_neither_instance_nor_schema_exist(self): + self.assertOutputs( + argv=["-i", "nonexisting_instance", "nonexisting_schema"], + + exit_code=1, + stderr="'nonexisting_schema' does not exist.\n", + ) + + def test_neither_instance_nor_schema_exist_pretty_output(self): + self.assertOutputs( + argv=[ + "--output", "pretty", + "-i", "nonexisting_instance", + "nonexisting_schema", + ], + + exit_code=1, + stderr="""\ + ===[FileNotFoundError]===(nonexisting_schema)=== + + 'nonexisting_schema' does not exist. + ----------------------------- + """, + ) + + def test_successful_validation(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_of_stdin(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_stdin_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}"), + stdin=StringIO("{}"), + argv=["--output", "pretty", "some_schema"], + stdout="===[SUCCESS]===(<stdin>)===\n", + stderr="", + ) + + def test_successful_validation_of_just_the_schema(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + def test_successful_validation_of_just_the_schema_pretty_output(self): + self.assertOutputs( + files=dict(some_schema="{}", some_instance="{}"), + argv=["--output", "pretty", "-i", "some_instance", "some_schema"], + stdout="===[SUCCESS]===(some_instance)===\n", + stderr="", + ) + + def test_successful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) # noqa: SIM115 + ref_schema_file.close() + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance="1"), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + stdout="", + stderr="", + ) + + def test_unsuccessful_validation_via_explicit_base_uri(self): + ref_schema_file = tempfile.NamedTemporaryFile(delete=False) # noqa: SIM115 + ref_schema_file.close() + self.addCleanup(os.remove, ref_schema_file.name) + + ref_path = Path(ref_schema_file.name) + ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}') + + schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}' + + self.assertOutputs( + files=dict(some_schema=schema, some_instance='"1"'), + argv=[ + "-i", "some_instance", + "--base-uri", ref_path.parent.as_uri() + "/", + "some_schema", + ], + exit_code=1, + stdout="", + stderr="1: '1' is not of type 'integer'\n", + ) + + def test_nonexistent_file_with_explicit_base_uri(self): + schema = '{"$ref": "someNonexistentFile.json#definitions/num"}' + instance = "1" + + with self.assertRaises(_RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i",
"some_instance", + "--base-uri", Path.cwd().as_uri(), + "some_schema", + ], + ) + error = str(e.exception) + self.assertIn(f"{os.sep}someNonexistentFile.json'", error) + + def test_invalid_explicit_base_uri(self): + schema = '{"$ref": "foo.json#definitions/num"}' + instance = "1" + + with self.assertRaises(_RefResolutionError) as e: + self.assertOutputs( + files=dict( + some_schema=schema, + some_instance=instance, + ), + argv=[ + "-i", "some_instance", + "--base-uri", "not@UR1", + "some_schema", + ], + ) + error = str(e.exception) + self.assertEqual( + error, "unknown url type: 'foo.json'", + ) + + def test_it_validates_using_the_latest_validator_when_unspecified(self): + # There isn't a better way now I can think of to ensure that the + # latest version was used, given that the call to validator_for + # is hidden inside the CLI, so guard that that's the case, and + # this test will have to be updated when versions change until + # we can think of a better way to ensure this behavior. + self.assertIs(Draft202012Validator, _LATEST_VERSION) + + self.assertOutputs( + files=dict(some_schema='{"const": "check"}', some_instance='"a"'), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="a: 'check' was expected\n", + ) + + def test_it_validates_using_draft7_when_specified(self): + """ + Specifically, `const` validation applies for Draft 7. + """ + schema = """ + { + "$schema": "http://json-schema.org/draft-07/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + exit_code=1, + stdout="", + stderr="foo: 'check' was expected\n", + ) + + def test_it_validates_using_draft4_when_specified(self): + """ + Specifically, `const` validation *does not* apply for Draft 4. 
+ """ + schema = """ + { + "$schema": "http://json-schema.org/draft-04/schema#", + "const": "check" + } + """ + instance = '"foo"' + self.assertOutputs( + files=dict(some_schema=schema, some_instance=instance), + argv=["-i", "some_instance", "some_schema"], + stdout="", + stderr="", + ) + + +class TestParser(TestCase): + + FakeValidator = fake_validator() + + def test_find_validator_by_fully_qualified_object_name(self): + arguments = cli.parse_args( + [ + "--validator", + "jsonschema.tests.test_cli.TestParser.FakeValidator", + "--instance", "mem://some/instance", + "mem://some/schema", + ], + ) + self.assertIs(arguments["validator"], self.FakeValidator) + + def test_find_validator_in_jsonschema(self): + arguments = cli.parse_args( + [ + "--validator", "Draft4Validator", + "--instance", "mem://some/instance", + "mem://some/schema", + ], + ) + self.assertIs(arguments["validator"], Draft4Validator) + + def cli_output_for(self, *argv): + stdout, stderr = StringIO(), StringIO() + with redirect_stdout(stdout), redirect_stderr(stderr): # noqa: SIM117 + with self.assertRaises(SystemExit): + cli.parse_args(argv) + return stdout.getvalue(), stderr.getvalue() + + def test_unknown_output(self): + stdout, stderr = self.cli_output_for( + "--output", "foo", + "mem://some/schema", + ) + self.assertIn("invalid choice: 'foo'", stderr) + self.assertFalse(stdout) + + def test_useless_error_format(self): + stdout, stderr = self.cli_output_for( + "--output", "pretty", + "--error-format", "foo", + "mem://some/schema", + ) + self.assertIn( + "--error-format can only be used with --output plain", + stderr, + ) + self.assertFalse(stdout) + + +class TestCLIIntegration(TestCase): + def test_license(self): + our_metadata = metadata.metadata("jsonschema") + self.assertEqual(our_metadata.get("License-Expression"), "MIT") + + def test_version(self): + version = subprocess.check_output( + [sys.executable, "-W", "ignore", "-m", "jsonschema", "--version"], + stderr=subprocess.STDOUT, + ) + version = version.decode("utf-8").strip() + self.assertEqual(version, metadata.version("jsonschema")) + + def test_no_arguments_shows_usage_notes(self): + output = subprocess.check_output( + [sys.executable, "-m", "jsonschema"], + stderr=subprocess.STDOUT, + ) + output_for_help = subprocess.check_output( + [sys.executable, "-m", "jsonschema", "--help"], + stderr=subprocess.STDOUT, + ) + self.assertEqual(output, output_for_help) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_deprecations.py b/lib/python3.10/site-packages/jsonschema/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..a54b02f380ae098ba7b3f2c518a44935859f1ce6 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_deprecations.py @@ -0,0 +1,432 @@ +from contextlib import contextmanager +from io import BytesIO +from unittest import TestCase, mock +import importlib.metadata +import json +import subprocess +import sys +import urllib.request + +import referencing.exceptions + +from jsonschema import FormatChecker, exceptions, protocols, validators + + +class TestDeprecations(TestCase): + def test_version(self): + """ + As of v4.0.0, __version__ is deprecated in favor of importlib.metadata. 
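+ + A sketch of the replacement, assuming only that the package is installed: + + import importlib.metadata + importlib.metadata.version("jsonschema")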
+ """ + + message = "Accessing jsonschema.__version__ is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import __version__ + + self.assertEqual(__version__, importlib.metadata.version("jsonschema")) + self.assertEqual(w.filename, __file__) + + def test_validators_ErrorTree(self): + """ + As of v4.0.0, importing ErrorTree from jsonschema.validators is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing ErrorTree from jsonschema.validators is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.validators import ErrorTree + + self.assertEqual(ErrorTree, exceptions.ErrorTree) + self.assertEqual(w.filename, __file__) + + def test_import_ErrorTree(self): + """ + As of v4.18.0, importing ErrorTree from the package root is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing ErrorTree directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import ErrorTree + + self.assertEqual(ErrorTree, exceptions.ErrorTree) + self.assertEqual(w.filename, __file__) + + def test_ErrorTree_setitem(self): + """ + As of v4.20.0, setting items on an ErrorTree is deprecated. + """ + + e = exceptions.ValidationError("some error", path=["foo"]) + tree = exceptions.ErrorTree() + subtree = exceptions.ErrorTree(errors=[e]) + + message = "ErrorTree.__setitem__ is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + tree["foo"] = subtree + + self.assertEqual(tree["foo"], subtree) + self.assertEqual(w.filename, __file__) + + def test_import_FormatError(self): + """ + As of v4.18.0, importing FormatError from the package root is + deprecated in favor of doing so from jsonschema.exceptions. + """ + + message = "Importing FormatError directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import FormatError + + self.assertEqual(FormatError, exceptions.FormatError) + self.assertEqual(w.filename, __file__) + + def test_import_Validator(self): + """ + As of v4.19.0, importing Validator from the package root is + deprecated in favor of doing so from jsonschema.protocols. + """ + + message = "Importing Validator directly from the jsonschema package " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import Validator + + self.assertEqual(Validator, protocols.Validator) + self.assertEqual(w.filename, __file__) + + def test_validators_validators(self): + """ + As of v4.0.0, accessing jsonschema.validators.validators is + deprecated. + """ + + message = "Accessing jsonschema.validators.validators is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + value = validators.validators + + self.assertEqual(value, validators._VALIDATORS) + self.assertEqual(w.filename, __file__) + + def test_validators_meta_schemas(self): + """ + As of v4.0.0, accessing jsonschema.validators.meta_schemas is + deprecated. + """ + + message = "Accessing jsonschema.validators.meta_schemas is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + value = validators.meta_schemas + + self.assertEqual(value, validators._META_SCHEMAS) + self.assertEqual(w.filename, __file__) + + def test_RefResolver_in_scope(self): + """ + As of v4.0.0, RefResolver.in_scope is deprecated. 
+ """ + + resolver = validators._RefResolver.from_schema({}) + message = "jsonschema.RefResolver.in_scope is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: # noqa: SIM117 + with resolver.in_scope("foo"): + pass + + self.assertEqual(w.filename, __file__) + + def test_Validator_is_valid_two_arguments(self): + """ + As of v4.0.0, calling is_valid with two arguments (to provide a + different schema) is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Passing a schema to Validator.is_valid is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + result = validator.is_valid("foo", {"type": "number"}) + + self.assertFalse(result) + self.assertEqual(w.filename, __file__) + + def test_Validator_iter_errors_two_arguments(self): + """ + As of v4.0.0, calling iter_errors with two arguments (to provide a + different schema) is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Passing a schema to Validator.iter_errors is deprecated " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + error, = validator.iter_errors("foo", {"type": "number"}) + + self.assertEqual(error.validator, "type") + self.assertEqual(w.filename, __file__) + + def test_Validator_resolver(self): + """ + As of v4.18.0, accessing Validator.resolver is deprecated. + """ + + validator = validators.Draft7Validator({}) + message = "Accessing Draft7Validator.resolver is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + self.assertIsInstance(validator.resolver, validators._RefResolver) + + self.assertEqual(w.filename, __file__) + + def test_RefResolver(self): + """ + As of v4.18.0, RefResolver is fully deprecated. + """ + + message = "jsonschema.RefResolver is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import RefResolver + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.validators import RefResolver # noqa: F401 + self.assertEqual(w.filename, __file__) + + def test_RefResolutionError(self): + """ + As of v4.18.0, RefResolutionError is deprecated in favor of directly + catching errors from the referencing library. + """ + + message = "jsonschema.exceptions.RefResolutionError is deprecated" + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import RefResolutionError + + self.assertEqual(RefResolutionError, exceptions._RefResolutionError) + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema.exceptions import RefResolutionError + + self.assertEqual(RefResolutionError, exceptions._RefResolutionError) + self.assertEqual(w.filename, __file__) + + def test_catching_Unresolvable_directly(self): + """ + This behavior is the intended behavior (i.e. it's not deprecated), but + given we do "tricksy" things in the interim to wrap exceptions in a + multiple inheritance subclass, we need to be extra sure it works and + stays working.
+ """ + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as e: + validator.validate(12) + + expected = referencing.exceptions.Unresolvable(ref="urn:nothing") + self.assertEqual( + (e.exception, str(e.exception)), + (expected, "Unresolvable: urn:nothing"), + ) + + def test_catching_Unresolvable_via_RefResolutionError(self): + """ + Until RefResolutionError is removed, it is still possible to catch + exceptions from reference resolution using it, even though they may + have been raised by referencing. + """ + with self.assertWarns(DeprecationWarning): + from jsonschema import RefResolutionError + + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as u: + validator.validate(12) + + with self.assertRaises(RefResolutionError) as e: + validator.validate(12) + + self.assertEqual( + (e.exception, str(e.exception)), + (u.exception, "Unresolvable: urn:nothing"), + ) + + def test_WrappedReferencingError_hashability(self): + """ + Ensure the wrapped referencing errors are hashable when possible. + """ + with self.assertWarns(DeprecationWarning): + from jsonschema import RefResolutionError + + validator = validators.Draft202012Validator({"$ref": "urn:nothing"}) + + with self.assertRaises(referencing.exceptions.Unresolvable) as u: + validator.validate(12) + + with self.assertRaises(RefResolutionError) as e: + validator.validate(12) + + self.assertIn(e.exception, {u.exception}) + self.assertIn(u.exception, {e.exception}) + + def test_Validator_subclassing(self): + """ + As of v4.12.0, subclassing a validator class produces an explicit + deprecation warning. + + This was never intended to be public API (and some comments over the + years in issues said so, but obviously that's not a great way to make + sure it's followed). + + A future version will explicitly raise an error. + """ + + message = "Subclassing validator classes is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + class Subclass(validators.Draft202012Validator): + pass + + self.assertEqual(w.filename, __file__) + + with self.assertWarnsRegex(DeprecationWarning, message) as w: + class AnotherSubclass(validators.create(meta_schema={})): + pass + + def test_FormatChecker_cls_checks(self): + """ + As of v4.14.0, FormatChecker.cls_checks is deprecated without + replacement. + """ + + self.addCleanup(FormatChecker.checkers.pop, "boom", None) + + message = "FormatChecker.cls_checks " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + FormatChecker.cls_checks("boom") + + self.assertEqual(w.filename, __file__) + + def test_draftN_format_checker(self): + """ + As of v4.16.0, accessing jsonschema.draftn_format_checker is deprecated + in favor of Validator.FORMAT_CHECKER. 
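+ + A sketch of the replacement usage (the "ipv4" schema here is a + hypothetical example): + + validators.Draft202012Validator( + {"format": "ipv4"}, + format_checker=validators.Draft202012Validator.FORMAT_CHECKER, + )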
+ """ + + message = "Accessing jsonschema.draft202012_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft202012_format_checker + + self.assertIs( + draft202012_format_checker, + validators.Draft202012Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft201909_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft201909_format_checker + + self.assertIs( + draft201909_format_checker, + validators.Draft201909Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft7_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft7_format_checker + + self.assertIs( + draft7_format_checker, + validators.Draft7Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft6_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft6_format_checker + + self.assertIs( + draft6_format_checker, + validators.Draft6Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft4_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft4_format_checker + + self.assertIs( + draft4_format_checker, + validators.Draft4Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + message = "Accessing jsonschema.draft3_format_checker is " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + from jsonschema import draft3_format_checker + + self.assertIs( + draft3_format_checker, + validators.Draft3Validator.FORMAT_CHECKER, + ) + self.assertEqual(w.filename, __file__) + + with self.assertRaises(ImportError): + from jsonschema import draft1234_format_checker # noqa: F401 + + def test_import_cli(self): + """ + As of v4.17.0, importing jsonschema.cli is deprecated. + """ + + message = "The jsonschema CLI is deprecated and will be removed " + with self.assertWarnsRegex(DeprecationWarning, message) as w: + import jsonschema.cli + importlib.reload(jsonschema.cli) + + self.assertEqual(w.filename, importlib.__file__) + + def test_cli(self): + """ + As of v4.17.0, the jsonschema CLI is deprecated. + """ + + process = subprocess.run( + [sys.executable, "-m", "jsonschema"], + capture_output=True, + check=True, + ) + self.assertIn(b"The jsonschema CLI is deprecated ", process.stderr) + + def test_automatic_remote_retrieval(self): + """ + Automatic retrieval of remote references is deprecated as of v4.18.0. + """ + ref = "http://bar#/$defs/baz" + schema = {"$defs": {"baz": {"type": "integer"}}} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = None + + @contextmanager + def fake_urlopen(request): + self.assertIsInstance(request, urllib.request.Request) + self.assertEqual(request.full_url, "http://bar") + + # Ha ha urllib.request.Request "normalizes" header names and + # Request.get_header does not also normalize them... 
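+ # Request.add_header stores names capitalized (e.g. "User-agent"), so the + # assertion below lowercases the name rather than relying on exact casing.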
+ (header, value), = request.header_items() + self.assertEqual(header.lower(), "user-agent") + self.assertEqual( + value, "python-jsonschema (deprecated $ref resolution)", + ) + yield BytesIO(json.dumps(schema).encode("utf8")) + + validator = validators.Draft202012Validator({"$ref": ref}) + + message = "Automatically retrieving remote references " + patch = mock.patch.object(urllib.request, "urlopen", new=fake_urlopen) + + with patch, self.assertWarnsRegex(DeprecationWarning, message): + self.assertEqual( + (validator.is_valid({}), validator.is_valid(37)), + (False, True), + ) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_exceptions.py b/lib/python3.10/site-packages/jsonschema/tests/test_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..8d515a998506e779b9e2f2340204bca33f73ae04 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_exceptions.py @@ -0,0 +1,759 @@ +from unittest import TestCase +import textwrap + +import jsonpath_ng + +from jsonschema import exceptions +from jsonschema.validators import _LATEST_VERSION + + +class TestBestMatch(TestCase): + def best_match_of(self, instance, schema): + errors = list(_LATEST_VERSION(schema).iter_errors(instance)) + msg = f"No errors found for {instance} under {schema!r}!" + self.assertTrue(errors, msg=msg) + + best = exceptions.best_match(iter(errors)) + reversed_best = exceptions.best_match(reversed(errors)) + + self.assertEqual( + best._contents(), + reversed_best._contents(), + f"No consistent best match!\nGot: {best}\n\nThen: {reversed_best}", + ) + return best + + def test_shallower_errors_are_better_matches(self): + schema = { + "properties": { + "foo": { + "minProperties": 2, + "properties": {"bar": {"type": "object"}}, + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": []}}, schema=schema) + self.assertEqual(best.validator, "minProperties") + + def test_oneOf_and_anyOf_are_weak_matches(self): + """ + A property you *must* match is probably better than one you have to + match a part of. + """ + + schema = { + "minProperties": 2, + "anyOf": [{"type": "string"}, {"type": "number"}], + "oneOf": [{"type": "string"}, {"type": "number"}], + } + best = self.best_match_of(instance={}, schema=schema) + self.assertEqual(best.validator, "minProperties") + + def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self): + """ + If the most relevant error is an anyOf, then we traverse its context + and select the otherwise *least* relevant error, since in this case + that means the most specific, deep, error inside the instance. + + I.e. since only one of the schemas must match, we look for the most + relevant one. + """ + + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_no_anyOf_traversal_for_equally_relevant_errors(self): + """ + We don't traverse into an anyOf (as above) if all of its context errors + seem to be equally "wrong" against the instance. 
+ """ + + schema = { + "anyOf": [ + {"type": "string"}, + {"type": "integer"}, + {"type": "object"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "anyOf") + + def test_anyOf_traversal_for_single_equally_relevant_error(self): + """ + We *do* traverse anyOf with a single nested error, even though it is + vacuously equally relevant to itself. + """ + + schema = { + "anyOf": [ + {"type": "string"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "type") + + def test_anyOf_traversal_for_single_sibling_errors(self): + """ + We *do* traverse anyOf with a single subschema that fails multiple + times (e.g. on multiple items). + """ + + schema = { + "anyOf": [ + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_anyOf_traversal_for_non_type_matching_sibling_errors(self): + """ + We *do* traverse anyOf with multiple subschemas when one does not type + match. + """ + + schema = { + "anyOf": [ + {"type": "object"}, + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self): + """ + If the most relevant error is an oneOf, then we traverse its context + and select the otherwise *least* relevant error, since in this case + that means the most specific, deep, error inside the instance. + + I.e. since only one of the schemas must match, we look for the most + relevant one. + """ + + schema = { + "properties": { + "foo": { + "oneOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_no_oneOf_traversal_for_equally_relevant_errors(self): + """ + We don't traverse into an oneOf (as above) if all of its context errors + seem to be equally "wrong" against the instance. + """ + + schema = { + "oneOf": [ + {"type": "string"}, + {"type": "integer"}, + {"type": "object"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "oneOf") + + def test_oneOf_traversal_for_single_equally_relevant_error(self): + """ + We *do* traverse oneOf with a single nested error, even though it is + vacuously equally relevant to itself. + """ + + schema = { + "oneOf": [ + {"type": "string"}, + ], + } + best = self.best_match_of(instance=[], schema=schema) + self.assertEqual(best.validator, "type") + + def test_oneOf_traversal_for_single_sibling_errors(self): + """ + We *do* traverse oneOf with a single subschema that fails multiple + times (e.g. on multiple items). + """ + + schema = { + "oneOf": [ + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_oneOf_traversal_for_non_type_matching_sibling_errors(self): + """ + We *do* traverse oneOf with multiple subschemas when one does not type + match. 
+ """ + + schema = { + "oneOf": [ + {"type": "object"}, + {"items": {"const": 37}}, + ], + } + best = self.best_match_of(instance=[12, 12], schema=schema) + self.assertEqual(best.validator, "const") + + def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self): + """ + Now, if the error is allOf, we traverse but select the *most* relevant + error from the context, because all schemas here must match anyways. + """ + + schema = { + "properties": { + "foo": { + "allOf": [ + {"type": "string"}, + {"properties": {"bar": {"type": "array"}}}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "string") + + def test_nested_context_for_oneOf(self): + """ + We traverse into nested contexts (a oneOf containing an error in a + nested oneOf here). + """ + + schema = { + "properties": { + "foo": { + "oneOf": [ + {"type": "string"}, + { + "oneOf": [ + {"type": "string"}, + { + "properties": { + "bar": {"type": "array"}, + }, + }, + ], + }, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema) + self.assertEqual(best.validator_value, "array") + + def test_it_prioritizes_matching_types(self): + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": "array", "minItems": 2}, + {"type": "string", "minLength": 10}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertEqual(best.validator, "minLength") + + reordered = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string", "minLength": 10}, + {"type": "array", "minItems": 2}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=reordered) + self.assertEqual(best.validator, "minLength") + + def test_it_prioritizes_matching_union_types(self): + schema = { + "properties": { + "foo": { + "anyOf": [ + {"type": ["array", "object"], "minItems": 2}, + {"type": ["integer", "string"], "minLength": 10}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertEqual(best.validator, "minLength") + + reordered = { + "properties": { + "foo": { + "anyOf": [ + {"type": "string", "minLength": 10}, + {"type": "array", "minItems": 2}, + ], + }, + }, + } + best = self.best_match_of(instance={"foo": "bar"}, schema=reordered) + self.assertEqual(best.validator, "minLength") + + def test_boolean_schemas(self): + schema = {"properties": {"foo": False}} + best = self.best_match_of(instance={"foo": "bar"}, schema=schema) + self.assertIsNone(best.validator) + + def test_one_error(self): + validator = _LATEST_VERSION({"minProperties": 2}) + error, = validator.iter_errors({}) + self.assertEqual( + exceptions.best_match(validator.iter_errors({})).validator, + "minProperties", + ) + + def test_no_errors(self): + validator = _LATEST_VERSION({}) + self.assertIsNone(exceptions.best_match(validator.iter_errors({}))) + + +class TestByRelevance(TestCase): + def test_short_paths_are_better_matches(self): + shallow = exceptions.ValidationError("Oh no!", path=["baz"]) + deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"]) + match = max([shallow, deep], key=exceptions.relevance) + self.assertIs(match, shallow) + + match = max([deep, shallow], key=exceptions.relevance) + self.assertIs(match, shallow) + + def test_global_errors_are_even_better_matches(self): + shallow = exceptions.ValidationError("Oh no!", path=[]) + deep = exceptions.ValidationError("Oh yes!", path=["foo"]) + + errors = sorted([shallow, deep], 
key=exceptions.relevance) + self.assertEqual( + [list(error.path) for error in errors], + [["foo"], []], + ) + + errors = sorted([deep, shallow], key=exceptions.relevance) + self.assertEqual( + [list(error.path) for error in errors], + [["foo"], []], + ) + + def test_weak_keywords_are_lower_priority(self): + weak = exceptions.ValidationError("Oh no!", path=[], validator="a") + normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") + + best_match = exceptions.by_relevance(weak="a") + + match = max([weak, normal], key=best_match) + self.assertIs(match, normal) + + match = max([normal, weak], key=best_match) + self.assertIs(match, normal) + + def test_strong_keywords_are_higher_priority(self): + weak = exceptions.ValidationError("Oh no!", path=[], validator="a") + normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") + strong = exceptions.ValidationError("Oh fine!", path=[], validator="c") + + best_match = exceptions.by_relevance(weak="a", strong="c") + + match = max([weak, normal, strong], key=best_match) + self.assertIs(match, strong) + + match = max([strong, normal, weak], key=best_match) + self.assertIs(match, strong) + + +class TestErrorTree(TestCase): + def test_it_knows_how_many_total_errors_it_contains(self): + # FIXME: #442 + errors = [ + exceptions.ValidationError("Something", validator=i) + for i in range(8) + ] + tree = exceptions.ErrorTree(errors) + self.assertEqual(tree.total_errors, 8) + + def test_it_contains_an_item_if_the_item_had_an_error(self): + errors = [exceptions.ValidationError("a message", path=["bar"])] + tree = exceptions.ErrorTree(errors) + self.assertIn("bar", tree) + + def test_it_does_not_contain_an_item_if_the_item_had_no_error(self): + errors = [exceptions.ValidationError("a message", path=["bar"])] + tree = exceptions.ErrorTree(errors) + self.assertNotIn("foo", tree) + + def test_keywords_that_failed_appear_in_errors_dict(self): + error = exceptions.ValidationError("a message", validator="foo") + tree = exceptions.ErrorTree([error]) + self.assertEqual(tree.errors, {"foo": error}) + + def test_it_creates_a_child_tree_for_each_nested_path(self): + errors = [ + exceptions.ValidationError("a bar message", path=["bar"]), + exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]), + ] + tree = exceptions.ErrorTree(errors) + self.assertIn(0, tree["bar"]) + self.assertNotIn(1, tree["bar"]) + + def test_children_have_their_errors_dicts_built(self): + e1, e2 = ( + exceptions.ValidationError("1", validator="foo", path=["bar", 0]), + exceptions.ValidationError("2", validator="quux", path=["bar", 0]), + ) + tree = exceptions.ErrorTree([e1, e2]) + self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2}) + + def test_multiple_errors_with_instance(self): + e1, e2 = ( + exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1"), + exceptions.ValidationError( + "2", + validator="quux", + path=["foobar", 2], + instance="i2"), + ) + exceptions.ErrorTree([e1, e2]) + + def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self): + error = exceptions.ValidationError("123", validator="foo", instance=[]) + tree = exceptions.ErrorTree([error]) + + with self.assertRaises(IndexError): + tree[0] + + def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self): + """ + If a keyword refers to a path that isn't in the instance, the + tree still properly returns a subtree for that path. 
+ """ + + error = exceptions.ValidationError( + "a message", validator="foo", instance={}, path=["foo"], + ) + tree = exceptions.ErrorTree([error]) + self.assertIsInstance(tree["foo"], exceptions.ErrorTree) + + def test_iter(self): + e1, e2 = ( + exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1"), + exceptions.ValidationError( + "2", + validator="quux", + path=["foobar", 2], + instance="i2"), + ) + tree = exceptions.ErrorTree([e1, e2]) + self.assertEqual(set(tree), {"bar", "foobar"}) + + def test_repr_single(self): + error = exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1", + ) + tree = exceptions.ErrorTree([error]) + self.assertEqual(repr(tree), "<ErrorTree (1 total error)>") + + def test_repr_multiple(self): + e1, e2 = ( + exceptions.ValidationError( + "1", + validator="foo", + path=["bar", "bar2"], + instance="i1"), + exceptions.ValidationError( + "2", + validator="quux", + path=["foobar", 2], + instance="i2"), + ) + tree = exceptions.ErrorTree([e1, e2]) + self.assertEqual(repr(tree), "<ErrorTree (2 total errors)>") + + def test_repr_empty(self): + tree = exceptions.ErrorTree([]) + self.assertEqual(repr(tree), "<ErrorTree (0 total errors)>") + + +class TestErrorInitReprStr(TestCase): + def make_error(self, **kwargs): + defaults = dict( + message="hello", + validator="type", + validator_value="string", + instance=5, + schema={"type": "string"}, + ) + defaults.update(kwargs) + return exceptions.ValidationError(**defaults) + + def assertShows(self, expected, **kwargs): + expected = textwrap.dedent(expected).rstrip("\n") + + error = self.make_error(**kwargs) + message_line, _, rest = str(error).partition("\n") + self.assertEqual(message_line, error.message) + self.assertEqual(rest, expected) + + def test_it_calls_super_and_sets_args(self): + error = self.make_error() + self.assertGreater(len(error.args), 1) + + def test_repr(self): + self.assertEqual( + repr(exceptions.ValidationError(message="Hello!")), + "<ValidationError: 'Hello!'>", + ) + + def test_unset_error(self): + error = exceptions.ValidationError("message") + self.assertEqual(str(error), "message") + + kwargs = { + "validator": "type", + "validator_value": "string", + "instance": 5, + "schema": {"type": "string"}, + } + # Just the message should show if any of the attributes are unset + for attr in kwargs: + k = dict(kwargs) + del k[attr] + error = exceptions.ValidationError("message", **k) + self.assertEqual(str(error), "message") + + def test_empty_paths(self): + self.assertShows( + """ + Failed validating 'type' in schema: + {'type': 'string'} + + On instance: + 5 + """, + path=[], + schema_path=[], + ) + + def test_one_item_paths(self): + self.assertShows( + """ + Failed validating 'type' in schema: + {'type': 'string'} + + On instance[0]: + 5 + """, + path=[0], + schema_path=["items"], + ) + + def test_multiple_item_paths(self): + self.assertShows( + """ + Failed validating 'type' in schema['items'][0]: + {'type': 'string'} + + On instance[0]['a']: + 5 + """, + path=[0, "a"], + schema_path=["items", 0, 1], + ) + + def test_uses_pprint(self): + self.assertShows( + """ + Failed validating 'maxLength' in schema: + {0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + 10: 10, + 11: 11, + 12: 12, + 13: 13, + 14: 14, + 15: 15, + 16: 16, + 17: 17, + 18: 18, + 19: 19} + + On instance: + [0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24] + """, + instance=list(range(25)), + schema=dict(zip(range(20), range(20))), + validator="maxLength", +
) + + def test_does_not_reorder_dicts(self): + self.assertShows( + """ + Failed validating 'type' in schema: + {'do': 3, 'not': 7, 'sort': 37, 'me': 73} + + On instance: + {'here': 73, 'too': 37, 'no': 7, 'sorting': 3} + """, + schema={ + "do": 3, + "not": 7, + "sort": 37, + "me": 73, + }, + instance={ + "here": 73, + "too": 37, + "no": 7, + "sorting": 3, + }, + ) + + def test_str_works_with_instances_having_overriden_eq_operator(self): + """ + Check for #164 which rendered exceptions unusable when a + `ValidationError` involved instances with an `__eq__` method + that returned truthy values. + """ + + class DontEQMeBro: + def __eq__(this, other): # pragma: no cover + self.fail("Don't!") + + def __ne__(this, other): # pragma: no cover + self.fail("Don't!") + + instance = DontEQMeBro() + error = exceptions.ValidationError( + "a message", + validator="foo", + instance=instance, + validator_value="some", + schema="schema", + ) + self.assertIn(repr(instance), str(error)) + + +class TestHashable(TestCase): + def test_hashable(self): + {exceptions.ValidationError("")} + {exceptions.SchemaError("")} + + +class TestJsonPathRendering(TestCase): + def validate_json_path_rendering(self, property_name, expected_path): + error = exceptions.ValidationError( + path=[property_name], + message="1", + validator="foo", + instance="i1", + ) + + rendered_json_path = error.json_path + self.assertEqual(rendered_json_path, expected_path) + + re_parsed_name = jsonpath_ng.parse(rendered_json_path).right.fields[0] + self.assertEqual(re_parsed_name, property_name) + + def test_basic(self): + self.validate_json_path_rendering("x", "$.x") + + def test_empty(self): + self.validate_json_path_rendering("", "$['']") + + def test_number(self): + self.validate_json_path_rendering("1", "$['1']") + + def test_period(self): + self.validate_json_path_rendering(".", "$['.']") + + def test_single_quote(self): + self.validate_json_path_rendering("'", r"$['\'']") + + def test_space(self): + self.validate_json_path_rendering(" ", "$[' ']") + + def test_backslash(self): + self.validate_json_path_rendering("\\", r"$['\\']") + + def test_backslash_single_quote(self): + self.validate_json_path_rendering(r"\'", r"$['\\\'']") + + def test_underscore(self): + self.validate_json_path_rendering("_", r"$['_']") + + def test_double_quote(self): + self.validate_json_path_rendering('"', """$['"']""") + + def test_hyphen(self): + self.validate_json_path_rendering("-", "$['-']") + + def test_json_path_injection(self): + self.validate_json_path_rendering("a[0]", "$['a[0]']") + + def test_open_bracket(self): + self.validate_json_path_rendering("[", "$['[']") diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_format.py b/lib/python3.10/site-packages/jsonschema/tests/test_format.py new file mode 100644 index 0000000000000000000000000000000000000000..d829f9848f51f882d5a3f9413c80e0dbdcdaf292 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_format.py @@ -0,0 +1,91 @@ +""" +Tests for the parts of jsonschema related to the :kw:`format` keyword. 
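+
+A rough sketch of the checker API exercised here (illustrative, not
+exhaustive): FormatChecker(formats=("ipv4",)) builds a checker limited to
+the named formats, checker.checks("name") registers a new check as a
+decorator, and checker.check(instance, "name") raises FormatError rather
+than returning False when the instance is invalid.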
+""" + +from unittest import TestCase + +from jsonschema import FormatChecker, ValidationError +from jsonschema.exceptions import FormatError +from jsonschema.validators import Draft4Validator + +BOOM = ValueError("Boom!") +BANG = ZeroDivisionError("Bang!") + + +def boom(thing): + if thing == "bang": + raise BANG + raise BOOM + + +class TestFormatChecker(TestCase): + def test_it_can_validate_no_formats(self): + checker = FormatChecker(formats=()) + self.assertFalse(checker.checkers) + + def test_it_raises_a_key_error_for_unknown_formats(self): + with self.assertRaises(KeyError): + FormatChecker(formats=["o noes"]) + + def test_it_can_register_cls_checkers(self): + original = dict(FormatChecker.checkers) + self.addCleanup(FormatChecker.checkers.pop, "boom") + with self.assertWarns(DeprecationWarning): + FormatChecker.cls_checks("boom")(boom) + self.assertEqual( + FormatChecker.checkers, + dict(original, boom=(boom, ())), + ) + + def test_it_can_register_checkers(self): + checker = FormatChecker() + checker.checks("boom")(boom) + self.assertEqual( + checker.checkers, + dict(FormatChecker.checkers, boom=(boom, ())), + ) + + def test_it_catches_registered_errors(self): + checker = FormatChecker() + checker.checks("boom", raises=type(BOOM))(boom) + + with self.assertRaises(FormatError) as cm: + checker.check(instance=12, format="boom") + + self.assertIs(cm.exception.cause, BOOM) + self.assertIs(cm.exception.__cause__, BOOM) + self.assertEqual(str(cm.exception), "12 is not a 'boom'") + + # Unregistered errors should not be caught + with self.assertRaises(type(BANG)): + checker.check(instance="bang", format="boom") + + def test_format_error_causes_become_validation_error_causes(self): + checker = FormatChecker() + checker.checks("boom", raises=ValueError)(boom) + validator = Draft4Validator({"format": "boom"}, format_checker=checker) + + with self.assertRaises(ValidationError) as cm: + validator.validate("BOOM") + + self.assertIs(cm.exception.cause, BOOM) + self.assertIs(cm.exception.__cause__, BOOM) + + def test_format_checkers_come_with_defaults(self): + # This is bad :/ but relied upon. + # The docs for quite awhile recommended people do things like + # validate(..., format_checker=FormatChecker()) + # We should change that, but we can't without deprecation... + checker = FormatChecker() + with self.assertRaises(FormatError): + checker.check(instance="not-an-ipv4", format="ipv4") + + def test_repr(self): + checker = FormatChecker(formats=()) + checker.checks("foo")(lambda thing: True) # pragma: no cover + checker.checks("bar")(lambda thing: True) # pragma: no cover + checker.checks("baz")(lambda thing: True) # pragma: no cover + self.assertEqual( + repr(checker), + "", + ) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_jsonschema_test_suite.py b/lib/python3.10/site-packages/jsonschema/tests/test_jsonschema_test_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..41c982553688ab179dcb95eedc3aae78648e2a4d --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_jsonschema_test_suite.py @@ -0,0 +1,262 @@ +""" +Test runner for the JSON Schema official test suite + +Tests comprehensive correctness of each draft's validator. + +See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details. 
+""" + + +from jsonschema.tests._suite import Suite +import jsonschema + +SUITE = Suite() +DRAFT3 = SUITE.version(name="draft3") +DRAFT4 = SUITE.version(name="draft4") +DRAFT6 = SUITE.version(name="draft6") +DRAFT7 = SUITE.version(name="draft7") +DRAFT201909 = SUITE.version(name="draft2019-09") +DRAFT202012 = SUITE.version(name="draft2020-12") + + +def skip(message, **kwargs): + def skipper(test): + if all(value == getattr(test, attr) for attr, value in kwargs.items()): + return message + return skipper + + +def ecmascript_regex(test): + if test.subject == "ecmascript-regex": + return "ECMA regex support will be added in #1142." + + +def missing_format(Validator): + def missing_format(test): # pragma: no cover + schema = test.schema + if ( + schema is True + or schema is False + or "format" not in schema + or schema["format"] in Validator.FORMAT_CHECKER.checkers + or test.valid + ): + return + + return f"Format checker {schema['format']!r} not found." + return missing_format + + +def complex_email_validation(test): + if test.subject != "email": + return + + message = "Complex email validation is (intentionally) unsupported." + return skip( + message=message, + description="an invalid domain", + )(test) or skip( + message=message, + description="an invalid IPv4-address-literal", + )(test) or skip( + message=message, + description="dot after local part is not valid", + )(test) or skip( + message=message, + description="dot before local part is not valid", + )(test) or skip( + message=message, + description="two subsequent dots inside local part are not valid", + )(test) + + +def leap_second(test): + message = "Leap seconds are unsupported." + return skip( + message=message, + subject="time", + description="a valid time string with leap second", + )(test) or skip( + message=message, + subject="time", + description="a valid time string with leap second, Zulu", + )(test) or skip( + message=message, + subject="time", + description="a valid time string with leap second with offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, positive time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, negative time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, large positive time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, large negative time-offset", + )(test) or skip( + message=message, + subject="time", + description="valid leap second, zero time-offset", + )(test) or skip( + message=message, + subject="date-time", + description="a valid date-time with a leap second, UTC", + )(test) or skip( + message=message, + subject="date-time", + description="a valid date-time with a leap second, with minus offset", + )(test) + + +TestDraft3 = DRAFT3.to_unittest_testcase( + DRAFT3.cases(), + DRAFT3.format_cases(), + DRAFT3.optional_cases_of(name="bignum"), + DRAFT3.optional_cases_of(name="non-bmp-regex"), + DRAFT3.optional_cases_of(name="zeroTerminatedFloats"), + Validator=jsonschema.Draft3Validator, + format_checker=jsonschema.Draft3Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or missing_format(jsonschema.Draft3Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft4 = DRAFT4.to_unittest_testcase( + DRAFT4.cases(), + DRAFT4.format_cases(), + DRAFT4.optional_cases_of(name="bignum"), + DRAFT4.optional_cases_of(name="float-overflow"), + 
DRAFT4.optional_cases_of(name="id"), + DRAFT4.optional_cases_of(name="non-bmp-regex"), + DRAFT4.optional_cases_of(name="zeroTerminatedFloats"), + Validator=jsonschema.Draft4Validator, + format_checker=jsonschema.Draft4Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft4Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft6 = DRAFT6.to_unittest_testcase( + DRAFT6.cases(), + DRAFT6.format_cases(), + DRAFT6.optional_cases_of(name="bignum"), + DRAFT6.optional_cases_of(name="float-overflow"), + DRAFT6.optional_cases_of(name="id"), + DRAFT6.optional_cases_of(name="non-bmp-regex"), + Validator=jsonschema.Draft6Validator, + format_checker=jsonschema.Draft6Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft6Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft7 = DRAFT7.to_unittest_testcase( + DRAFT7.cases(), + DRAFT7.format_cases(), + DRAFT7.optional_cases_of(name="bignum"), + DRAFT7.optional_cases_of(name="cross-draft"), + DRAFT7.optional_cases_of(name="float-overflow"), + DRAFT6.optional_cases_of(name="id"), + DRAFT7.optional_cases_of(name="non-bmp-regex"), + DRAFT7.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft7Validator, + format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER, + skip=lambda test: ( + ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft7Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft201909 = DRAFT201909.to_unittest_testcase( + DRAFT201909.cases(), + DRAFT201909.optional_cases_of(name="anchor"), + DRAFT201909.optional_cases_of(name="bignum"), + DRAFT201909.optional_cases_of(name="cross-draft"), + DRAFT201909.optional_cases_of(name="float-overflow"), + DRAFT201909.optional_cases_of(name="id"), + DRAFT201909.optional_cases_of(name="no-schema"), + DRAFT201909.optional_cases_of(name="non-bmp-regex"), + DRAFT201909.optional_cases_of(name="refOfUnknownKeyword"), + DRAFT201909.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft201909Validator, + skip=skip( + message="Vocabulary support is still in-progress.", + subject="vocabulary", + description=( + "no validation: invalid number, but it still validates" + ), + ), +) + + +TestDraft201909Format = DRAFT201909.to_unittest_testcase( + DRAFT201909.format_cases(), + name="TestDraft201909Format", + Validator=jsonschema.Draft201909Validator, + format_checker=jsonschema.Draft201909Validator.FORMAT_CHECKER, + skip=lambda test: ( + complex_email_validation(test) + or ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft201909Validator)(test) + or complex_email_validation(test) + ), +) + + +TestDraft202012 = DRAFT202012.to_unittest_testcase( + DRAFT202012.cases(), + DRAFT201909.optional_cases_of(name="anchor"), + DRAFT202012.optional_cases_of(name="bignum"), + DRAFT202012.optional_cases_of(name="cross-draft"), + DRAFT202012.optional_cases_of(name="float-overflow"), + DRAFT202012.optional_cases_of(name="id"), + DRAFT202012.optional_cases_of(name="no-schema"), + DRAFT202012.optional_cases_of(name="non-bmp-regex"), + DRAFT202012.optional_cases_of(name="refOfUnknownKeyword"), + DRAFT202012.optional_cases_of(name="unknownKeyword"), + Validator=jsonschema.Draft202012Validator, + skip=skip( + message="Vocabulary support is still in-progress.", + subject="vocabulary", + description=( + "no validation: invalid number, but 
it still validates" + ), + ), +) + + +TestDraft202012Format = DRAFT202012.to_unittest_testcase( + DRAFT202012.format_cases(), + name="TestDraft202012Format", + Validator=jsonschema.Draft202012Validator, + format_checker=jsonschema.Draft202012Validator.FORMAT_CHECKER, + skip=lambda test: ( + complex_email_validation(test) + or ecmascript_regex(test) + or leap_second(test) + or missing_format(jsonschema.Draft202012Validator)(test) + or complex_email_validation(test) + ), +) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_types.py b/lib/python3.10/site-packages/jsonschema/tests/test_types.py new file mode 100644 index 0000000000000000000000000000000000000000..bd97b180029ae05d7f3b22f1048f9adcb769f36f --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_types.py @@ -0,0 +1,221 @@ +""" +Tests for the `TypeChecker`-based type interface. + +The actual correctness of the type checking is handled in +`test_jsonschema_test_suite`; these tests check that TypeChecker +functions correctly at a more granular level. +""" +from collections import namedtuple +from unittest import TestCase + +from jsonschema import ValidationError, _keywords +from jsonschema._types import TypeChecker +from jsonschema.exceptions import UndefinedTypeCheck, UnknownType +from jsonschema.validators import Draft202012Validator, extend + + +def equals_2(checker, instance): + return instance == 2 + + +def is_namedtuple(instance): + return isinstance(instance, tuple) and getattr(instance, "_fields", None) + + +def is_object_or_named_tuple(checker, instance): + if Draft202012Validator.TYPE_CHECKER.is_type(instance, "object"): + return True + return is_namedtuple(instance) + + +class TestTypeChecker(TestCase): + def test_is_type(self): + checker = TypeChecker({"two": equals_2}) + self.assertEqual( + ( + checker.is_type(instance=2, type="two"), + checker.is_type(instance="bar", type="two"), + ), + (True, False), + ) + + def test_is_unknown_type(self): + with self.assertRaises(UndefinedTypeCheck) as e: + TypeChecker().is_type(4, "foobar") + self.assertIn( + "'foobar' is unknown to this type checker", + str(e.exception), + ) + self.assertTrue( + e.exception.__suppress_context__, + msg="Expected the internal KeyError to be hidden.", + ) + + def test_checks_can_be_added_at_init(self): + checker = TypeChecker({"two": equals_2}) + self.assertEqual(checker, TypeChecker().redefine("two", equals_2)) + + def test_redefine_existing_type(self): + self.assertEqual( + TypeChecker().redefine("two", object()).redefine("two", equals_2), + TypeChecker().redefine("two", equals_2), + ) + + def test_remove(self): + self.assertEqual( + TypeChecker({"two": equals_2}).remove("two"), + TypeChecker(), + ) + + def test_remove_unknown_type(self): + with self.assertRaises(UndefinedTypeCheck) as context: + TypeChecker().remove("foobar") + self.assertIn("foobar", str(context.exception)) + + def test_redefine_many(self): + self.assertEqual( + TypeChecker().redefine_many({"foo": int, "bar": str}), + TypeChecker().redefine("foo", int).redefine("bar", str), + ) + + def test_remove_multiple(self): + self.assertEqual( + TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"), + TypeChecker(), + ) + + def test_type_check_can_raise_key_error(self): + """ + Make sure no one writes: + + try: + self._type_checkers[type](...) + except KeyError: + + ignoring the fact that the function itself can raise that. 
+ """ + + error = KeyError("Stuff") + + def raises_keyerror(checker, instance): + raise error + + with self.assertRaises(KeyError) as context: + TypeChecker({"foo": raises_keyerror}).is_type(4, "foo") + + self.assertIs(context.exception, error) + + def test_repr(self): + checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple}) + self.assertEqual(repr(checker), "") + + +class TestCustomTypes(TestCase): + def test_simple_type_can_be_extended(self): + def int_or_str_int(checker, instance): + if not isinstance(instance, (int, str)): + return False + try: + int(instance) + except ValueError: + return False + return True + + CustomValidator = extend( + Draft202012Validator, + type_checker=Draft202012Validator.TYPE_CHECKER.redefine( + "integer", int_or_str_int, + ), + ) + validator = CustomValidator({"type": "integer"}) + + validator.validate(4) + validator.validate("4") + + with self.assertRaises(ValidationError): + validator.validate(4.4) + + with self.assertRaises(ValidationError): + validator.validate("foo") + + def test_object_can_be_extended(self): + schema = {"type": "object"} + + Point = namedtuple("Point", ["x", "y"]) + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) + validator = CustomValidator(schema) + + validator.validate(Point(x=4, y=5)) + + def test_object_extensions_require_custom_validators(self): + schema = {"type": "object", "required": ["x"]} + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + ) + validator = CustomValidator(schema) + + Point = namedtuple("Point", ["x", "y"]) + # Cannot handle required + with self.assertRaises(ValidationError): + validator.validate(Point(x=4, y=5)) + + def test_object_extensions_can_handle_custom_validators(self): + schema = { + "type": "object", + "required": ["x"], + "properties": {"x": {"type": "integer"}}, + } + + type_checker = Draft202012Validator.TYPE_CHECKER.redefine( + "object", is_object_or_named_tuple, + ) + + def coerce_named_tuple(fn): + def coerced(validator, value, instance, schema): + if is_namedtuple(instance): + instance = instance._asdict() + return fn(validator, value, instance, schema) + return coerced + + required = coerce_named_tuple(_keywords.required) + properties = coerce_named_tuple(_keywords.properties) + + CustomValidator = extend( + Draft202012Validator, + type_checker=type_checker, + validators={"required": required, "properties": properties}, + ) + + validator = CustomValidator(schema) + + Point = namedtuple("Point", ["x", "y"]) + # Can now process required and properties + validator.validate(Point(x=4, y=5)) + + with self.assertRaises(ValidationError): + validator.validate(Point(x="not an integer", y=5)) + + # As well as still handle objects. 
+ validator.validate({"x": 4, "y": 5}) + + with self.assertRaises(ValidationError): + validator.validate({"x": "not an integer", "y": 5}) + + def test_unknown_type(self): + with self.assertRaises(UnknownType) as e: + Draft202012Validator({}).is_type(12, "some unknown type") + self.assertIn("'some unknown type'", str(e.exception)) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_utils.py b/lib/python3.10/site-packages/jsonschema/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d9764b0f9e92edb38c19e1fc43b248a20186ef6b --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_utils.py @@ -0,0 +1,138 @@ +from math import nan +from unittest import TestCase + +from jsonschema._utils import equal + + +class TestEqual(TestCase): + def test_none(self): + self.assertTrue(equal(None, None)) + + def test_nan(self): + self.assertTrue(equal(nan, nan)) + + +class TestDictEqual(TestCase): + def test_equal_dictionaries(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "b"} + self.assertTrue(equal(dict_1, dict_2)) + + def test_equal_dictionaries_with_nan(self): + dict_1 = {"a": nan, "c": "d"} + dict_2 = {"c": "d", "a": nan} + self.assertTrue(equal(dict_1, dict_2)) + + def test_missing_key(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "x": "b"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_additional_key(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "b", "x": "x"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_missing_value(self): + dict_1 = {"a": "b", "c": "d"} + dict_2 = {"c": "d", "a": "x"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_empty_dictionaries(self): + dict_1 = {} + dict_2 = {} + self.assertTrue(equal(dict_1, dict_2)) + + def test_one_none(self): + dict_1 = None + dict_2 = {"a": "b", "c": "d"} + self.assertFalse(equal(dict_1, dict_2)) + + def test_same_item(self): + dict_1 = {"a": "b", "c": "d"} + self.assertTrue(equal(dict_1, dict_1)) + + def test_nested_equal(self): + dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"} + dict_2 = {"c": "d", "a": {"a": "b", "c": "d"}} + self.assertTrue(equal(dict_1, dict_2)) + + def test_nested_dict_unequal(self): + dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"} + dict_2 = {"c": "d", "a": {"a": "b", "c": "x"}} + self.assertFalse(equal(dict_1, dict_2)) + + def test_mixed_nested_equal(self): + dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"} + dict_2 = {"c": "d", "a": ["a", "b", "c", "d"]} + self.assertTrue(equal(dict_1, dict_2)) + + def test_nested_list_unequal(self): + dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"} + dict_2 = {"c": "d", "a": ["b", "c", "d", "a"]} + self.assertFalse(equal(dict_1, dict_2)) + + +class TestListEqual(TestCase): + def test_equal_lists(self): + list_1 = ["a", "b", "c"] + list_2 = ["a", "b", "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_equal_lists_with_nan(self): + list_1 = ["a", nan, "c"] + list_2 = ["a", nan, "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_unsorted_lists(self): + list_1 = ["a", "b", "c"] + list_2 = ["b", "b", "a"] + self.assertFalse(equal(list_1, list_2)) + + def test_first_list_larger(self): + list_1 = ["a", "b", "c"] + list_2 = ["a", "b"] + self.assertFalse(equal(list_1, list_2)) + + def test_second_list_larger(self): + list_1 = ["a", "b"] + list_2 = ["a", "b", "c"] + self.assertFalse(equal(list_1, list_2)) + + def test_list_with_none_unequal(self): + list_1 = ["a", "b", None] + list_2 = ["a", "b", "c"] + self.assertFalse(equal(list_1, list_2)) + + 
list_1 = ["a", "b", None] + list_2 = [None, "b", "c"] + self.assertFalse(equal(list_1, list_2)) + + def test_list_with_none_equal(self): + list_1 = ["a", None, "c"] + list_2 = ["a", None, "c"] + self.assertTrue(equal(list_1, list_2)) + + def test_empty_list(self): + list_1 = [] + list_2 = [] + self.assertTrue(equal(list_1, list_2)) + + def test_one_none(self): + list_1 = None + list_2 = [] + self.assertFalse(equal(list_1, list_2)) + + def test_same_list(self): + list_1 = ["a", "b", "c"] + self.assertTrue(equal(list_1, list_1)) + + def test_equal_nested_lists(self): + list_1 = ["a", ["b", "c"], "d"] + list_2 = ["a", ["b", "c"], "d"] + self.assertTrue(equal(list_1, list_2)) + + def test_unequal_nested_lists(self): + list_1 = ["a", ["b", "c"], "d"] + list_2 = ["a", [], "c"] + self.assertFalse(equal(list_1, list_2)) diff --git a/lib/python3.10/site-packages/jsonschema/tests/test_validators.py b/lib/python3.10/site-packages/jsonschema/tests/test_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..28cc4027372c10df1d071eb656102b3b81bc5ecc --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema/tests/test_validators.py @@ -0,0 +1,2575 @@ +from __future__ import annotations + +from collections import deque, namedtuple +from contextlib import contextmanager +from decimal import Decimal +from io import BytesIO +from typing import Any +from unittest import TestCase, mock +from urllib.request import pathname2url +import json +import os +import sys +import tempfile +import warnings + +from attrs import define, field +from referencing.jsonschema import DRAFT202012 +import referencing.exceptions + +from jsonschema import ( + FormatChecker, + TypeChecker, + exceptions, + protocols, + validators, +) + + +def fail(validator, errors, instance, schema): + for each in errors: + each.setdefault("message", "You told me to fail!") + yield exceptions.ValidationError(**each) + + +class TestCreateAndExtend(TestCase): + def setUp(self): + self.addCleanup( + self.assertEqual, + validators._META_SCHEMAS, + dict(validators._META_SCHEMAS), + ) + self.addCleanup( + self.assertEqual, + validators._VALIDATORS, + dict(validators._VALIDATORS), + ) + + self.meta_schema = {"$id": "some://meta/schema"} + self.validators = {"fail": fail} + self.type_checker = TypeChecker() + self.Validator = validators.create( + meta_schema=self.meta_schema, + validators=self.validators, + type_checker=self.type_checker, + ) + + def test_attrs(self): + self.assertEqual( + ( + self.Validator.VALIDATORS, + self.Validator.META_SCHEMA, + self.Validator.TYPE_CHECKER, + ), ( + self.validators, + self.meta_schema, + self.type_checker, + ), + ) + + def test_init(self): + schema = {"fail": []} + self.assertEqual(self.Validator(schema).schema, schema) + + def test_iter_errors_successful(self): + schema = {"fail": []} + validator = self.Validator(schema) + + errors = list(validator.iter_errors("hello")) + self.assertEqual(errors, []) + + def test_iter_errors_one_error(self): + schema = {"fail": [{"message": "Whoops!"}]} + validator = self.Validator(schema) + + expected_error = exceptions.ValidationError( + "Whoops!", + instance="goodbye", + schema=schema, + validator="fail", + validator_value=[{"message": "Whoops!"}], + schema_path=deque(["fail"]), + ) + + errors = list(validator.iter_errors("goodbye")) + self.assertEqual(len(errors), 1) + self.assertEqual(errors[0]._contents(), expected_error._contents()) + + def test_iter_errors_multiple_errors(self): + schema = { + "fail": [ + {"message": "First"}, + {"message": 
"Second!", "validator": "asdf"}, + {"message": "Third"}, + ], + } + validator = self.Validator(schema) + + errors = list(validator.iter_errors("goodbye")) + self.assertEqual(len(errors), 3) + + def test_if_a_version_is_provided_it_is_registered(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual(Validator.__name__, "MyVersionValidator") + self.assertEqual(Validator.__qualname__, "MyVersionValidator") + + def test_repr(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual( + repr(Validator({})), + "MyVersionValidator(schema={}, format_checker=None)", + ) + + def test_long_repr(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "my version") + self.assertEqual( + repr(Validator({"a": list(range(1000))})), ( + "MyVersionValidator(schema={'a': [0, 1, 2, 3, 4, 5, ...]}, " + "format_checker=None)" + ), + ) + + def test_repr_no_version(self): + Validator = validators.create(meta_schema={}) + self.assertEqual( + repr(Validator({})), + "Validator(schema={}, format_checker=None)", + ) + + def test_dashes_are_stripped_from_validator_names(self): + Validator = validators.create( + meta_schema={"$id": "something"}, + version="foo-bar", + ) + self.addCleanup(validators._META_SCHEMAS.pop, "something") + self.addCleanup(validators._VALIDATORS.pop, "foo-bar") + self.assertEqual(Validator.__qualname__, "FooBarValidator") + + def test_if_a_version_is_not_provided_it_is_not_registered(self): + original = dict(validators._META_SCHEMAS) + validators.create(meta_schema={"id": "id"}) + self.assertEqual(validators._META_SCHEMAS, original) + + def test_validates_registers_meta_schema_id(self): + meta_schema_key = "meta schema id" + my_meta_schema = {"id": meta_schema_key} + + validators.create( + meta_schema=my_meta_schema, + version="my version", + id_of=lambda s: s.get("id", ""), + ) + self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key) + self.addCleanup(validators._VALIDATORS.pop, "my version") + + self.assertIn(meta_schema_key, validators._META_SCHEMAS) + + def test_validates_registers_meta_schema_draft6_id(self): + meta_schema_key = "meta schema $id" + my_meta_schema = {"$id": meta_schema_key} + + validators.create( + meta_schema=my_meta_schema, + version="my version", + ) + self.addCleanup(validators._META_SCHEMAS.pop, meta_schema_key) + self.addCleanup(validators._VALIDATORS.pop, "my version") + + self.assertIn(meta_schema_key, validators._META_SCHEMAS) + + def test_create_default_types(self): + Validator = validators.create(meta_schema={}, validators=()) + self.assertTrue( + all( + Validator({}).is_type(instance=instance, type=type) + for type, instance in [ + ("array", []), + ("boolean", True), + ("integer", 12), + ("null", None), + ("number", 12.0), + ("object", {}), + ("string", "foo"), + ] + ), + ) + + def test_check_schema_with_different_metaschema(self): + """ + One can create a validator class whose metaschema uses a different + dialect than itself. 
+ """ + + NoEmptySchemasValidator = validators.create( + meta_schema={ + "$schema": validators.Draft202012Validator.META_SCHEMA["$id"], + "not": {"const": {}}, + }, + ) + NoEmptySchemasValidator.check_schema({"foo": "bar"}) + + with self.assertRaises(exceptions.SchemaError): + NoEmptySchemasValidator.check_schema({}) + + NoEmptySchemasValidator({"foo": "bar"}).validate("foo") + + def test_check_schema_with_different_metaschema_defaults_to_self(self): + """ + A validator whose metaschema doesn't declare $schema defaults to its + own validation behavior, not the latest "normal" specification. + """ + + NoEmptySchemasValidator = validators.create( + meta_schema={"fail": [{"message": "Meta schema whoops!"}]}, + validators={"fail": fail}, + ) + with self.assertRaises(exceptions.SchemaError): + NoEmptySchemasValidator.check_schema({}) + + def test_extend(self): + original = dict(self.Validator.VALIDATORS) + new = object() + + Extended = validators.extend( + self.Validator, + validators={"new": new}, + ) + self.assertEqual( + ( + Extended.VALIDATORS, + Extended.META_SCHEMA, + Extended.TYPE_CHECKER, + self.Validator.VALIDATORS, + ), ( + dict(original, new=new), + self.Validator.META_SCHEMA, + self.Validator.TYPE_CHECKER, + original, + ), + ) + + def test_extend_idof(self): + """ + Extending a validator preserves its notion of schema IDs. + """ + def id_of(schema): + return schema.get("__test__", self.Validator.ID_OF(schema)) + correct_id = "the://correct/id/" + meta_schema = { + "$id": "the://wrong/id/", + "__test__": correct_id, + } + Original = validators.create( + meta_schema=meta_schema, + validators=self.validators, + type_checker=self.type_checker, + id_of=id_of, + ) + self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id) + + Derived = validators.extend(Original) + self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id) + + def test_extend_applicable_validators(self): + """ + Extending a validator preserves its notion of applicable validators. 
+ """ + + schema = { + "$defs": {"test": {"type": "number"}}, + "$ref": "#/$defs/test", + "maximum": 1, + } + + draft4 = validators.Draft4Validator(schema) + self.assertTrue(draft4.is_valid(37)) # as $ref ignores siblings + + Derived = validators.extend(validators.Draft4Validator) + self.assertTrue(Derived(schema).is_valid(37)) + + +class TestValidationErrorMessages(TestCase): + def message_for(self, instance, schema, *args, **kwargs): + cls = kwargs.pop("cls", validators._LATEST_VERSION) + cls.check_schema(schema) + validator = cls(schema, *args, **kwargs) + errors = list(validator.iter_errors(instance)) + self.assertTrue(errors, msg=f"No errors were raised for {instance!r}") + self.assertEqual( + len(errors), + 1, + msg=f"Expected exactly one error, found {errors!r}", + ) + return errors[0].message + + def test_single_type_failure(self): + message = self.message_for(instance=1, schema={"type": "string"}) + self.assertEqual(message, "1 is not of type 'string'") + + def test_single_type_list_failure(self): + message = self.message_for(instance=1, schema={"type": ["string"]}) + self.assertEqual(message, "1 is not of type 'string'") + + def test_multiple_type_failure(self): + types = "string", "object" + message = self.message_for(instance=1, schema={"type": list(types)}) + self.assertEqual(message, "1 is not of type 'string', 'object'") + + def test_object_with_named_type_failure(self): + schema = {"type": [{"name": "Foo", "minimum": 3}]} + message = self.message_for( + instance=1, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "1 is not of type 'Foo'") + + def test_minimum(self): + message = self.message_for(instance=1, schema={"minimum": 2}) + self.assertEqual(message, "1 is less than the minimum of 2") + + def test_maximum(self): + message = self.message_for(instance=1, schema={"maximum": 0}) + self.assertEqual(message, "1 is greater than the maximum of 0") + + def test_dependencies_single_element(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: on}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_object_without_title_type_failure_draft3(self): + type = {"type": [{"minimum": 3}]} + message = self.message_for( + instance=1, + schema={"type": [type]}, + cls=validators.Draft3Validator, + ) + self.assertEqual( + message, + "1 is not of type {'type': [{'minimum': 3}]}", + ) + + def test_dependencies_list_draft3(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: [on]}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft3Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_dependencies_list_draft7(self): + depend, on = "bar", "foo" + schema = {"dependencies": {depend: [on]}} + message = self.message_for( + instance={"bar": 2}, + schema=schema, + cls=validators.Draft7Validator, + ) + self.assertEqual(message, "'foo' is a dependency of 'bar'") + + def test_additionalItems_single_failure(self): + message = self.message_for( + instance=[2], + schema={"items": [], "additionalItems": False}, + cls=validators.Draft3Validator, + ) + self.assertIn("(2 was unexpected)", message) + + def test_additionalItems_multiple_failures(self): + message = self.message_for( + instance=[1, 2, 3], + schema={"items": [], "additionalItems": False}, + cls=validators.Draft3Validator, + ) + self.assertIn("(1, 2, 3 were unexpected)", 
message) + + def test_additionalProperties_single_failure(self): + additional = "foo" + schema = {"additionalProperties": False} + message = self.message_for(instance={additional: 2}, schema=schema) + self.assertIn("('foo' was unexpected)", message) + + def test_additionalProperties_multiple_failures(self): + schema = {"additionalProperties": False} + message = self.message_for( + instance=dict.fromkeys(["foo", "bar"]), + schema=schema, + ) + + self.assertIn(repr("foo"), message) + self.assertIn(repr("bar"), message) + self.assertIn("were unexpected)", message) + + def test_const(self): + schema = {"const": 12} + message = self.message_for( + instance={"foo": "bar"}, + schema=schema, + ) + self.assertIn("12 was expected", message) + + def test_contains_draft_6(self): + schema = {"contains": {"const": 12}} + message = self.message_for( + instance=[2, {}, []], + schema=schema, + cls=validators.Draft6Validator, + ) + self.assertEqual( + message, + "None of [2, {}, []] are valid under the given schema", + ) + + def test_invalid_format_default_message(self): + checker = FormatChecker(formats=()) + checker.checks("thing")(lambda value: False) + + schema = {"format": "thing"} + message = self.message_for( + instance="bla", + schema=schema, + format_checker=checker, + ) + + self.assertIn(repr("bla"), message) + self.assertIn(repr("thing"), message) + self.assertIn("is not a", message) + + def test_additionalProperties_false_patternProperties(self): + schema = {"type": "object", + "additionalProperties": False, + "patternProperties": { + "^abc$": {"type": "string"}, + "^def$": {"type": "string"}, + }} + message = self.message_for( + instance={"zebra": 123}, + schema=schema, + cls=validators.Draft4Validator, + ) + self.assertEqual( + message, + "{} does not match any of the regexes: {}, {}".format( + repr("zebra"), repr("^abc$"), repr("^def$"), + ), + ) + message = self.message_for( + instance={"zebra": 123, "fish": 456}, + schema=schema, + cls=validators.Draft4Validator, + ) + self.assertEqual( + message, + "{}, {} do not match any of the regexes: {}, {}".format( + repr("fish"), repr("zebra"), repr("^abc$"), repr("^def$"), + ), + ) + + def test_False_schema(self): + message = self.message_for( + instance="something", + schema=False, + ) + self.assertEqual(message, "False schema does not allow 'something'") + + def test_multipleOf(self): + message = self.message_for( + instance=3, + schema={"multipleOf": 2}, + ) + self.assertEqual(message, "3 is not a multiple of 2") + + def test_minItems(self): + message = self.message_for(instance=[], schema={"minItems": 2}) + self.assertEqual(message, "[] is too short") + + def test_maxItems(self): + message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 2}) + self.assertEqual(message, "[1, 2, 3] is too long") + + def test_minItems_1(self): + message = self.message_for(instance=[], schema={"minItems": 1}) + self.assertEqual(message, "[] should be non-empty") + + def test_maxItems_0(self): + message = self.message_for(instance=[1, 2, 3], schema={"maxItems": 0}) + self.assertEqual(message, "[1, 2, 3] is expected to be empty") + + def test_minLength(self): + message = self.message_for( + instance="", + schema={"minLength": 2}, + ) + self.assertEqual(message, "'' is too short") + + def test_maxLength(self): + message = self.message_for( + instance="abc", + schema={"maxLength": 2}, + ) + self.assertEqual(message, "'abc' is too long") + + def test_minLength_1(self): + message = self.message_for(instance="", schema={"minLength": 1}) + 
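+ # A bound of exactly 1 (or 0) gets friendlier wording than "too short"/"too long", as the next assertions show.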
self.assertEqual(message, "'' should be non-empty") + + def test_maxLength_0(self): + message = self.message_for(instance="abc", schema={"maxLength": 0}) + self.assertEqual(message, "'abc' is expected to be empty") + + def test_minProperties(self): + message = self.message_for(instance={}, schema={"minProperties": 2}) + self.assertEqual(message, "{} does not have enough properties") + + def test_maxProperties(self): + message = self.message_for( + instance={"a": {}, "b": {}, "c": {}}, + schema={"maxProperties": 2}, + ) + self.assertEqual( + message, + "{'a': {}, 'b': {}, 'c': {}} has too many properties", + ) + + def test_minProperties_1(self): + message = self.message_for(instance={}, schema={"minProperties": 1}) + self.assertEqual(message, "{} should be non-empty") + + def test_maxProperties_0(self): + message = self.message_for( + instance={1: 2}, + schema={"maxProperties": 0}, + ) + self.assertEqual(message, "{1: 2} is expected to be empty") + + def test_prefixItems_with_items(self): + message = self.message_for( + instance=[1, 2, "foo"], + schema={"items": False, "prefixItems": [{}, {}]}, + ) + self.assertEqual( + message, + "Expected at most 2 items but found 1 extra: 'foo'", + ) + + def test_prefixItems_with_multiple_extra_items(self): + message = self.message_for( + instance=[1, 2, "foo", 5], + schema={"items": False, "prefixItems": [{}, {}]}, + ) + self.assertEqual( + message, + "Expected at most 2 items but found 2 extra: ['foo', 5]", + ) + + def test_pattern(self): + message = self.message_for( + instance="bbb", + schema={"pattern": "^a*$"}, + ) + self.assertEqual(message, "'bbb' does not match '^a*$'") + + def test_does_not_contain(self): + message = self.message_for( + instance=[], + schema={"contains": {"type": "string"}}, + ) + self.assertEqual( + message, + "[] does not contain items matching the given schema", + ) + + def test_contains_too_few(self): + message = self.message_for( + instance=["foo", 1], + schema={"contains": {"type": "string"}, "minContains": 2}, + ) + self.assertEqual( + message, + "Too few items match the given schema " + "(expected at least 2 but only 1 matched)", + ) + + def test_contains_too_few_both_constrained(self): + message = self.message_for( + instance=["foo", 1], + schema={ + "contains": {"type": "string"}, + "minContains": 2, + "maxContains": 4, + }, + ) + self.assertEqual( + message, + "Too few items match the given schema (expected at least 2 but " + "only 1 matched)", + ) + + def test_contains_too_many(self): + message = self.message_for( + instance=["foo", "bar", "baz"], + schema={"contains": {"type": "string"}, "maxContains": 2}, + ) + self.assertEqual( + message, + "Too many items match the given schema (expected at most 2)", + ) + + def test_contains_too_many_both_constrained(self): + message = self.message_for( + instance=["foo"] * 5, + schema={ + "contains": {"type": "string"}, + "minContains": 2, + "maxContains": 4, + }, + ) + self.assertEqual( + message, + "Too many items match the given schema (expected at most 4)", + ) + + def test_exclusiveMinimum(self): + message = self.message_for( + instance=3, + schema={"exclusiveMinimum": 5}, + ) + self.assertEqual( + message, + "3 is less than or equal to the minimum of 5", + ) + + def test_exclusiveMaximum(self): + message = self.message_for(instance=3, schema={"exclusiveMaximum": 2}) + self.assertEqual( + message, + "3 is greater than or equal to the maximum of 2", + ) + + def test_required(self): + message = self.message_for(instance={}, schema={"required": ["foo"]}) + 
self.assertEqual(message, "'foo' is a required property") + + def test_dependentRequired(self): + message = self.message_for( + instance={"foo": {}}, + schema={"dependentRequired": {"foo": ["bar"]}}, + ) + self.assertEqual(message, "'bar' is a dependency of 'foo'") + + def test_oneOf_matches_none(self): + message = self.message_for(instance={}, schema={"oneOf": [False]}) + self.assertEqual( + message, + "{} is not valid under any of the given schemas", + ) + + def test_oneOf_matches_too_many(self): + message = self.message_for(instance={}, schema={"oneOf": [True, True]}) + self.assertEqual(message, "{} is valid under each of True, True") + + def test_unevaluated_items(self): + schema = {"type": "array", "unevaluatedItems": False} + message = self.message_for(instance=["foo", "bar"], schema=schema) + self.assertIn( + message, + "Unevaluated items are not allowed ('foo', 'bar' were unexpected)", + ) + + def test_unevaluated_items_on_invalid_type(self): + schema = {"type": "array", "unevaluatedItems": False} + message = self.message_for(instance="foo", schema=schema) + self.assertEqual(message, "'foo' is not of type 'array'") + + def test_unevaluated_properties_invalid_against_subschema(self): + schema = { + "properties": {"foo": {"type": "string"}}, + "unevaluatedProperties": {"const": 12}, + } + message = self.message_for( + instance={ + "foo": "foo", + "bar": "bar", + "baz": 12, + }, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not valid under the given schema " + "('bar' was unevaluated and invalid)", + ) + + def test_unevaluated_properties_disallowed(self): + schema = {"type": "object", "unevaluatedProperties": False} + message = self.message_for( + instance={ + "foo": "foo", + "bar": "bar", + }, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not allowed " + "('bar', 'foo' were unexpected)", + ) + + def test_unevaluated_properties_on_invalid_type(self): + schema = {"type": "object", "unevaluatedProperties": False} + message = self.message_for(instance="foo", schema=schema) + self.assertEqual(message, "'foo' is not of type 'object'") + + def test_single_item(self): + schema = {"prefixItems": [{}], "items": False} + message = self.message_for( + instance=["foo", "bar", "baz"], + schema=schema, + ) + self.assertEqual( + message, + "Expected at most 1 item but found 2 extra: ['bar', 'baz']", + ) + + def test_heterogeneous_additionalItems_with_Items(self): + schema = {"items": [{}], "additionalItems": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + cls=validators.Draft7Validator, + ) + self.assertEqual( + message, + "Additional items are not allowed ('bar', 37 were unexpected)", + ) + + def test_heterogeneous_items_prefixItems(self): + schema = {"prefixItems": [{}], "items": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + ) + self.assertEqual( + message, + "Expected at most 1 item but found 2 extra: ['bar', 37]", + ) + + def test_heterogeneous_unevaluatedItems_prefixItems(self): + schema = {"prefixItems": [{}], "unevaluatedItems": False} + message = self.message_for( + instance=["foo", "bar", 37], + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated items are not allowed ('bar', 37 were unexpected)", + ) + + def test_heterogeneous_properties_additionalProperties(self): + """ + Not valid deserialized JSON, but this should not blow up. 
+ """ + schema = {"properties": {"foo": {}}, "additionalProperties": False} + message = self.message_for( + instance={"foo": {}, "a": "baz", 37: 12}, + schema=schema, + ) + self.assertEqual( + message, + "Additional properties are not allowed (37, 'a' were unexpected)", + ) + + def test_heterogeneous_properties_unevaluatedProperties(self): + """ + Not valid deserialized JSON, but this should not blow up. + """ + schema = {"properties": {"foo": {}}, "unevaluatedProperties": False} + message = self.message_for( + instance={"foo": {}, "a": "baz", 37: 12}, + schema=schema, + ) + self.assertEqual( + message, + "Unevaluated properties are not allowed (37, 'a' were unexpected)", + ) + + +class TestValidationErrorDetails(TestCase): + # TODO: These really need unit tests for each individual keyword, rather + # than just these higher level tests. + def test_anyOf(self): + instance = 5 + schema = { + "anyOf": [ + {"minimum": 20}, + {"type": "string"}, + ], + } + + validator = validators.Draft4Validator(schema) + errors = list(validator.iter_errors(instance)) + self.assertEqual(len(errors), 1) + e = errors[0] + + self.assertEqual(e.validator, "anyOf") + self.assertEqual(e.validator_value, schema["anyOf"]) + self.assertEqual(e.instance, instance) + self.assertEqual(e.schema, schema) + self.assertIsNone(e.parent) + + self.assertEqual(e.path, deque([])) + self.assertEqual(e.relative_path, deque([])) + self.assertEqual(e.absolute_path, deque([])) + self.assertEqual(e.json_path, "$") + + self.assertEqual(e.schema_path, deque(["anyOf"])) + self.assertEqual(e.relative_schema_path, deque(["anyOf"])) + self.assertEqual(e.absolute_schema_path, deque(["anyOf"])) + + self.assertEqual(len(e.context), 2) + + e1, e2 = sorted_errors(e.context) + + self.assertEqual(e1.validator, "minimum") + self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"]) + self.assertEqual(e1.instance, instance) + self.assertEqual(e1.schema, schema["anyOf"][0]) + self.assertIs(e1.parent, e) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e1.absolute_path, deque([])) + self.assertEqual(e1.relative_path, deque([])) + self.assertEqual(e1.json_path, "$") + + self.assertEqual(e1.schema_path, deque([0, "minimum"])) + self.assertEqual(e1.relative_schema_path, deque([0, "minimum"])) + self.assertEqual( + e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]), + ) + + self.assertFalse(e1.context) + + self.assertEqual(e2.validator, "type") + self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"]) + self.assertEqual(e2.instance, instance) + self.assertEqual(e2.schema, schema["anyOf"][1]) + self.assertIs(e2.parent, e) + + self.assertEqual(e2.path, deque([])) + self.assertEqual(e2.relative_path, deque([])) + self.assertEqual(e2.absolute_path, deque([])) + self.assertEqual(e2.json_path, "$") + + self.assertEqual(e2.schema_path, deque([1, "type"])) + self.assertEqual(e2.relative_schema_path, deque([1, "type"])) + self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"])) + + self.assertEqual(len(e2.context), 0) + + def test_type(self): + instance = {"foo": 1} + schema = { + "type": [ + {"type": "integer"}, + { + "type": "object", + "properties": {"foo": {"enum": [2]}}, + }, + ], + } + + validator = validators.Draft3Validator(schema) + errors = list(validator.iter_errors(instance)) + self.assertEqual(len(errors), 1) + e = errors[0] + + self.assertEqual(e.validator, "type") + self.assertEqual(e.validator_value, schema["type"]) + self.assertEqual(e.instance, instance) + self.assertEqual(e.schema, schema) + 
self.assertIsNone(e.parent) + + self.assertEqual(e.path, deque([])) + self.assertEqual(e.relative_path, deque([])) + self.assertEqual(e.absolute_path, deque([])) + self.assertEqual(e.json_path, "$") + + self.assertEqual(e.schema_path, deque(["type"])) + self.assertEqual(e.relative_schema_path, deque(["type"])) + self.assertEqual(e.absolute_schema_path, deque(["type"])) + + self.assertEqual(len(e.context), 2) + + e1, e2 = sorted_errors(e.context) + + self.assertEqual(e1.validator, "type") + self.assertEqual(e1.validator_value, schema["type"][0]["type"]) + self.assertEqual(e1.instance, instance) + self.assertEqual(e1.schema, schema["type"][0]) + self.assertIs(e1.parent, e) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e1.relative_path, deque([])) + self.assertEqual(e1.absolute_path, deque([])) + self.assertEqual(e1.json_path, "$") + + self.assertEqual(e1.schema_path, deque([0, "type"])) + self.assertEqual(e1.relative_schema_path, deque([0, "type"])) + self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"])) + + self.assertFalse(e1.context) + + self.assertEqual(e2.validator, "enum") + self.assertEqual(e2.validator_value, [2]) + self.assertEqual(e2.instance, 1) + self.assertEqual(e2.schema, {"enum": [2]}) + self.assertIs(e2.parent, e) + + self.assertEqual(e2.path, deque(["foo"])) + self.assertEqual(e2.relative_path, deque(["foo"])) + self.assertEqual(e2.absolute_path, deque(["foo"])) + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual( + e2.schema_path, deque([1, "properties", "foo", "enum"]), + ) + self.assertEqual( + e2.relative_schema_path, deque([1, "properties", "foo", "enum"]), + ) + self.assertEqual( + e2.absolute_schema_path, + deque(["type", 1, "properties", "foo", "enum"]), + ) + + self.assertFalse(e2.context) + + def test_single_nesting(self): + instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"} + schema = { + "properties": { + "foo": {"type": "string"}, + "bar": {"minItems": 2}, + "baz": {"maximum": 10, "enum": [2, 4, 6, 8]}, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2, e3, e4 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["baz"])) + self.assertEqual(e3.path, deque(["baz"])) + self.assertEqual(e4.path, deque(["foo"])) + + self.assertEqual(e1.relative_path, deque(["bar"])) + self.assertEqual(e2.relative_path, deque(["baz"])) + self.assertEqual(e3.relative_path, deque(["baz"])) + self.assertEqual(e4.relative_path, deque(["foo"])) + + self.assertEqual(e1.absolute_path, deque(["bar"])) + self.assertEqual(e2.absolute_path, deque(["baz"])) + self.assertEqual(e3.absolute_path, deque(["baz"])) + self.assertEqual(e4.absolute_path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.baz") + self.assertEqual(e3.json_path, "$.baz") + self.assertEqual(e4.json_path, "$.foo") + + self.assertEqual(e1.validator, "minItems") + self.assertEqual(e2.validator, "enum") + self.assertEqual(e3.validator, "maximum") + self.assertEqual(e4.validator, "type") + + def test_multiple_nesting(self): + instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"] + schema = { + "type": "string", + "items": { + "type": ["string", "object"], + "properties": { + "foo": {"enum": [1, 3]}, + "bar": { + "type": "array", + "properties": { + "bar": {"required": True}, + "baz": {"minItems": 2}, + }, + }, + }, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, 
e2, e3, e4, e5, e6 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([])) + self.assertEqual(e2.path, deque([0])) + self.assertEqual(e3.path, deque([1, "bar"])) + self.assertEqual(e4.path, deque([1, "bar", "bar"])) + self.assertEqual(e5.path, deque([1, "bar", "baz"])) + self.assertEqual(e6.path, deque([1, "foo"])) + + self.assertEqual(e1.json_path, "$") + self.assertEqual(e2.json_path, "$[0]") + self.assertEqual(e3.json_path, "$[1].bar") + self.assertEqual(e4.json_path, "$[1].bar.bar") + self.assertEqual(e5.json_path, "$[1].bar.baz") + self.assertEqual(e6.json_path, "$[1].foo") + + self.assertEqual(e1.schema_path, deque(["type"])) + self.assertEqual(e2.schema_path, deque(["items", "type"])) + self.assertEqual( + list(e3.schema_path), ["items", "properties", "bar", "type"], + ) + self.assertEqual( + list(e4.schema_path), + ["items", "properties", "bar", "properties", "bar", "required"], + ) + self.assertEqual( + list(e5.schema_path), + ["items", "properties", "bar", "properties", "baz", "minItems"], + ) + self.assertEqual( + list(e6.schema_path), ["items", "properties", "foo", "enum"], + ) + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "type") + self.assertEqual(e3.validator, "type") + self.assertEqual(e4.validator, "required") + self.assertEqual(e5.validator, "minItems") + self.assertEqual(e6.validator, "enum") + + def test_recursive(self): + schema = { + "definitions": { + "node": { + "anyOf": [{ + "type": "object", + "required": ["name", "children"], + "properties": { + "name": { + "type": "string", + }, + "children": { + "type": "object", + "patternProperties": { + "^.*$": { + "$ref": "#/definitions/node", + }, + }, + }, + }, + }], + }, + }, + "type": "object", + "required": ["root"], + "properties": {"root": {"$ref": "#/definitions/node"}}, + } + + instance = { + "root": { + "name": "root", + "children": { + "a": { + "name": "a", + "children": { + "ab": { + "name": "ab", + # missing "children" + }, + }, + }, + }, + }, + } + validator = validators.Draft4Validator(schema) + + e, = validator.iter_errors(instance) + self.assertEqual(e.absolute_path, deque(["root"])) + self.assertEqual( + e.absolute_schema_path, deque(["properties", "root", "anyOf"]), + ) + self.assertEqual(e.json_path, "$.root") + + e1, = e.context + self.assertEqual(e1.absolute_path, deque(["root", "children", "a"])) + self.assertEqual( + e1.absolute_schema_path, deque( + [ + "properties", + "root", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + ], + ), + ) + self.assertEqual(e1.json_path, "$.root.children.a") + + e2, = e1.context + self.assertEqual( + e2.absolute_path, deque( + ["root", "children", "a", "children", "ab"], + ), + ) + self.assertEqual( + e2.absolute_schema_path, deque( + [ + "properties", + "root", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + 0, + "properties", + "children", + "patternProperties", + "^.*$", + "anyOf", + ], + ), + ) + self.assertEqual(e2.json_path, "$.root.children.a.children.ab") + + def test_additionalProperties(self): + instance = {"bar": "bar", "foo": 2} + schema = {"additionalProperties": {"type": "integer", "minimum": 5}} + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual(e1.validator, 
"type") + self.assertEqual(e2.validator, "minimum") + + def test_patternProperties(self): + instance = {"bar": 1, "foo": 2} + schema = { + "patternProperties": { + "bar": {"type": "string"}, + "foo": {"minimum": 5}, + }, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque(["bar"])) + self.assertEqual(e2.path, deque(["foo"])) + + self.assertEqual(e1.json_path, "$.bar") + self.assertEqual(e2.json_path, "$.foo") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_additionalItems(self): + instance = ["foo", 1] + schema = { + "items": [], + "additionalItems": {"type": "integer", "minimum": 5}, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([0])) + self.assertEqual(e2.path, deque([1])) + + self.assertEqual(e1.json_path, "$[0]") + self.assertEqual(e2.json_path, "$[1]") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_additionalItems_with_items(self): + instance = ["foo", "bar", 1] + schema = { + "items": [{}], + "additionalItems": {"type": "integer", "minimum": 5}, + } + + validator = validators.Draft3Validator(schema) + errors = validator.iter_errors(instance) + e1, e2 = sorted_errors(errors) + + self.assertEqual(e1.path, deque([1])) + self.assertEqual(e2.path, deque([2])) + + self.assertEqual(e1.json_path, "$[1]") + self.assertEqual(e2.json_path, "$[2]") + + self.assertEqual(e1.validator, "type") + self.assertEqual(e2.validator, "minimum") + + def test_propertyNames(self): + instance = {"foo": 12} + schema = {"propertyNames": {"not": {"const": "foo"}}} + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(instance) + + self.assertEqual(error.validator, "not") + self.assertEqual( + error.message, + "'foo' should not be valid under {'const': 'foo'}", + ) + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["propertyNames", "not"])) + + def test_if_then(self): + schema = { + "if": {"const": 12}, + "then": {"const": 13}, + } + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(12) + + self.assertEqual(error.validator, "const") + self.assertEqual(error.message, "13 was expected") + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["then", "const"])) + + def test_if_else(self): + schema = { + "if": {"const": 12}, + "else": {"const": 13}, + } + + validator = validators.Draft7Validator(schema) + error, = validator.iter_errors(15) + + self.assertEqual(error.validator, "const") + self.assertEqual(error.message, "13 was expected") + self.assertEqual(error.path, deque([])) + self.assertEqual(error.json_path, "$") + self.assertEqual(error.schema_path, deque(["else", "const"])) + + def test_boolean_schema_False(self): + validator = validators.Draft7Validator(False) + error, = validator.iter_errors(12) + + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "False schema does not allow 12", + None, + None, + 12, + False, + deque([]), + "$", + ), + ) + + def test_ref(self): + ref, schema = "someRef", {"additionalProperties": {"type": "integer"}} + validator = 
validators.Draft7Validator( + {"$ref": ref}, + resolver=validators._RefResolver("", {}, store={ref: schema}), + ) + error, = validator.iter_errors({"foo": "notAnInteger"}) + + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "'notAnInteger' is not of type 'integer'", + "type", + "integer", + "notAnInteger", + deque(["foo"]), + {"type": "integer"}, + deque(["additionalProperties", "type"]), + "$.foo", + ), + ) + + def test_prefixItems(self): + schema = {"prefixItems": [{"type": "string"}, {}, {}, {"maximum": 3}]} + validator = validators.Draft202012Validator(schema) + type_error, min_error = validator.iter_errors([1, 2, "foo", 5]) + self.assertEqual( + ( + type_error.message, + type_error.validator, + type_error.validator_value, + type_error.instance, + type_error.absolute_path, + type_error.schema, + type_error.schema_path, + type_error.json_path, + ), + ( + "1 is not of type 'string'", + "type", + "string", + 1, + deque([0]), + {"type": "string"}, + deque(["prefixItems", 0, "type"]), + "$[0]", + ), + ) + self.assertEqual( + ( + min_error.message, + min_error.validator, + min_error.validator_value, + min_error.instance, + min_error.absolute_path, + min_error.schema, + min_error.schema_path, + min_error.json_path, + ), + ( + "5 is greater than the maximum of 3", + "maximum", + 3, + 5, + deque([3]), + {"maximum": 3}, + deque(["prefixItems", 3, "maximum"]), + "$[3]", + ), + ) + + def test_prefixItems_with_items(self): + schema = { + "items": {"type": "string"}, + "prefixItems": [{}], + } + validator = validators.Draft202012Validator(schema) + e1, e2 = validator.iter_errors(["foo", 2, "bar", 4, "baz"]) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.json_path, + ), + ( + "2 is not of type 'string'", + "type", + "string", + 2, + deque([1]), + {"type": "string"}, + deque(["items", "type"]), + "$[1]", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.json_path, + ), + ( + "4 is not of type 'string'", + "type", + "string", + 4, + deque([3]), + {"type": "string"}, + deque(["items", "type"]), + "$[3]", + ), + ) + + def test_contains_too_many(self): + """ + `contains` + `maxContains` produces only one error, even if there are + many more incorrectly matching elements. 
+ """ + schema = {"contains": {"type": "string"}, "maxContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, "bar", 4, "baz", "quux"]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "Too many items match the given schema (expected at most 2)", + "maxContains", + 2, + ["foo", 2, "bar", 4, "baz", "quux"], + deque([]), + {"contains": {"type": "string"}, "maxContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_too_few(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors(["foo", 2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + ( + "Too few items match the given schema " + "(expected at least 2 but only 1 matched)" + ), + "minContains", + 2, + ["foo", 2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_contains_none(self): + schema = {"contains": {"type": "string"}, "minContains": 2} + validator = validators.Draft202012Validator(schema) + error, = validator.iter_errors([2, 4]) + self.assertEqual( + ( + error.message, + error.validator, + error.validator_value, + error.instance, + error.absolute_path, + error.schema, + error.schema_path, + error.json_path, + ), + ( + "[2, 4] does not contain items matching the given schema", + "contains", + {"type": "string"}, + [2, 4], + deque([]), + {"contains": {"type": "string"}, "minContains": 2}, + deque(["contains"]), + "$", + ), + ) + + def test_ref_sibling(self): + schema = { + "$defs": {"foo": {"required": ["bar"]}}, + "properties": { + "aprop": { + "$ref": "#/$defs/foo", + "required": ["baz"], + }, + }, + } + + validator = validators.Draft202012Validator(schema) + e1, e2 = validator.iter_errors({"aprop": {}}) + self.assertEqual( + ( + e1.message, + e1.validator, + e1.validator_value, + e1.instance, + e1.absolute_path, + e1.schema, + e1.schema_path, + e1.relative_schema_path, + e1.json_path, + ), + ( + "'bar' is a required property", + "required", + ["bar"], + {}, + deque(["aprop"]), + {"required": ["bar"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", + ), + ) + self.assertEqual( + ( + e2.message, + e2.validator, + e2.validator_value, + e2.instance, + e2.absolute_path, + e2.schema, + e2.schema_path, + e2.relative_schema_path, + e2.json_path, + ), + ( + "'baz' is a required property", + "required", + ["baz"], + {}, + deque(["aprop"]), + {"$ref": "#/$defs/foo", "required": ["baz"]}, + deque(["properties", "aprop", "required"]), + deque(["properties", "aprop", "required"]), + "$.aprop", + ), + ) + + +class MetaSchemaTestsMixin: + # TODO: These all belong upstream + def test_invalid_properties(self): + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"properties": 12}) + + def test_minItems_invalid_string(self): + with self.assertRaises(exceptions.SchemaError): + # needs to be an integer + self.Validator.check_schema({"minItems": "1"}) + + def test_enum_allows_empty_arrays(self): + """ + Technically, all the spec says is they SHOULD have elements, not MUST. + + (As of Draft 6. Previous drafts do say MUST). + + See #529. 
+ """ + if self.Validator in { + validators.Draft3Validator, + validators.Draft4Validator, + }: + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"enum": []}) + else: + self.Validator.check_schema({"enum": []}) + + def test_enum_allows_non_unique_items(self): + """ + Technically, all the spec says is they SHOULD be unique, not MUST. + + (As of Draft 6. Previous drafts do say MUST). + + See #529. + """ + if self.Validator in { + validators.Draft3Validator, + validators.Draft4Validator, + }: + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"enum": [12, 12]}) + else: + self.Validator.check_schema({"enum": [12, 12]}) + + def test_schema_with_invalid_regex(self): + with self.assertRaises(exceptions.SchemaError): + self.Validator.check_schema({"pattern": "*notaregex"}) + + def test_schema_with_invalid_regex_with_disabled_format_validation(self): + self.Validator.check_schema( + {"pattern": "*notaregex"}, + format_checker=None, + ) + + +class ValidatorTestMixin(MetaSchemaTestsMixin): + def test_it_implements_the_validator_protocol(self): + self.assertIsInstance(self.Validator({}), protocols.Validator) + + def test_valid_instances_are_valid(self): + schema, instance = self.valid + self.assertTrue(self.Validator(schema).is_valid(instance)) + + def test_invalid_instances_are_not_valid(self): + schema, instance = self.invalid + self.assertFalse(self.Validator(schema).is_valid(instance)) + + def test_non_existent_properties_are_ignored(self): + self.Validator({object(): object()}).validate(instance=object()) + + def test_evolve(self): + schema, format_checker = {"type": "integer"}, FormatChecker() + original = self.Validator( + schema, + format_checker=format_checker, + ) + new = original.evolve( + schema={"type": "string"}, + format_checker=self.Validator.FORMAT_CHECKER, + ) + + expected = self.Validator( + {"type": "string"}, + format_checker=self.Validator.FORMAT_CHECKER, + _resolver=new._resolver, + ) + + self.assertEqual(new, expected) + self.assertNotEqual(new, original) + + def test_evolve_with_subclass(self): + """ + Subclassing validators isn't supported public API, but some users have + done it, because we don't actually error entirely when it's done :/ + + We need to deprecate doing so first to help as many of these users + ensure they can move to supported APIs, but this test ensures that in + the interim, we haven't broken those users. 
+ """ + + with self.assertWarns(DeprecationWarning): + @define + class OhNo(self.Validator): + foo = field(factory=lambda: [1, 2, 3]) + _bar = field(default=37) + + validator = OhNo({}, bar=12) + self.assertEqual(validator.foo, [1, 2, 3]) + + new = validator.evolve(schema={"type": "integer"}) + self.assertEqual(new.foo, [1, 2, 3]) + self.assertEqual(new._bar, 12) + + def test_is_type_is_true_for_valid_type(self): + self.assertTrue(self.Validator({}).is_type("foo", "string")) + + def test_is_type_is_false_for_invalid_type(self): + self.assertFalse(self.Validator({}).is_type("foo", "array")) + + def test_is_type_evades_bool_inheriting_from_int(self): + self.assertFalse(self.Validator({}).is_type(True, "integer")) + self.assertFalse(self.Validator({}).is_type(True, "number")) + + def test_it_can_validate_with_decimals(self): + schema = {"items": {"type": "number"}} + Validator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "number", + lambda checker, thing: isinstance( + thing, (int, float, Decimal), + ) and not isinstance(thing, bool), + ), + ) + + validator = Validator(schema) + validator.validate([1, 1.1, Decimal(1) / Decimal(8)]) + + invalid = ["foo", {}, [], True, None] + self.assertEqual( + [error.instance for error in validator.iter_errors(invalid)], + invalid, + ) + + def test_it_returns_true_for_formats_it_does_not_know_about(self): + validator = self.Validator( + {"format": "carrot"}, format_checker=FormatChecker(), + ) + validator.validate("bugs") + + def test_it_does_not_validate_formats_by_default(self): + validator = self.Validator({}) + self.assertIsNone(validator.format_checker) + + def test_it_validates_formats_if_a_checker_is_provided(self): + checker = FormatChecker() + bad = ValueError("Bad!") + + @checker.checks("foo", raises=ValueError) + def check(value): + if value == "good": + return True + elif value == "bad": + raise bad + else: # pragma: no cover + self.fail(f"What is {value}? [Baby Don't Hurt Me]") + + validator = self.Validator( + {"format": "foo"}, format_checker=checker, + ) + + validator.validate("good") + with self.assertRaises(exceptions.ValidationError) as cm: + validator.validate("bad") + + # Make sure original cause is attached + self.assertIs(cm.exception.cause, bad) + + def test_non_string_custom_type(self): + non_string_type = object() + schema = {"type": [non_string_type]} + Crazy = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + non_string_type, + lambda checker, thing: isinstance(thing, int), + ), + ) + Crazy(schema).validate(15) + + def test_it_properly_formats_tuples_in_errors(self): + """ + A tuple instance properly formats validation errors for uniqueItems. 
+ + See #224 + """ + TupleValidator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "array", + lambda checker, thing: isinstance(thing, tuple), + ), + ) + with self.assertRaises(exceptions.ValidationError) as e: + TupleValidator({"uniqueItems": True}).validate((1, 1)) + self.assertIn("(1, 1) has non-unique elements", str(e.exception)) + + def test_check_redefined_sequence(self): + """ + Allow array to validate against another defined sequence type + """ + schema = {"type": "array", "uniqueItems": True} + MyMapping = namedtuple("MyMapping", "a, b") + Validator = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine_many( + { + "array": lambda checker, thing: isinstance( + thing, (list, deque), + ), + "object": lambda checker, thing: isinstance( + thing, (dict, MyMapping), + ), + }, + ), + ) + validator = Validator(schema) + + valid_instances = [ + deque(["a", None, "1", "", True]), + deque([[False], [0]]), + [deque([False]), deque([0])], + [[deque([False])], [deque([0])]], + [[[[[deque([False])]]]], [[[[deque([0])]]]]], + [deque([deque([False])]), deque([deque([0])])], + [MyMapping("a", 0), MyMapping("a", False)], + [ + MyMapping("a", [deque([0])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([0]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([0])))], + ] + + for instance in valid_instances: + validator.validate(instance) + + invalid_instances = [ + deque(["a", "b", "a"]), + deque([[False], [False]]), + [deque([False]), deque([False])], + [[deque([False])], [deque([False])]], + [[[[[deque([False])]]]], [[[[deque([False])]]]]], + [deque([deque([False])]), deque([deque([False])])], + [MyMapping("a", False), MyMapping("a", False)], + [ + MyMapping("a", [deque([False])]), + MyMapping("a", [deque([False])]), + ], + [ + MyMapping("a", [MyMapping("a", deque([False]))]), + MyMapping("a", [MyMapping("a", deque([False]))]), + ], + [deque(deque(deque([False]))), deque(deque(deque([False])))], + ] + + for instance in invalid_instances: + with self.assertRaises(exceptions.ValidationError): + validator.validate(instance) + + def test_it_creates_a_ref_resolver_if_not_provided(self): + with self.assertWarns(DeprecationWarning): + resolver = self.Validator({}).resolver + self.assertIsInstance(resolver, validators._RefResolver) + + def test_it_upconverts_from_deprecated_RefResolvers(self): + ref, schema = "someCoolRef", {"type": "integer"} + resolver = validators._RefResolver("", {}, store={ref: schema}) + validator = self.Validator({"$ref": ref}, resolver=resolver) + + with self.assertRaises(exceptions.ValidationError): + validator.validate(None) + + def test_it_upconverts_from_yet_older_deprecated_legacy_RefResolvers(self): + """ + Legacy RefResolvers support only the context manager form of + resolution. + """ + + class LegacyRefResolver: + @contextmanager + def resolving(this, ref): + self.assertEqual(ref, "the ref") + yield {"type": "integer"} + + resolver = LegacyRefResolver() + schema = {"$ref": "the ref"} + + with self.assertRaises(exceptions.ValidationError): + self.Validator(schema, resolver=resolver).validate(None) + + +class AntiDraft6LeakMixin: + """ + Make sure functionality from draft 6 doesn't leak backwards in time. 
+ """ + + def test_True_is_not_a_schema(self): + with self.assertRaises(exceptions.SchemaError) as e: + self.Validator.check_schema(True) + self.assertIn("True is not of type", str(e.exception)) + + def test_False_is_not_a_schema(self): + with self.assertRaises(exceptions.SchemaError) as e: + self.Validator.check_schema(False) + self.assertIn("False is not of type", str(e.exception)) + + def test_True_is_not_a_schema_even_if_you_forget_to_check(self): + with self.assertRaises(Exception) as e: + self.Validator(True).validate(12) + self.assertNotIsInstance(e.exception, exceptions.ValidationError) + + def test_False_is_not_a_schema_even_if_you_forget_to_check(self): + with self.assertRaises(Exception) as e: + self.Validator(False).validate(12) + self.assertNotIsInstance(e.exception, exceptions.ValidationError) + + +class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): + Validator = validators.Draft3Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + def test_any_type_is_valid_for_type_any(self): + validator = self.Validator({"type": "any"}) + validator.validate(object()) + + def test_any_type_is_redefinable(self): + """ + Sigh, because why not. + """ + Crazy = validators.extend( + self.Validator, + type_checker=self.Validator.TYPE_CHECKER.redefine( + "any", lambda checker, thing: isinstance(thing, int), + ), + ) + validator = Crazy({"type": "any"}) + validator.validate(12) + with self.assertRaises(exceptions.ValidationError): + validator.validate("foo") + + def test_is_type_is_true_for_any_type(self): + self.assertTrue(self.Validator({"type": "any"}).is_valid(object())) + + def test_is_type_does_not_evade_bool_if_it_is_being_tested(self): + self.assertTrue(self.Validator({}).is_type(True, "boolean")) + self.assertTrue(self.Validator({"type": "any"}).is_valid(True)) + + +class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): + Validator = validators.Draft4Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft6Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft6Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft7Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft7Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft201909Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft201909Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestDraft202012Validator(ValidatorTestMixin, TestCase): + Validator = validators.Draft202012Validator + valid: tuple[dict, dict] = ({}, {}) + invalid = {"type": "integer"}, "foo" + + +class TestLatestValidator(TestCase): + """ + These really apply to multiple versions but are easiest to test on one. 
+ """ + + def test_ref_resolvers_may_have_boolean_schemas_stored(self): + ref = "someCoolRef" + schema = {"$ref": ref} + resolver = validators._RefResolver("", {}, store={ref: False}) + validator = validators._LATEST_VERSION(schema, resolver=resolver) + + with self.assertRaises(exceptions.ValidationError): + validator.validate(None) + + +class TestValidatorFor(TestCase): + def test_draft_3(self): + schema = {"$schema": "http://json-schema.org/draft-03/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft3Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-03/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft3Validator, + ) + + def test_draft_4(self): + schema = {"$schema": "http://json-schema.org/draft-04/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft4Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-04/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft4Validator, + ) + + def test_draft_6(self): + schema = {"$schema": "http://json-schema.org/draft-06/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft6Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-06/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft6Validator, + ) + + def test_draft_7(self): + schema = {"$schema": "http://json-schema.org/draft-07/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft7Validator, + ) + + schema = {"$schema": "http://json-schema.org/draft-07/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft7Validator, + ) + + def test_draft_201909(self): + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft201909Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2019-09/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft201909Validator, + ) + + def test_draft_202012(self): + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + + schema = {"$schema": "https://json-schema.org/draft/2020-12/schema#"} + self.assertIs( + validators.validator_for(schema), + validators.Draft202012Validator, + ) + + def test_True(self): + self.assertIs( + validators.validator_for(True), + validators._LATEST_VERSION, + ) + + def test_False(self): + self.assertIs( + validators.validator_for(False), + validators._LATEST_VERSION, + ) + + def test_custom_validator(self): + Validator = validators.create( + meta_schema={"id": "meta schema id"}, + version="12", + id_of=lambda s: s.get("id", ""), + ) + schema = {"$schema": "meta schema id"} + self.assertIs( + validators.validator_for(schema), + Validator, + ) + + def test_custom_validator_draft6(self): + Validator = validators.create( + meta_schema={"$id": "meta schema $id"}, + version="13", + ) + schema = {"$schema": "meta schema $id"} + self.assertIs( + validators.validator_for(schema), + Validator, + ) + + def test_validator_for_jsonschema_default(self): + self.assertIs(validators.validator_for({}), validators._LATEST_VERSION) + + def test_validator_for_custom_default(self): + self.assertIs(validators.validator_for({}, default=None), None) + + def test_warns_if_meta_schema_specified_was_not_found(self): + with self.assertWarns(DeprecationWarning) as cm: + 
validators.validator_for(schema={"$schema": "unknownSchema"}) + + self.assertEqual(cm.filename, __file__) + self.assertEqual( + str(cm.warning), + "The metaschema specified by $schema was not found. " + "Using the latest draft to validate, but this will raise " + "an error in the future.", + ) + + def test_does_not_warn_if_meta_schema_is_unspecified(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + validators.validator_for(schema={}, default={}) + self.assertFalse(w) + + def test_validator_for_custom_default_with_schema(self): + schema, default = {"$schema": "mailto:foo@example.com"}, object() + self.assertIs(validators.validator_for(schema, default), default) + + +class TestValidate(TestCase): + def assertUses(self, schema, Validator): + result = [] + with mock.patch.object(Validator, "check_schema", result.append): + validators.validate({}, schema) + self.assertEqual(result, [schema]) + + def test_draft3_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-03/schema#"}, + Validator=validators.Draft3Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-03/schema"}, + Validator=validators.Draft3Validator, + ) + + def test_draft4_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-04/schema#"}, + Validator=validators.Draft4Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-04/schema"}, + Validator=validators.Draft4Validator, + ) + + def test_draft6_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-06/schema#"}, + Validator=validators.Draft6Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-06/schema"}, + Validator=validators.Draft6Validator, + ) + + def test_draft7_validator_is_chosen(self): + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-07/schema#"}, + Validator=validators.Draft7Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={"$schema": "http://json-schema.org/draft-07/schema"}, + Validator=validators.Draft7Validator, + ) + + def test_draft202012_validator_is_chosen(self): + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + }, + Validator=validators.Draft202012Validator, + ) + # Make sure it works without the empty fragment + self.assertUses( + schema={ + "$schema": "https://json-schema.org/draft/2020-12/schema", + }, + Validator=validators.Draft202012Validator, + ) + + def test_draft202012_validator_is_the_default(self): + self.assertUses(schema={}, Validator=validators.Draft202012Validator) + + def test_validation_error_message(self): + with self.assertRaises(exceptions.ValidationError) as e: + validators.validate(12, {"type": "string"}) + self.assertRegex( + str(e.exception), + "(?s)Failed validating '.*' in schema.*On instance", + ) + + def test_schema_error_message(self): + with self.assertRaises(exceptions.SchemaError) as e: + validators.validate(12, {"type": 12}) + self.assertRegex( + str(e.exception), + "(?s)Failed validating '.*' in metaschema.*On schema", + ) + + def test_it_uses_best_match(self): + schema = { + "oneOf": [ + {"type": "number", "minimum": 20}, + {"type": "array"}, + ], + } + with self.assertRaises(exceptions.ValidationError) as e: + 
validators.validate(12, schema) + self.assertIn("12 is less than the minimum of 20", str(e.exception)) + + +class TestThreading(TestCase): + """ + Threading-related functionality tests. + + jsonschema doesn't promise thread safety, and its validation behavior + across multiple threads may change at any time, but that means it isn't + safe to share *validators* across threads, not that anytime one has + multiple threads that jsonschema won't work (it certainly is intended to). + + These tests ensure that this minimal level of functionality continues to + work. + """ + + def test_validation_across_a_second_thread(self): + failed = [] + + def validate(): + try: + validators.validate(instance=37, schema=True) + except: # pragma: no cover # noqa: E722 + failed.append(sys.exc_info()) + + validate() # just verify it succeeds + + from threading import Thread + thread = Thread(target=validate) + thread.start() + thread.join() + self.assertEqual((thread.is_alive(), failed), (False, [])) + + +class TestReferencing(TestCase): + def test_registry_with_retrieve(self): + def retrieve(uri): + return DRAFT202012.create_resource({"type": "integer"}) + + registry = referencing.Registry(retrieve=retrieve) + schema = {"$ref": "https://example.com/"} + validator = validators.Draft202012Validator(schema, registry=registry) + + self.assertEqual( + (validator.is_valid(12), validator.is_valid("foo")), + (True, False), + ) + + def test_custom_registries_do_not_autoretrieve_remote_resources(self): + registry = referencing.Registry() + schema = {"$ref": "https://example.com/"} + validator = validators.Draft202012Validator(schema, registry=registry) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with self.assertRaises(referencing.exceptions.Unresolvable): + validator.validate(12) + self.assertFalse(w) + + +class TestRefResolver(TestCase): + + base_uri = "" + stored_uri = "foo://stored" + stored_schema = {"stored": "schema"} + + def setUp(self): + self.referrer = {} + self.store = {self.stored_uri: self.stored_schema} + self.resolver = validators._RefResolver( + self.base_uri, self.referrer, self.store, + ) + + def test_it_does_not_retrieve_schema_urls_from_the_network(self): + ref = validators.Draft3Validator.META_SCHEMA["id"] + with mock.patch.object(self.resolver, "resolve_remote") as patched: # noqa: SIM117 + with self.resolver.resolving(ref) as resolved: + pass + self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA) + self.assertFalse(patched.called) + + def test_it_resolves_local_refs(self): + ref = "#/properties/foo" + self.referrer["properties"] = {"foo": object()} + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, self.referrer["properties"]["foo"]) + + def test_it_resolves_local_refs_with_id(self): + schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}} + resolver = validators._RefResolver.from_schema( + schema, + id_of=lambda schema: schema.get("id", ""), + ) + with resolver.resolving("#/a") as resolved: + self.assertEqual(resolved, schema["a"]) + with resolver.resolving("http://bar/schema#/a") as resolved: + self.assertEqual(resolved, schema["a"]) + + def test_it_retrieves_stored_refs(self): + with self.resolver.resolving(self.stored_uri) as resolved: + self.assertIs(resolved, self.stored_schema) + + self.resolver.store["cached_ref"] = {"foo": 12} + with self.resolver.resolving("cached_ref#/foo") as resolved: + self.assertEqual(resolved, 12) + + def test_it_retrieves_unstored_refs_via_requests(self): + ref = 
"http://bar#baz" + schema = {"baz": 12} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema}) + + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, 12) + + def test_it_retrieves_unstored_refs_via_urlopen(self): + ref = "http://bar#baz" + schema = {"baz": 12} + + if "requests" in sys.modules: # pragma: no cover + self.addCleanup( + sys.modules.__setitem__, "requests", sys.modules["requests"], + ) + sys.modules["requests"] = None + + @contextmanager + def fake_urlopen(url): + self.assertEqual(url, "http://bar") + yield BytesIO(json.dumps(schema).encode("utf8")) + + self.addCleanup(setattr, validators, "urlopen", validators.urlopen) + validators.urlopen = fake_urlopen + + with self.resolver.resolving(ref) as resolved: + pass + self.assertEqual(resolved, 12) + + def test_it_retrieves_local_refs_via_urlopen(self): + with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf: + self.addCleanup(os.remove, tempf.name) + json.dump({"foo": "bar"}, tempf) + + ref = f"file://{pathname2url(tempf.name)}#foo" + with self.resolver.resolving(ref) as resolved: + self.assertEqual(resolved, "bar") + + def test_it_can_construct_a_base_uri_from_a_schema(self): + schema = {"id": "foo"} + resolver = validators._RefResolver.from_schema( + schema, + id_of=lambda schema: schema.get("id", ""), + ) + self.assertEqual(resolver.base_uri, "foo") + self.assertEqual(resolver.resolution_scope, "foo") + with resolver.resolving("") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("#") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("foo") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("foo#") as resolved: + self.assertEqual(resolved, schema) + + def test_it_can_construct_a_base_uri_from_a_schema_without_id(self): + schema = {} + resolver = validators._RefResolver.from_schema(schema) + self.assertEqual(resolver.base_uri, "") + self.assertEqual(resolver.resolution_scope, "") + with resolver.resolving("") as resolved: + self.assertEqual(resolved, schema) + with resolver.resolving("#") as resolved: + self.assertEqual(resolved, schema) + + def test_custom_uri_scheme_handlers(self): + def handler(url): + self.assertEqual(url, ref) + return schema + + schema = {"foo": "bar"} + ref = "foo://bar" + resolver = validators._RefResolver("", {}, handlers={"foo": handler}) + with resolver.resolving(ref) as resolved: + self.assertEqual(resolved, schema) + + def test_cache_remote_on(self): + response = [object()] + + def handler(url): + try: + return response.pop() + except IndexError: # pragma: no cover + self.fail("Response must not have been cached!") + + ref = "foo://bar" + resolver = validators._RefResolver( + "", {}, cache_remote=True, handlers={"foo": handler}, + ) + with resolver.resolving(ref): + pass + with resolver.resolving(ref): + pass + + def test_cache_remote_off(self): + response = [object()] + + def handler(url): + try: + return response.pop() + except IndexError: # pragma: no cover + self.fail("Handler called twice!") + + ref = "foo://bar" + resolver = validators._RefResolver( + "", {}, cache_remote=False, handlers={"foo": handler}, + ) + with resolver.resolving(ref): + pass + + def test_if_you_give_it_junk_you_get_a_resolution_error(self): + error = ValueError("Oh no! 
What's this?") + + def handler(url): + raise error + + ref = "foo://bar" + resolver = validators._RefResolver("", {}, handlers={"foo": handler}) + with self.assertRaises(exceptions._RefResolutionError) as err: # noqa: SIM117 + with resolver.resolving(ref): + self.fail("Shouldn't get this far!") # pragma: no cover + self.assertEqual(err.exception, exceptions._RefResolutionError(error)) + + def test_helpful_error_message_on_failed_pop_scope(self): + resolver = validators._RefResolver("", {}) + resolver.pop_scope() + with self.assertRaises(exceptions._RefResolutionError) as exc: + resolver.pop_scope() + self.assertIn("Failed to pop the scope", str(exc.exception)) + + def test_pointer_within_schema_with_different_id(self): + """ + See #1085. + """ + schema = validators.Draft7Validator.META_SCHEMA + one = validators._RefResolver("", schema) + validator = validators.Draft7Validator(schema, resolver=one) + self.assertFalse(validator.is_valid({"maxLength": "foo"})) + + another = { + "allOf": [{"$ref": validators.Draft7Validator.META_SCHEMA["$id"]}], + } + two = validators._RefResolver("", another) + validator = validators.Draft7Validator(another, resolver=two) + self.assertFalse(validator.is_valid({"maxLength": "foo"})) + + def test_newly_created_validator_with_ref_resolver(self): + """ + See https://github.com/python-jsonschema/jsonschema/issues/1061#issuecomment-1624266555. + """ + + def handle(uri): + self.assertEqual(uri, "http://example.com/foo") + return {"type": "integer"} + + resolver = validators._RefResolver("", {}, handlers={"http": handle}) + Validator = validators.create( + meta_schema={}, + validators=validators.Draft4Validator.VALIDATORS, + ) + schema = {"$id": "http://example.com/bar", "$ref": "foo"} + validator = Validator(schema, resolver=resolver) + self.assertEqual( + (validator.is_valid({}), validator.is_valid(37)), + (False, True), + ) + + def test_refresolver_with_pointer_in_schema_with_no_id(self): + """ + See https://github.com/python-jsonschema/jsonschema/issues/1124#issuecomment-1632574249. 
+ """ + + schema = { + "properties": {"x": {"$ref": "#/definitions/x"}}, + "definitions": {"x": {"type": "integer"}}, + } + + validator = validators.Draft202012Validator( + schema, + resolver=validators._RefResolver("", schema), + ) + self.assertEqual( + (validator.is_valid({"x": "y"}), validator.is_valid({"x": 37})), + (False, True), + ) + + + +def sorted_errors(errors): + def key(error): + return ( + [str(e) for e in error.path], + [str(e) for e in error.schema_path], + ) + return sorted(errors, key=key) + + +@define +class ReallyFakeRequests: + + _responses: dict[str, Any] + + def get(self, url): + response = self._responses.get(url) + if url is None: # pragma: no cover + raise ValueError("Unknown URL: " + repr(url)) + return _ReallyFakeJSONResponse(json.dumps(response)) + + +@define +class _ReallyFakeJSONResponse: + + _response: str + + def json(self): + return json.loads(self._response) diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING b/lib/python3.10/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..a9f853e43069b8e3f8a156a4af2b1198a004230d --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications-2025.4.1.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2022 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0f21ff3e0fc3050f0d8916b040e94d75615d675 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ded6e07b17bedd3a694a35a99769d1c72d4c508b Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema_specifications/__pycache__/_core.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..2248a0c80bbfc51b67863f848f60466f135cdb2b --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/metaschema.json @@ -0,0 +1,42 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator new file mode 100644 index 0000000000000000000000000000000000000000..24a1cc4f4f54b5a556de93c2799853b31e78e512 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/applicator @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + + "title": 
"Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content new file mode 100644 index 0000000000000000000000000000000000000000..f6752a8efbeb74a7b7b2e74cbb272c422a975363 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/content @@ -0,0 +1,17 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core new file mode 100644 index 0000000000000000000000000000000000000000..eb708a56044146bb7903fd59159725a0c693aefe --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/core @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + 
"additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data new file mode 100644 index 0000000000000000000000000000000000000000..da04cff6d346f26bfe5bd21b636609fecd583628 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/meta-data @@ -0,0 +1,37 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + + "title": "Meta-data vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation new file mode 100644 index 0000000000000000000000000000000000000000..9f59677b30c238f07a358c0c8ee72c3b710dc24d --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft201909/vocabularies/validation @@ -0,0 +1,98 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + 
"nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..d5e2d31c3c88e61f4c204cb6616887bba7e105dd --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/metaschema.json @@ -0,0 +1,58 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator new file mode 100644 index 0000000000000000000000000000000000000000..ca69923096f3a2e4fe5eb8152a92d87d146cf021 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/applicator @@ -0,0 +1,48 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + + "title": 
"Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content new file mode 100644 index 0000000000000000000000000000000000000000..2f6e056a9ac2399582eaf05985a26acf9161f213 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/content @@ -0,0 +1,17 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Content vocabulary meta-schema", + + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core new file mode 100644 index 0000000000000000000000000000000000000000..dfc092d9646e1ea89e4d771bfcf2a4ca87eeac75 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/core @@ -0,0 +1,51 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", 
+ "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format new file mode 100644 index 0000000000000000000000000000000000000000..09bbfdda972ca77cd26e290d70ab59db4bbe8a27 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation new file mode 100644 index 0000000000000000000000000000000000000000..51ef7ea118c34e8ac3da66c973bae02b9e8325d8 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion new file mode 100644 index 0000000000000000000000000000000000000000..5e73fd7571e504990f7a055da960056f07dcdb19 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data new file mode 100644 index 0000000000000000000000000000000000000000..05cbc22afde5ebe6ba84957ca412277bde1e4b08 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/meta-data @@ -0,0 +1,37 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + + "title": "Meta-data vocabulary 
meta-schema", + + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated new file mode 100644 index 0000000000000000000000000000000000000000..5f62a3ffa20be07dc813d6b8a154fa8451d0dc61 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation new file mode 100644 index 0000000000000000000000000000000000000000..606b87ba2ec213d63b9e2035a6acc4edcb4b8357 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft202012/vocabularies/validation @@ -0,0 +1,98 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + 
"nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..8b26b1f89f9852e4f69f466dd7d718b21ba5b989 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft3/metaschema.json @@ -0,0 +1,172 @@ +{ + "$schema" : "http://json-schema.org/draft-03/schema#", + "id" : "http://json-schema.org/draft-03/schema#", + "type" : "object", + + "properties" : { + "type" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true, + "default" : "any" + }, + + "properties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "patternProperties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalProperties" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "items" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalItems" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "required" : { + "type" : "boolean", + "default" : false + }, + + "dependencies" : { + "type" : "object", + "additionalProperties" : { + "type" : ["string", "array", {"$ref" : "#"}], + "items" : { + "type" : "string" + } + }, + "default" : {} + }, + + "minimum" : { + "type" : "number" + }, + + "maximum" : { + "type" : "number" + }, + + "exclusiveMinimum" : { + "type" : "boolean", + "default" : false + }, + + "exclusiveMaximum" : { + "type" : "boolean", + "default" : false + }, + + "minItems" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxItems" : { + "type" : "integer", + "minimum" : 0 + }, + + "uniqueItems" : { + "type" : "boolean", + "default" : false + }, + + "pattern" : { + "type" : "string", + "format" : "regex" + }, + + "minLength" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxLength" : { + "type" : "integer" + }, + + "enum" : { + "type" : "array", + "minItems" : 1, + "uniqueItems" : true + }, + + "default" : { + "type" : "any" + }, + + "title" : { + "type" : "string" + }, + + "description" : { + "type" : "string" + }, + + "format" : { + "type" : "string" + }, + + "divisibleBy" : { + "type" : "number", + "minimum" : 0, + "exclusiveMinimum" : true, + "default" : 1 + }, + + "disallow" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true + }, + + "extends" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "id" : { + "type" : "string" + }, + + "$ref" : { + "type" : "string" + }, + + "$schema" : { + "type" : "string", + "format" : "uri" + } + }, + + "dependencies" : { + "exclusiveMinimum" : "minimum", + "exclusiveMaximum" : "maximum" + }, + + "default" : {} +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json new file mode 100644 index 
0000000000000000000000000000000000000000..bcbb84743e3838fab7cbec5f0a5bcbafcfc99136 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft4/metaschema.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json new file mode 
100644 index 0000000000000000000000000000000000000000..a0d2bf7896c2619ae657cd3b3266660156021706 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft6/metaschema.json @@ -0,0 +1,153 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "examples": { + "type": "array", + "items": {} + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array" + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json 
b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json new file mode 100644 index 0000000000000000000000000000000000000000..746cde9690134529bdcde889f72c7ad5259f24ad --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/schemas/draft7/metaschema.json @@ -0,0 +1,166 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": 
{"$ref": "#"}, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/lib/python3.10/site-packages/jsonschema_specifications/tests/__init__.py b/lib/python3.10/site-packages/jsonschema_specifications/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aed64be0e9d78caa9a60079f51b92737ecec84f8 Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc b/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94d2d5d58b974b918ecbb378e6ff8bafbe69064a Binary files /dev/null and b/lib/python3.10/site-packages/jsonschema_specifications/tests/__pycache__/test_jsonschema_specifications.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py b/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py new file mode 100644 index 0000000000000000000000000000000000000000..fd2927e0c8381f3eb93029df155758c05ed5eb26 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications/tests/test_jsonschema_specifications.py @@ -0,0 +1,41 @@ +from collections.abc import Mapping +from pathlib import Path + +import pytest + +from jsonschema_specifications import REGISTRY + + +def test_it_contains_metaschemas(): + schema = REGISTRY.contents("http://json-schema.org/draft-07/schema#") + assert isinstance(schema, Mapping) + assert schema["$id"] == "http://json-schema.org/draft-07/schema#" + assert schema["title"] == "Core schema meta-schema" + + +def test_it_is_crawled(): + assert REGISTRY.crawl() == REGISTRY + + +@pytest.mark.parametrize( + "ignored_relative_path", + ["schemas/.DS_Store", "schemas/draft7/.DS_Store"], +) +def test_it_copes_with_dotfiles(ignored_relative_path): + """ + Ignore files like .DS_Store if someone has actually caused one to exist. + + We test here through the private interface as of course the global has + already loaded our schemas. + """ + + import jsonschema_specifications + + package = Path(jsonschema_specifications.__file__).parent + + ignored = package / ignored_relative_path + ignored.touch() + try: + list(jsonschema_specifications._schemas()) + finally: + ignored.unlink() diff --git a/lib/python3.10/site-packages/jupyter_client-8.6.3.dist-info/licenses/LICENSE b/lib/python3.10/site-packages/jupyter_client-8.6.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..076177a497fae1482179137af028465c4f6b9c94 --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client-8.6.3.dist-info/licenses/LICENSE @@ -0,0 +1,31 @@ +BSD 3-Clause License + +- Copyright (c) 2001-2015, IPython Development Team +- Copyright (c) 2015-, Jupyter Development Team + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7973da0061a4dcab4d4c4630fa3bfd3aee236d30 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/_version.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61694800360442643dcd93572c9386f0ae9230bc Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/_version.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/adapter.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/adapter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dd728e88ece8e2073a1fa4b5da0a42607ed9f95 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/adapter.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/channels.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/channels.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c6f4403a80c40eeaaada3e30915ab5475d2e83a Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/channels.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/channelsabc.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/channelsabc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bc7fd65b8a62d6a65da4b8490d381ee5e28e96e Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/channelsabc.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jupyter_client/__pycache__/client.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dbad4a2edbe21f281aa7d813163ce29d2977e60 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/client.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/clientabc.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/clientabc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4fd29ebfdc0fac7f7a491ad8dc1dd9027982629 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/clientabc.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/connect.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/connect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cfae4979d8b2ed83cbd343387d49186bc7b34bf Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/connect.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/consoleapp.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/consoleapp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af24dfddab0e0da429a212ef013b6c00e72d7fc9 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/consoleapp.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/jsonutil.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/jsonutil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e76105ab398e8e731fb9fdd4813416e9aac9bd1 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/jsonutil.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelapp.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelapp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92c5c42894c2f3d150bd37ff1d53054211006401 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelapp.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspec.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac9ede41d6aad6b0413953703638858ebee68504 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspec.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspecapp.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspecapp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8df18cad2538b86f6e6c6098a0cb26181aaee302 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/kernelspecapp.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/launcher.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/launcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71390c1927b6978d71a2812ae5741cf3440c4bc7 Binary files /dev/null 
and b/lib/python3.10/site-packages/jupyter_client/__pycache__/launcher.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/localinterfaces.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/localinterfaces.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5678bb1227a2e17ae7865278cdf0218d5546a83 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/localinterfaces.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/manager.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5557c05040cd52b1ca1484fca657d815c1c4a8a Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/manager.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/managerabc.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/managerabc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c3e9812a51f68a820c8fe39ff184fb801b8e993 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/managerabc.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/multikernelmanager.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/multikernelmanager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ab656d5a9408b00edfb891c135e933ee717f491 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/multikernelmanager.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/restarter.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/restarter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed18f47b8ebe3d8814d7e3db390fd64903d51192 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/restarter.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/runapp.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/runapp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf83eed8c0843cdfd46406143007c1696fc3e611 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/runapp.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/session.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..658ea01b38f3cb2be2ffcf63e24c1d7fd8b6b8ed Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/session.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/threaded.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/threaded.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2aacf56e54bb78275f4d0f322f467ab8c90d683 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/threaded.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/utils.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/utils.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..c0c479238ab6005b8c4a4e214b9680a102d1de92 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/__pycache__/win_interrupt.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/__pycache__/win_interrupt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..daf0ec5aa623fcee2f4f02db78c13f04c58286af Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/__pycache__/win_interrupt.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/asynchronous/__init__.py b/lib/python3.10/site-packages/jupyter_client/asynchronous/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36f2c8469e44550854c75df08a36d9b256efeb5e --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/asynchronous/__init__.py @@ -0,0 +1 @@ +from .client import AsyncKernelClient # noqa diff --git a/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87dccb852a69a5475b69a31d53015828edb3c33d Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/client.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0ada3b5f78312f6cbde6ff20d71e5a1c4ff3489 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/asynchronous/__pycache__/client.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/asynchronous/client.py b/lib/python3.10/site-packages/jupyter_client/asynchronous/client.py new file mode 100644 index 0000000000000000000000000000000000000000..cde8ecaf3cf69ad13189aa16a5cce948525cf676 --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/asynchronous/client.py @@ -0,0 +1,75 @@ +"""Implements an async kernel client""" +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + +import typing as t + +import zmq.asyncio +from traitlets import Instance, Type + +from ..channels import AsyncZMQSocketChannel, HBChannel +from ..client import KernelClient, reqrep + + +def wrapped(meth: t.Callable, channel: str) -> t.Callable: + """Wrap a method on a channel and handle replies.""" + + def _(self: AsyncKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any: + reply = kwargs.pop("reply", False) + timeout = kwargs.pop("timeout", None) + msg_id = meth(self, *args, **kwargs) + if not reply: + return msg_id + return self._recv_reply(msg_id, timeout=timeout, channel=channel) + + return _ + + +class AsyncKernelClient(KernelClient): + """A KernelClient with async APIs + + ``get_[channel]_msg()`` methods wait for and return messages on channels, + raising :exc:`queue.Empty` if no message arrives within ``timeout`` seconds. 
+ """ + + context = Instance(zmq.asyncio.Context) # type:ignore[arg-type] + + def _context_default(self) -> zmq.asyncio.Context: + self._created_context = True + return zmq.asyncio.Context() + + # -------------------------------------------------------------------------- + # Channel proxy methods + # -------------------------------------------------------------------------- + + get_shell_msg = KernelClient._async_get_shell_msg + get_iopub_msg = KernelClient._async_get_iopub_msg + get_stdin_msg = KernelClient._async_get_stdin_msg + get_control_msg = KernelClient._async_get_control_msg + + wait_for_ready = KernelClient._async_wait_for_ready + + # The classes to use for the various channels + shell_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] + iopub_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] + stdin_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] + hb_channel_class = Type(HBChannel) # type:ignore[arg-type] + control_channel_class = Type(AsyncZMQSocketChannel) # type:ignore[arg-type] + + _recv_reply = KernelClient._async_recv_reply + + # replies come on the shell channel + execute = reqrep(wrapped, KernelClient.execute) + history = reqrep(wrapped, KernelClient.history) + complete = reqrep(wrapped, KernelClient.complete) + is_complete = reqrep(wrapped, KernelClient.is_complete) + inspect = reqrep(wrapped, KernelClient.inspect) + kernel_info = reqrep(wrapped, KernelClient.kernel_info) + comm_info = reqrep(wrapped, KernelClient.comm_info) + + is_alive = KernelClient._async_is_alive + execute_interactive = KernelClient._async_execute_interactive + + # replies come on the control channel + shutdown = reqrep(wrapped, KernelClient.shutdown, channel="control") diff --git a/lib/python3.10/site-packages/jupyter_client/blocking/__init__.py b/lib/python3.10/site-packages/jupyter_client/blocking/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74b09b9b1f10261228a0fa53fb37bd1b7addebb9 --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/blocking/__init__.py @@ -0,0 +1 @@ +from .client import BlockingKernelClient # noqa diff --git a/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9e631e01f85903eba68c5cc56a6b1d80ebd254 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/client.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/client.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9b78699a859d5bfb17a881ea495911000b64c3a Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/blocking/__pycache__/client.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/blocking/client.py b/lib/python3.10/site-packages/jupyter_client/blocking/client.py new file mode 100644 index 0000000000000000000000000000000000000000..5c815eb8da1373914d515ec0de03f6682d47585b --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/blocking/client.py @@ -0,0 +1,71 @@ +"""Implements a fully blocking kernel client. + +Useful for test suites and blocking terminal interfaces. +""" +# Copyright (c) Jupyter Development Team. 
+# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + +import typing as t + +from traitlets import Type + +from ..channels import HBChannel, ZMQSocketChannel +from ..client import KernelClient, reqrep +from ..utils import run_sync + + +def wrapped(meth: t.Callable, channel: str) -> t.Callable: + """Wrap a method on a channel and handle replies.""" + + def _(self: BlockingKernelClient, *args: t.Any, **kwargs: t.Any) -> t.Any: + reply = kwargs.pop("reply", False) + timeout = kwargs.pop("timeout", None) + msg_id = meth(self, *args, **kwargs) + if not reply: + return msg_id + return self._recv_reply(msg_id, timeout=timeout, channel=channel) + + return _ + + +class BlockingKernelClient(KernelClient): + """A KernelClient with blocking APIs + + ``get_[channel]_msg()`` methods wait for and return messages on channels, + raising :exc:`queue.Empty` if no message arrives within ``timeout`` seconds. + """ + + # -------------------------------------------------------------------------- + # Channel proxy methods + # -------------------------------------------------------------------------- + + get_shell_msg = run_sync(KernelClient._async_get_shell_msg) + get_iopub_msg = run_sync(KernelClient._async_get_iopub_msg) + get_stdin_msg = run_sync(KernelClient._async_get_stdin_msg) + get_control_msg = run_sync(KernelClient._async_get_control_msg) + + wait_for_ready = run_sync(KernelClient._async_wait_for_ready) + + # The classes to use for the various channels + shell_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] + iopub_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] + stdin_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] + hb_channel_class = Type(HBChannel) # type:ignore[arg-type] + control_channel_class = Type(ZMQSocketChannel) # type:ignore[arg-type] + + _recv_reply = run_sync(KernelClient._async_recv_reply) + + # replies come on the shell channel + execute = reqrep(wrapped, KernelClient.execute) + history = reqrep(wrapped, KernelClient.history) + complete = reqrep(wrapped, KernelClient.complete) + inspect = reqrep(wrapped, KernelClient.inspect) + kernel_info = reqrep(wrapped, KernelClient.kernel_info) + comm_info = reqrep(wrapped, KernelClient.comm_info) + + is_alive = run_sync(KernelClient._async_is_alive) + execute_interactive = run_sync(KernelClient._async_execute_interactive) + + # replies come on the control channel + shutdown = reqrep(wrapped, KernelClient.shutdown, channel="control") diff --git a/lib/python3.10/site-packages/jupyter_client/ioloop/__init__.py b/lib/python3.10/site-packages/jupyter_client/ioloop/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..204d5f8aad2a78d66724e716166cb393cfcf4f6f --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/ioloop/__init__.py @@ -0,0 +1,4 @@ +from .manager import AsyncIOLoopKernelManager # noqa +from .manager import IOLoopKernelManager # noqa +from .restarter import AsyncIOLoopKernelRestarter # noqa +from .restarter import IOLoopKernelRestarter # noqa diff --git a/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30b336ac2549f6d8eb7fed663f2973757e26f94e Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/manager.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d710abf732a89ad8ac876ad352cbae21d896cc1 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/manager.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/restarter.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/restarter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f869a35201ba1c9c32ed66be0b936d4f1f3462d Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ioloop/__pycache__/restarter.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/ioloop/manager.py b/lib/python3.10/site-packages/jupyter_client/ioloop/manager.py new file mode 100644 index 0000000000000000000000000000000000000000..5a6c8aec6da6a30a3987aa3ff8088930917a474c --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/ioloop/manager.py @@ -0,0 +1,116 @@ +"""A kernel manager with a tornado IOLoop""" +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. +import typing as t + +import zmq +from tornado import ioloop +from traitlets import Instance, Type +from zmq.eventloop.zmqstream import ZMQStream + +from ..manager import AsyncKernelManager, KernelManager +from .restarter import AsyncIOLoopKernelRestarter, IOLoopKernelRestarter + + +def as_zmqstream(f: t.Any) -> t.Callable: + """Convert a socket to a zmq stream.""" + + def wrapped(self: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: + save_socket_class = None + # zmqstreams only support sync sockets + if self.context._socket_class is not zmq.Socket: + save_socket_class = self.context._socket_class + self.context._socket_class = zmq.Socket + try: + socket = f(self, *args, **kwargs) + finally: + if save_socket_class: + # restore default socket class + self.context._socket_class = save_socket_class + return ZMQStream(socket, self.loop) + + return wrapped + + +class IOLoopKernelManager(KernelManager): + """An io loop kernel manager.""" + + loop = Instance("tornado.ioloop.IOLoop") + + def _loop_default(self) -> ioloop.IOLoop: + return ioloop.IOLoop.current() + + restarter_class = Type( + default_value=IOLoopKernelRestarter, + klass=IOLoopKernelRestarter, + help=( + "Type of KernelRestarter to use. " + "Must be a subclass of IOLoopKernelRestarter.\n" + "Override this to customize how kernel restarts are managed." 
+ ), + config=True, + ) + _restarter: t.Any = Instance("jupyter_client.ioloop.IOLoopKernelRestarter", allow_none=True) + + def start_restarter(self) -> None: + """Start the restarter.""" + if self.autorestart and self.has_kernel: + if self._restarter is None: + self._restarter = self.restarter_class( + kernel_manager=self, loop=self.loop, parent=self, log=self.log + ) + self._restarter.start() + + def stop_restarter(self) -> None: + """Stop the restarter.""" + if self.autorestart and self._restarter is not None: + self._restarter.stop() + + connect_shell = as_zmqstream(KernelManager.connect_shell) + connect_control = as_zmqstream(KernelManager.connect_control) + connect_iopub = as_zmqstream(KernelManager.connect_iopub) + connect_stdin = as_zmqstream(KernelManager.connect_stdin) + connect_hb = as_zmqstream(KernelManager.connect_hb) + + +class AsyncIOLoopKernelManager(AsyncKernelManager): + """An async ioloop kernel manager.""" + + loop = Instance("tornado.ioloop.IOLoop") + + def _loop_default(self) -> ioloop.IOLoop: + return ioloop.IOLoop.current() + + restarter_class = Type( + default_value=AsyncIOLoopKernelRestarter, + klass=AsyncIOLoopKernelRestarter, + help=( + "Type of KernelRestarter to use. " + "Must be a subclass of AsyncIOLoopKernelRestarter.\n" + "Override this to customize how kernel restarts are managed." + ), + config=True, + ) + _restarter: t.Any = Instance( + "jupyter_client.ioloop.AsyncIOLoopKernelRestarter", allow_none=True + ) + + def start_restarter(self) -> None: + """Start the restarter.""" + if self.autorestart and self.has_kernel: + if self._restarter is None: + self._restarter = self.restarter_class( + kernel_manager=self, loop=self.loop, parent=self, log=self.log + ) + self._restarter.start() + + def stop_restarter(self) -> None: + """Stop the restarter.""" + if self.autorestart and self._restarter is not None: + self._restarter.stop() + + connect_shell = as_zmqstream(AsyncKernelManager.connect_shell) + connect_control = as_zmqstream(AsyncKernelManager.connect_control) + connect_iopub = as_zmqstream(AsyncKernelManager.connect_iopub) + connect_stdin = as_zmqstream(AsyncKernelManager.connect_stdin) + connect_hb = as_zmqstream(AsyncKernelManager.connect_hb) diff --git a/lib/python3.10/site-packages/jupyter_client/ioloop/restarter.py b/lib/python3.10/site-packages/jupyter_client/ioloop/restarter.py new file mode 100644 index 0000000000000000000000000000000000000000..64b508402d120ac0e5e35d0fe399d51f191b2d3c --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/ioloop/restarter.py @@ -0,0 +1,102 @@ +"""A basic in-process kernel monitor with autorestarting. + +This watches a kernel's state using KernelManager.is_alive and auto +restarts the kernel if it dies. +""" +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License.
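A note on the as_zmqstream wrapper in manager.py above: it changes what the connect_* methods return, from plain zmq sockets that must be polled to ZMQStream objects that deliver message frames to callbacks inside the tornado loop. A minimal sketch, assuming ipykernel is installed and a tornado IOLoop drives the program; the print callback is purely illustrative:

from tornado.ioloop import IOLoop

from jupyter_client.ioloop import IOLoopKernelManager

km = IOLoopKernelManager()
km.start_kernel()

# connect_iopub() now yields a ZMQStream; on_recv registers a callback
# that fires inside the IOLoop whenever message frames arrive.
iopub = km.connect_iopub()
iopub.on_recv(lambda frames: print(f"{len(frames)} frames on iopub"))

IOLoop.current().start()  # blocks; callbacks run inside the loop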
+import time +import warnings +from typing import Any + +from traitlets import Instance + +from ..restarter import KernelRestarter + + +class IOLoopKernelRestarter(KernelRestarter): + """Monitor and autorestart a kernel.""" + + loop = Instance("tornado.ioloop.IOLoop") + + def _loop_default(self) -> Any: + warnings.warn( + "IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2", + DeprecationWarning, + stacklevel=4, + ) + from tornado import ioloop + + return ioloop.IOLoop.current() + + _pcallback = None + + def start(self) -> None: + """Start the polling of the kernel.""" + if self._pcallback is None: + from tornado.ioloop import PeriodicCallback + + self._pcallback = PeriodicCallback( + self.poll, + 1000 * self.time_to_dead, + ) + self._pcallback.start() + + def stop(self) -> None: + """Stop the kernel polling.""" + if self._pcallback is not None: + self._pcallback.stop() + self._pcallback = None + + +class AsyncIOLoopKernelRestarter(IOLoopKernelRestarter): + """An async io loop kernel restarter.""" + + async def poll(self) -> None: # type:ignore[override] + """Poll the kernel.""" + if self.debug: + self.log.debug("Polling kernel...") + is_alive = await self.kernel_manager.is_alive() + now = time.time() + if not is_alive: + self._last_dead = now + if self._restarting: + self._restart_count += 1 + else: + self._restart_count = 1 + + if self._restart_count > self.restart_limit: + self.log.warning("AsyncIOLoopKernelRestarter: restart failed") + self._fire_callbacks("dead") + self._restarting = False + self._restart_count = 0 + self.stop() + else: + newports = self.random_ports_until_alive and self._initial_startup + self.log.info( + "AsyncIOLoopKernelRestarter: restarting kernel (%i/%i), %s random ports", + self._restart_count, + self.restart_limit, + "new" if newports else "keep", + ) + self._fire_callbacks("restart") + await self.kernel_manager.restart_kernel(now=True, newports=newports) + self._restarting = True + else: + # Since `is_alive` only tests that the kernel process is alive, it does not + # indicate that the kernel has successfully completed startup. To solve this + # correctly, we would need to wait for a kernel info reply, but it is not + # necessarily appropriate to start a kernel client + channels in the + # restarter. Therefore, we use "has been alive continuously for X time" as a + # heuristic for a stable start up. + # See https://github.com/jupyter/jupyter_client/pull/717 for details. 
+ stable_start_time = self.stable_start_time + if self.kernel_manager.provisioner: + stable_start_time = self.kernel_manager.provisioner.get_stable_start_time( + recommended=stable_start_time + ) + if self._initial_startup and now - self._last_dead >= stable_start_time: + self._initial_startup = False + if self._restarting and now - self._last_dead >= stable_start_time: + self.log.debug("AsyncIOLoopKernelRestarter: restart apparently succeeded") + self._restarting = False diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/__init__.py b/lib/python3.10/site-packages/jupyter_client/provisioning/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6c47aee175d3baf9b6728a63aa9849b8f970bb --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/provisioning/__init__.py @@ -0,0 +1,3 @@ +from .factory import KernelProvisionerFactory # noqa +from .local_provisioner import LocalProvisioner # noqa +from .provisioner_base import KernelProvisionerBase # noqa diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efd281c27d0bd3497a0936a9c401183b55a2b127 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/factory.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..161d26d3c9f06ab633ca84cb781e45ec02145f01 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/factory.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/local_provisioner.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/local_provisioner.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e22225fcae4459502620f7ece99eac509e9d696 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/local_provisioner.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/provisioner_base.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/provisioner_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90c30ed88b03409334b4520d490b39fb4f81e06f Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/provisioning/__pycache__/provisioner_base.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/factory.py b/lib/python3.10/site-packages/jupyter_client/provisioning/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..bad7c15c1579bd35106afcc8bd68ad4bef11953c --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/provisioning/factory.py @@ -0,0 +1,200 @@ +"""Kernel Provisioner Classes""" +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. 
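The factory added below resolves provisioners through the "jupyter_client.kernel_provisioners" entry-point group, so third-party packages register a provisioner class there. A minimal sketch of registering one and inspecting the factory singleton; the package and class names in the comment are hypothetical:

# Hypothetical registration in a third-party package's pyproject.toml:
#
#   [project.entry-points."jupyter_client.kernel_provisioners"]
#   my-provisioner = "my_pkg.provisioning:MyProvisioner"

from jupyter_client.provisioning import KernelProvisionerFactory

kpf = KernelProvisionerFactory.instance()  # SingletonConfigurable singleton
print(kpf.default_provisioner_name)        # "local-provisioner" unless configured
print(kpf.get_provisioner_entries())       # {name: "module:object", ...}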
+import glob
+import sys
+from os import getenv, path
+from typing import Any, Dict, List
+
+# See compatibility note on `group` keyword in https://docs.python.org/3/library/importlib.metadata.html#entry-points
+if sys.version_info < (3, 10):  # pragma: no cover
+    from importlib_metadata import EntryPoint, entry_points  # type:ignore[import-not-found]
+else:  # pragma: no cover
+    from importlib.metadata import EntryPoint, entry_points
+
+from traitlets.config import SingletonConfigurable, Unicode, default
+
+from .provisioner_base import KernelProvisionerBase
+
+
+class KernelProvisionerFactory(SingletonConfigurable):
+    """
+    :class:`KernelProvisionerFactory` is responsible for creating provisioner instances.
+
+    A singleton instance, `KernelProvisionerFactory` is also used by the :class:`KernelSpecManager`
+    to validate `kernel_provisioner` references found in kernel specifications to confirm their
+    availability (in cases where the kernel specification references a kernel provisioner that has
+    not been installed into the current Python environment).
+
+    Its ``default_provisioner_name`` attribute can be used to specify the default provisioner
+    to use when a kernel_spec is found to not reference a provisioner. Its value defaults to
+    `"local-provisioner"` which identifies the local provisioner implemented by
+    :class:`LocalProvisioner`.
+    """
+
+    GROUP_NAME = "jupyter_client.kernel_provisioners"
+    provisioners: Dict[str, EntryPoint] = {}
+
+    default_provisioner_name_env = "JUPYTER_DEFAULT_PROVISIONER_NAME"
+    default_provisioner_name = Unicode(
+        config=True,
+        help="""Indicates the name of the provisioner to use when no kernel_provisioner
+        entry is present in the kernelspec.""",
+    )
+
+    @default("default_provisioner_name")
+    def _default_provisioner_name_default(self) -> str:
+        """The default provisioner name."""
+        return getenv(self.default_provisioner_name_env, "local-provisioner")
+
+    def __init__(self, **kwargs: Any) -> None:
+        """Initialize a kernel provisioner factory."""
+        super().__init__(**kwargs)
+
+        for ep in KernelProvisionerFactory._get_all_provisioners():
+            self.provisioners[ep.name] = ep
+
+    def is_provisioner_available(self, kernel_spec: Any) -> bool:
+        """
+        Reads the associated ``kernel_spec`` to determine the provisioner and returns whether it
+        exists as an entry_point (True) or not (False). If the referenced provisioner is not
+        in the current cache or cannot be loaded via entry_points, a warning message is issued
+        indicating it is not available.
+        """
+        is_available: bool = True
+        provisioner_cfg = self._get_provisioner_config(kernel_spec)
+        provisioner_name = str(provisioner_cfg.get("provisioner_name"))
+        if not self._check_availability(provisioner_name):
+            is_available = False
+            self.log.warning(
+                f"Kernel '{kernel_spec.display_name}' is referencing a kernel "
+                f"provisioner ('{provisioner_name}') that is not available. "
+                f"Ensure the appropriate package has been installed and retry."
+            )
+        return is_available
+
+    def create_provisioner_instance(
+        self, kernel_id: str, kernel_spec: Any, parent: Any
+    ) -> KernelProvisionerBase:
+        """
+        Reads the associated ``kernel_spec`` to see if it has a `kernel_provisioner` stanza.
+        If one exists, it instantiates an instance. If a kernel provisioner is not
+        specified in the kernel specification, a default provisioner stanza is fabricated
+        and instantiated corresponding to the current value of the ``default_provisioner_name`` trait.
+        The instantiated instance is returned.
+ + If the provisioner is found to not exist (not registered via entry_points), + `ModuleNotFoundError` is raised. + """ + provisioner_cfg = self._get_provisioner_config(kernel_spec) + provisioner_name = str(provisioner_cfg.get("provisioner_name")) + if not self._check_availability(provisioner_name): + msg = f"Kernel provisioner '{provisioner_name}' has not been registered." + raise ModuleNotFoundError(msg) + + self.log.debug( + f"Instantiating kernel '{kernel_spec.display_name}' with " + f"kernel provisioner: {provisioner_name}" + ) + provisioner_class = self.provisioners[provisioner_name].load() + provisioner_config = provisioner_cfg.get("config") + provisioner: KernelProvisionerBase = provisioner_class( + kernel_id=kernel_id, kernel_spec=kernel_spec, parent=parent, **provisioner_config + ) + return provisioner + + def _check_availability(self, provisioner_name: str) -> bool: + """ + Checks that the given provisioner is available. + + If the given provisioner is not in the current set of loaded provisioners an attempt + is made to fetch the named entry point and, if successful, loads it into the cache. + + :param provisioner_name: + :return: + """ + is_available = True + if provisioner_name not in self.provisioners: + try: + ep = self._get_provisioner(provisioner_name) + self.provisioners[provisioner_name] = ep # Update cache + except Exception: + is_available = False + return is_available + + def _get_provisioner_config(self, kernel_spec: Any) -> Dict[str, Any]: + """ + Return the kernel_provisioner stanza from the kernel_spec. + + Checks the kernel_spec's metadata dictionary for a kernel_provisioner entry. + If found, it is returned, else one is created relative to the DEFAULT_PROVISIONER + and returned. + + Parameters + ---------- + kernel_spec : Any - this is a KernelSpec type but listed as Any to avoid circular import + The kernel specification object from which the provisioner dictionary is derived. + + Returns + ------- + dict + The provisioner portion of the kernel_spec. If one does not exist, it will contain + the default information. If no `config` sub-dictionary exists, an empty `config` + dictionary will be added. + """ + env_provisioner = kernel_spec.metadata.get("kernel_provisioner", {}) + if "provisioner_name" in env_provisioner: # If no provisioner_name, return default + if ( + "config" not in env_provisioner + ): # if provisioner_name, but no config stanza, add one + env_provisioner.update({"config": {}}) + return env_provisioner # Return what we found (plus config stanza if necessary) + return {"provisioner_name": self.default_provisioner_name, "config": {}} + + def get_provisioner_entries(self) -> Dict[str, str]: + """ + Returns a dictionary of provisioner entries. + + The key is the provisioner name for its entry point. The value is the colon-separated + string of the entry point's module name and object name. + """ + entries = {} + for name, ep in self.provisioners.items(): + entries[name] = ep.value + return entries + + @staticmethod + def _get_all_provisioners() -> List[EntryPoint]: + """Wrapper around entry_points (to fetch the set of provisioners) - primarily to facilitate testing.""" + return entry_points(group=KernelProvisionerFactory.GROUP_NAME) + + def _get_provisioner(self, name: str) -> EntryPoint: + """Wrapper around entry_points (to fetch a single provisioner) - primarily to facilitate testing.""" + eps = entry_points(group=KernelProvisionerFactory.GROUP_NAME, name=name) + if eps: + return eps[0] + + # Check if the entrypoint name is 'local-provisioner'. 
Although this should never + # happen, we have seen cases where the previous distribution of jupyter_client has + # remained which doesn't include kernel-provisioner entrypoints (so 'local-provisioner' + # is deemed not found even though its definition is in THIS package). In such cases, + # the entrypoints package uses what it first finds - which is the older distribution + # resulting in a violation of a supposed invariant condition. To address this scenario, + # we will log a warning message indicating this situation, then build the entrypoint + # instance ourselves - since we have that information. + if name == "local-provisioner": + distros = glob.glob(f"{path.dirname(path.dirname(__file__))}-*") + self.log.warning( + f"Kernel Provisioning: The 'local-provisioner' is not found. This is likely " + f"due to the presence of multiple jupyter_client distributions and a previous " + f"distribution is being used as the source for entrypoints - which does not " + f"include 'local-provisioner'. That distribution should be removed such that " + f"only the version-appropriate distribution remains (version >= 7). Until " + f"then, a 'local-provisioner' entrypoint will be automatically constructed " + f"and used.\nThe candidate distribution locations are: {distros}" + ) + return EntryPoint( + "local-provisioner", "jupyter_client.provisioning", "LocalProvisioner" + ) + + raise diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/local_provisioner.py b/lib/python3.10/site-packages/jupyter_client/provisioning/local_provisioner.py new file mode 100644 index 0000000000000000000000000000000000000000..42d8d32dc001926da2b9b4c598ff5c614b160388 --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/provisioning/local_provisioner.py @@ -0,0 +1,242 @@ +"""Kernel Provisioner Classes""" +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. +import asyncio +import os +import signal +import sys +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from ..connect import KernelConnectionInfo, LocalPortCache +from ..launcher import launch_kernel +from ..localinterfaces import is_local_ip, local_ips +from .provisioner_base import KernelProvisionerBase + + +class LocalProvisioner(KernelProvisionerBase): # type:ignore[misc] + """ + :class:`LocalProvisioner` is a concrete class of ABC :py:class:`KernelProvisionerBase` + and is the out-of-box default implementation used when no kernel provisioner is + specified in the kernel specification (``kernel.json``). It provides functional + parity to existing applications by launching the kernel locally and using + :class:`subprocess.Popen` to manage its lifecycle. + + This class is intended to be subclassed for customizing local kernel environments + and serve as a reference implementation for other custom provisioners. + """ + + process = None + _exit_future = None + pid = None + pgid = None + ip = None + ports_cached = False + + @property + def has_process(self) -> bool: + return self.process is not None + + async def poll(self) -> Optional[int]: + """Poll the provisioner.""" + ret = 0 + if self.process: + ret = self.process.poll() # type:ignore[unreachable] + return ret + + async def wait(self) -> Optional[int]: + """Wait for the provisioner process.""" + ret = 0 + if self.process: + # Use busy loop at 100ms intervals, polling until the process is + # not alive. If we find the process is no longer alive, complete + # its cleanup via the blocking wait(). 
Callers are responsible for + # issuing calls to wait() using a timeout (see kill()). + while await self.poll() is None: # type:ignore[unreachable] + await asyncio.sleep(0.1) + + # Process is no longer alive, wait and clear + ret = self.process.wait() + # Make sure all the fds get closed. + for attr in ["stdout", "stderr", "stdin"]: + fid = getattr(self.process, attr) + if fid: + fid.close() + self.process = None # allow has_process to now return False + return ret + + async def send_signal(self, signum: int) -> None: + """Sends a signal to the process group of the kernel (this + usually includes the kernel and any subprocesses spawned by + the kernel). + + Note that since only SIGTERM is supported on Windows, we will + check if the desired signal is for interrupt and apply the + applicable code on Windows in that case. + """ + if self.process: + if signum == signal.SIGINT and sys.platform == "win32": # type:ignore[unreachable] + from ..win_interrupt import send_interrupt + + send_interrupt(self.process.win32_interrupt_event) + return + + # Prefer process-group over process + if self.pgid and hasattr(os, "killpg"): + try: + os.killpg(self.pgid, signum) + return + except OSError: + pass # We'll retry sending the signal to only the process below + + # If we're here, send the signal to the process and let caller handle exceptions + self.process.send_signal(signum) + return + + async def kill(self, restart: bool = False) -> None: + """Kill the provisioner and optionally restart.""" + if self.process: + if hasattr(signal, "SIGKILL"): # type:ignore[unreachable] + # If available, give preference to signalling the process-group over `kill()`. + try: + await self.send_signal(signal.SIGKILL) + return + except OSError: + pass + try: + self.process.kill() + except OSError as e: + LocalProvisioner._tolerate_no_process(e) + + async def terminate(self, restart: bool = False) -> None: + """Terminate the provisioner and optionally restart.""" + if self.process: + if hasattr(signal, "SIGTERM"): # type:ignore[unreachable] + # If available, give preference to signalling the process group over `terminate()`. + try: + await self.send_signal(signal.SIGTERM) + return + except OSError: + pass + try: + self.process.terminate() + except OSError as e: + LocalProvisioner._tolerate_no_process(e) + + @staticmethod + def _tolerate_no_process(os_error: OSError) -> None: + # In Windows, we will get an Access Denied error if the process + # has already terminated. Ignore it. + if sys.platform == "win32": + if os_error.winerror != 5: + raise + # On Unix, we may get an ESRCH error (or ProcessLookupError instance) if + # the process has already terminated. Ignore it. + else: + from errno import ESRCH + + if not isinstance(os_error, ProcessLookupError) or os_error.errno != ESRCH: + raise + + async def cleanup(self, restart: bool = False) -> None: + """Clean up the resources used by the provisioner and optionally restart.""" + if self.ports_cached and not restart: + # provisioner is about to be destroyed, return cached ports + lpc = LocalPortCache.instance() + ports = ( + self.connection_info["shell_port"], + self.connection_info["iopub_port"], + self.connection_info["stdin_port"], + self.connection_info["hb_port"], + self.connection_info["control_port"], + ) + for port in ports: + if TYPE_CHECKING: + assert isinstance(port, int) + lpc.return_port(port) + + async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]: + """Perform any steps in preparation for kernel process launch. 
+
+        This includes applying additional substitutions to the kernel launch command and environment.
+        It also includes preparation of launch parameters.
+
+        Returns the updated kwargs.
+        """
+
+        # This should be considered temporary until a better division of labor can be defined.
+        km = self.parent
+        if km:
+            if km.transport == "tcp" and not is_local_ip(km.ip):
+                msg = (
+                    "Can only launch a kernel on a local interface. "
+                    f"This one is not: {km.ip}. "
+                    "Make sure that the '*_address' attributes are "
+                    "configured properly. "
+                    f"Currently valid addresses are: {local_ips()}"
+                )
+                raise RuntimeError(msg)
+            # build the Popen cmd
+            extra_arguments = kwargs.pop("extra_arguments", [])
+
+            # write connection file / get default ports
+            # TODO - change when handshake pattern is adopted
+            if km.cache_ports and not self.ports_cached:
+                lpc = LocalPortCache.instance()
+                km.shell_port = lpc.find_available_port(km.ip)
+                km.iopub_port = lpc.find_available_port(km.ip)
+                km.stdin_port = lpc.find_available_port(km.ip)
+                km.hb_port = lpc.find_available_port(km.ip)
+                km.control_port = lpc.find_available_port(km.ip)
+                self.ports_cached = True
+            if "env" in kwargs:
+                jupyter_session = kwargs["env"].get("JPY_SESSION_NAME", "")
+                km.write_connection_file(jupyter_session=jupyter_session)
+            else:
+                km.write_connection_file()
+            self.connection_info = km.get_connection_info()
+
+            kernel_cmd = km.format_kernel_cmd(
+                extra_arguments=extra_arguments
+            )  # This needs to remain here for b/c
+        else:
+            extra_arguments = kwargs.pop("extra_arguments", [])
+            kernel_cmd = self.kernel_spec.argv + extra_arguments
+
+        return await super().pre_launch(cmd=kernel_cmd, **kwargs)
+
+    async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo:
+        """Launch a kernel with a command."""
+        scrubbed_kwargs = LocalProvisioner._scrub_kwargs(kwargs)
+        self.process = launch_kernel(cmd, **scrubbed_kwargs)
+        pgid = None
+        if hasattr(os, "getpgid"):
+            try:
+                pgid = os.getpgid(self.process.pid)
+            except OSError:
+                pass
+
+        self.pid = self.process.pid
+        self.pgid = pgid
+        return self.connection_info
+
+    @staticmethod
+    def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Remove any keyword arguments that Popen does not tolerate."""
+        keywords_to_scrub: List[str] = ["extra_arguments", "kernel_id"]
+        scrubbed_kwargs = kwargs.copy()
+        for kw in keywords_to_scrub:
+            scrubbed_kwargs.pop(kw, None)
+        return scrubbed_kwargs
+
+    async def get_provisioner_info(self) -> Dict:
+        """Captures the base information necessary for persistence relative to this instance."""
+        provisioner_info = await super().get_provisioner_info()
+        provisioner_info.update({"pid": self.pid, "pgid": self.pgid, "ip": self.ip})
+        return provisioner_info
+
+    async def load_provisioner_info(self, provisioner_info: Dict) -> None:
+        """Loads the base information necessary for persistence relative to this instance."""
+        await super().load_provisioner_info(provisioner_info)
+        self.pid = provisioner_info["pid"]
+        self.pgid = provisioner_info["pgid"]
+        self.ip = provisioner_info["ip"]
diff --git a/lib/python3.10/site-packages/jupyter_client/provisioning/provisioner_base.py b/lib/python3.10/site-packages/jupyter_client/provisioning/provisioner_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..eff894327a605ca975cb9804a34234355cb5b791
--- /dev/null
+++ b/lib/python3.10/site-packages/jupyter_client/provisioning/provisioner_base.py
@@ -0,0 +1,257 @@
+"""Kernel Provisioner Classes"""
+# Copyright (c) Jupyter Development Team.
+# Distributed under the terms of the Modified BSD License.
+import os
+from abc import ABC, ABCMeta, abstractmethod
+from typing import Any, Dict, List, Optional, Union
+
+from traitlets.config import Instance, LoggingConfigurable, Unicode
+
+from ..connect import KernelConnectionInfo
+
+
+class KernelProvisionerMeta(ABCMeta, type(LoggingConfigurable)):  # type: ignore[misc]
+    pass
+
+
+class KernelProvisionerBase(  # type:ignore[misc]
+    ABC, LoggingConfigurable, metaclass=KernelProvisionerMeta
+):
+    """
+    Abstract base class defining methods for KernelProvisioner classes.
+
+    A majority of methods are abstract (requiring implementations via a subclass) while
+    some are optional and others provide implementations common to all instances.
+    Subclasses should be aware of which methods require a call to the superclass.
+
+    Many of these methods model those of :class:`subprocess.Popen` for parity with
+    previous versions where the kernel process was managed directly.
+    """
+
+    # The kernel specification associated with this provisioner
+    kernel_spec: Any = Instance("jupyter_client.kernelspec.KernelSpec", allow_none=True)
+    kernel_id: Union[str, Unicode] = Unicode(None, allow_none=True)
+    connection_info: KernelConnectionInfo = {}
+
+    @property
+    @abstractmethod
+    def has_process(self) -> bool:
+        """
+        Returns True if this provisioner is currently managing a process.
+
+        This property is asserted to be True immediately following a call to
+        the provisioner's :meth:`launch_kernel` method.
+        """
+        pass
+
+    @abstractmethod
+    async def poll(self) -> Optional[int]:
+        """
+        Checks if kernel process is still running.
+
+        If running, None is returned, otherwise the process's integer-valued exit code is returned.
+        This method is called from :meth:`KernelManager.is_alive`.
+        """
+        pass
+
+    @abstractmethod
+    async def wait(self) -> Optional[int]:
+        """
+        Waits for kernel process to terminate.
+
+        This method is called from `KernelManager.finish_shutdown()` and
+        `KernelManager.kill_kernel()` when terminating a kernel gracefully or
+        immediately, respectively.
+        """
+        pass
+
+    @abstractmethod
+    async def send_signal(self, signum: int) -> None:
+        """
+        Sends signal identified by signum to the kernel process.
+
+        This method is called from `KernelManager.signal_kernel()` to send the
+        kernel process a signal.
+        """
+        pass
+
+    @abstractmethod
+    async def kill(self, restart: bool = False) -> None:
+        """
+        Kill the kernel process.
+
+        This is typically accomplished via a SIGKILL signal, which cannot be caught.
+        This method is called from `KernelManager.kill_kernel()` when terminating
+        a kernel immediately.
+
+        restart is True if this operation will precede a subsequent launch_kernel request.
+        """
+        pass
+
+    @abstractmethod
+    async def terminate(self, restart: bool = False) -> None:
+        """
+        Terminates the kernel process.
+
+        This is typically accomplished via a SIGTERM signal, which can be caught, allowing
+        the kernel provisioner to perform possible cleanup of resources. This method is
+        called indirectly from `KernelManager.finish_shutdown()` during a kernel's
+        graceful termination.
+
+        restart is True if this operation precedes a subsequent launch_kernel request.
+        """
+        pass
+
+    @abstractmethod
+    async def launch_kernel(self, cmd: List[str], **kwargs: Any) -> KernelConnectionInfo:
+        """
+        Launch the kernel process and return its connection information.
+
+        This method is called from `KernelManager.launch_kernel()` during the
+        kernel manager's start kernel sequence.
+        """
+        pass
+
+    @abstractmethod
+    async def cleanup(self, restart: bool = False) -> None:
+        """
+        Cleanup any resources allocated on behalf of the kernel provisioner.
+
+        This method is called from `KernelManager.cleanup_resources()` as part of
+        its shutdown kernel sequence.
+
+        restart is True if this operation precedes a subsequent launch_kernel request.
+        """
+        pass
+
+    async def shutdown_requested(self, restart: bool = False) -> None:
+        """
+        Allows the provisioner to determine if the kernel's shutdown has been requested.
+
+        This method is called from `KernelManager.request_shutdown()` as part of
+        its shutdown sequence.
+
+        This method is optional and is primarily used in scenarios where the provisioner
+        may need to perform other operations in preparation for a kernel's shutdown.
+        """
+        pass
+
+    async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Perform any steps in preparation for kernel process launch.
+
+        This includes applying additional substitutions to the kernel launch command
+        and environment. It also includes preparation of launch parameters.
+
+        NOTE: Subclass implementations are advised to call this method as it applies
+        environment variable substitutions from the local environment and calls the
+        provisioner's :meth:`_finalize_env()` method to allow each provisioner the
+        ability to clean up the environment variables that will be used by the kernel.
+
+        This method is called from `KernelManager.pre_start_kernel()` as part of its
+        start kernel sequence.
+
+        Returns the (potentially updated) keyword arguments that are passed to
+        :meth:`launch_kernel()`.
+        """
+        env = kwargs.pop("env", os.environ).copy()
+        env.update(self.__apply_env_substitutions(env))
+        self._finalize_env(env)
+        kwargs["env"] = env
+
+        return kwargs
+
+    async def post_launch(self, **kwargs: Any) -> None:
+        """
+        Perform any steps following the kernel process launch.
+
+        This method is called from `KernelManager.post_start_kernel()` as part of its
+        start kernel sequence.
+        """
+        pass
+
+    async def get_provisioner_info(self) -> Dict[str, Any]:
+        """
+        Captures the base information necessary for persistence relative to this instance.
+
+        This enables applications that subclass `KernelManager` to persist a kernel provisioner's
+        relevant information to accomplish functionality like disaster recovery or high availability
+        by calling this method via the kernel manager's `provisioner` attribute.
+
+        NOTE: The superclass method must always be called first to ensure proper serialization.
+        """
+        provisioner_info: Dict[str, Any] = {}
+        provisioner_info["kernel_id"] = self.kernel_id
+        provisioner_info["connection_info"] = self.connection_info
+        return provisioner_info
+
+    async def load_provisioner_info(self, provisioner_info: Dict) -> None:
+        """
+        Loads the base information necessary for persistence relative to this instance.
+
+        The inverse of `get_provisioner_info()`, this enables applications that subclass
+        `KernelManager` to re-establish communication with a provisioner that is managing
+        a (presumably) remote kernel from an entirely different process than the original
+        provisioner.
+
+        NOTE: The superclass method must always be called first to ensure proper deserialization.
+        """
+        self.kernel_id = provisioner_info["kernel_id"]
+        self.connection_info = provisioner_info["connection_info"]
+
+    def get_shutdown_wait_time(self, recommended: float = 5.0) -> float:
+        """
+        Returns the time allowed for a complete shutdown. This may vary by provisioner.
+ + This method is called from `KernelManager.finish_shutdown()` during the graceful + phase of its kernel shutdown sequence. + + The recommended value will typically be what is configured in the kernel manager. + """ + return recommended + + def get_stable_start_time(self, recommended: float = 10.0) -> float: + """ + Returns the expected upper bound for a kernel (re-)start to complete. + This may vary by provisioner. + + The recommended value will typically be what is configured in the kernel restarter. + """ + return recommended + + def _finalize_env(self, env: Dict[str, str]) -> None: + """ + Ensures env is appropriate prior to launch. + + This method is called from `KernelProvisionerBase.pre_launch()` during the kernel's + start sequence. + + NOTE: Subclasses should be sure to call super()._finalize_env(env) + """ + if self.kernel_spec.language and self.kernel_spec.language.lower().startswith("python"): + # Don't allow PYTHONEXECUTABLE to be passed to kernel process. + # If set, it can bork all the things. + env.pop("PYTHONEXECUTABLE", None) + + def __apply_env_substitutions(self, substitution_values: Dict[str, str]) -> Dict[str, str]: + """ + Walks entries in the kernelspec's env stanza and applies substitutions from current env. + + This method is called from `KernelProvisionerBase.pre_launch()` during the kernel's + start sequence. + + Returns the substituted list of env entries. + + NOTE: This method is private and is not intended to be overridden by provisioners. + """ + substituted_env = {} + if self.kernel_spec: + from string import Template + + # For each templated env entry, fill any templated references + # matching names of env variables with those values and build + # new dict with substitutions. + templated_env = self.kernel_spec.env + for k, v in templated_env.items(): + substituted_env.update({k: Template(v).safe_substitute(substitution_values)}) + return substituted_env diff --git a/lib/python3.10/site-packages/jupyter_client/ssh/__init__.py b/lib/python3.10/site-packages/jupyter_client/ssh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..50f2cdde7802753bbe423fcb880dd62e8b27abbf --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_client/ssh/__init__.py @@ -0,0 +1 @@ +from .tunnel import * # noqa diff --git a/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4729d23459095fb3c6ca088bf8133016d6f26c2d Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/forward.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/forward.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d345c5e4a61398741fee32eeee073e10dd327d7 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/forward.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/tunnel.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/tunnel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44df7207521ea795d026e090c4328a0e938ff1e1 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_client/ssh/__pycache__/tunnel.cpython-310.pyc differ diff --git 
a/lib/python3.10/site-packages/jupyter_client/ssh/forward.py b/lib/python3.10/site-packages/jupyter_client/ssh/forward.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2f28d218026382cfeb02906da05e9c43aa3463a
--- /dev/null
+++ b/lib/python3.10/site-packages/jupyter_client/ssh/forward.py
@@ -0,0 +1,102 @@
+"""Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+#
+# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
+# Original Copyright (C) 2003-2007 Robey Pointer
+# Edits Copyright (C) 2010 The IPython Team
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
+import logging
+import select
+import socketserver
+import typing as t
+
+logger = logging.getLogger("ssh")
+
+
+class ForwardServer(socketserver.ThreadingTCPServer):
+    """A server to use for ssh forwarding."""
+
+    daemon_threads = True
+    allow_reuse_address = True
+
+
+class Handler(socketserver.BaseRequestHandler):
+    """A handler for server requests."""
+
+    @t.no_type_check
+    def handle(self):
+        """Handle a request."""
+        try:
+            chan = self.ssh_transport.open_channel(
+                "direct-tcpip",
+                (self.chain_host, self.chain_port),
+                self.request.getpeername(),
+            )
+        except Exception as e:
+            logger.debug(
+                "Incoming request to %s:%d failed: %s" % (self.chain_host, self.chain_port, repr(e))
+            )
+            return
+        if chan is None:
+            logger.debug(
+                "Incoming request to %s:%d was rejected by the SSH server."
+                % (self.chain_host, self.chain_port)
+            )
+            return
+
+        logger.debug(
+            "Connected! Tunnel open {!r} -> {!r} -> {!r}".format(
+                self.request.getpeername(),
+                chan.getpeername(),
+                (self.chain_host, self.chain_port),
+            )
+        )
+        while True:
+            r, w, x = select.select([self.request, chan], [], [])
+            if self.request in r:
+                data = self.request.recv(1024)
+                if len(data) == 0:
+                    break
+                chan.send(data)
+            if chan in r:
+                data = chan.recv(1024)
+                if len(data) == 0:
+                    break
+                self.request.send(data)
+        chan.close()
+        self.request.close()
+        logger.debug("Tunnel closed ")
+
+
+def forward_tunnel(local_port: int, remote_host: str, remote_port: int, transport: t.Any) -> None:
+    """Forward an ssh tunnel."""
+
+    # this is a little convoluted, but lets me configure things for the Handler
+    # object. (SocketServer doesn't give Handlers any way to access the outer
+    # server normally.)
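+    # The effect is roughly that of `ssh server -L 127.0.0.1:local_port:remote_host:remote_port`:
+    # each client connecting to 127.0.0.1:local_port is relayed over a
+    # "direct-tcpip" channel opened on the given transport by Handler above.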
+    class SubHandler(Handler):
+        chain_host = remote_host
+        chain_port = remote_port
+        ssh_transport = transport
+
+    ForwardServer(("127.0.0.1", local_port), SubHandler).serve_forever()
+
+
+__all__ = ["forward_tunnel"]
diff --git a/lib/python3.10/site-packages/jupyter_client/ssh/tunnel.py b/lib/python3.10/site-packages/jupyter_client/ssh/tunnel.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b1b533ca32be3b38d6a56a5f6ed7ef2cc23f6aa
--- /dev/null
+++ b/lib/python3.10/site-packages/jupyter_client/ssh/tunnel.py
@@ -0,0 +1,446 @@
+"""Basic ssh tunnel utilities, and convenience functions for tunneling
+zeromq connections.
+"""
+# Copyright (C) 2010-2011 IPython Development Team
+# Copyright (C) 2011- PyZMQ Developers
+#
+# Redistributed from IPython under the terms of the BSD License.
+from __future__ import annotations
+
+import atexit
+import os
+import re
+import signal
+import socket
+import sys
+import warnings
+from getpass import getpass, getuser
+from multiprocessing import Process
+from typing import Any, cast
+
+try:
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", DeprecationWarning)
+        import paramiko
+
+    SSHException = paramiko.ssh_exception.SSHException
+except ImportError:
+    paramiko = None  # type:ignore[assignment]
+
+    class SSHException(Exception):  # type:ignore[no-redef] # noqa
+        pass
+
+else:
+    from .forward import forward_tunnel
+
+try:
+    import pexpect  # type: ignore[import-untyped]
+except ImportError:
+    pexpect = None
+
+
+def select_random_ports(n: int) -> list[int]:
+    """Select and return n random ports that are available."""
+    ports = []
+    sockets = []
+    for _ in range(n):
+        sock = socket.socket()
+        sock.bind(("", 0))
+        ports.append(sock.getsockname()[1])
+        sockets.append(sock)
+    for sock in sockets:
+        sock.close()
+    return ports
+
+
+# -----------------------------------------------------------------------------
+# Check for passwordless login
+# -----------------------------------------------------------------------------
+_password_pat = re.compile(rb"pass(word|phrase):", re.IGNORECASE)
+
+
+def try_passwordless_ssh(server: str, keyfile: str | None, paramiko: Any = None) -> Any:
+    """Attempt to make an ssh connection without a password.
+    This is mainly used for requiring password input only once
+    when many tunnels may be connected to the same server.
+
+    If paramiko is None, the default for the platform is chosen.
+    """
+    if paramiko is None:
+        paramiko = sys.platform == "win32"
+    f = _try_passwordless_paramiko if paramiko else _try_passwordless_openssh
+    return f(server, keyfile)
+
+
+def _try_passwordless_openssh(server: str, keyfile: str | None) -> bool:
+    """Try passwordless login with shell ssh command."""
+    if pexpect is None:
+        msg = "pexpect unavailable, use paramiko"
+        raise ImportError(msg)
+    cmd = "ssh -f " + server
+    if keyfile:
+        cmd += " -i " + keyfile
+    cmd += " exit"
+
+    # pop SSH_ASKPASS from env
+    env = os.environ.copy()
+    env.pop("SSH_ASKPASS", None)
+
+    ssh_newkey = "Are you sure you want to continue connecting"
+    p = pexpect.spawn(cmd, env=env)
+    while True:
+        try:
+            i = p.expect([ssh_newkey, _password_pat], timeout=0.1)
+            if i == 0:
+                msg = "The authenticity of the host can't be established."
+                raise SSHException(msg)
+        except pexpect.TIMEOUT:
+            continue
+        except pexpect.EOF:
+            return True
+        else:
+            return False
+
+
+def _try_passwordless_paramiko(server: str, keyfile: str | None) -> bool:
+    """Try passwordless login with paramiko."""
+    if paramiko is None:
+        msg = "Paramiko unavailable, "  # type:ignore[unreachable]
+        if sys.platform == "win32":
+            msg += "Paramiko is required for ssh tunneled connections on Windows."
+        else:
+            msg += "use OpenSSH."
+        raise ImportError(msg)
+    username, server, port = _split_server(server)
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    client.set_missing_host_key_policy(paramiko.WarningPolicy())
+    try:
+        client.connect(server, port, username=username, key_filename=keyfile, look_for_keys=True)
+    except paramiko.AuthenticationException:
+        return False
+    else:
+        client.close()
+        return True
+
+
+def tunnel_connection(
+    socket: socket.socket,
+    addr: str,
+    server: str,
+    keyfile: str | None = None,
+    password: str | None = None,
+    paramiko: Any = None,
+    timeout: int = 60,
+) -> int:
+    """Connect a socket to an address via an ssh tunnel.
+
+    This is a wrapper for socket.connect(addr), when addr is not accessible
+    from the local machine. It simply creates an ssh tunnel using the remaining args,
+    and calls socket.connect('tcp://localhost:lport') where lport is the randomly
+    selected local port of the tunnel.
+
+    """
+    new_url, tunnel = open_tunnel(
+        addr,
+        server,
+        keyfile=keyfile,
+        password=password,
+        paramiko=paramiko,
+        timeout=timeout,
+    )
+    socket.connect(new_url)
+    return tunnel
+
+
+def open_tunnel(
+    addr: str,
+    server: str,
+    keyfile: str | None = None,
+    password: str | None = None,
+    paramiko: Any = None,
+    timeout: int = 60,
+) -> tuple[str, int]:
+    """Open a tunneled connection from a 0MQ url.
+
+    For use inside tunnel_connection.
+
+    Returns
+    -------
+
+    (url, tunnel) : (str, object)
+        The 0MQ url that has been forwarded, and the tunnel object
+    """
+
+    lport = select_random_ports(1)[0]
+    _, addr = addr.split("://")
+    ip, rport = addr.split(":")
+    rport_int = int(rport)
+    paramiko = sys.platform == "win32" if paramiko is None else paramiko
+    tunnelf = paramiko_tunnel if paramiko else openssh_tunnel
+
+    tunnel = tunnelf(
+        lport,
+        rport_int,
+        server,
+        remoteip=ip,
+        keyfile=keyfile,
+        password=password,
+        timeout=timeout,
+    )
+    return "tcp://127.0.0.1:%i" % lport, cast(int, tunnel)
+
+
+def openssh_tunnel(
+    lport: int,
+    rport: int,
+    server: str,
+    remoteip: str = "127.0.0.1",
+    keyfile: str | None = None,
+    password: str | None | bool = None,
+    timeout: int = 60,
+) -> int:
+    """Create an ssh tunnel using command-line ssh that connects port lport
+    on this machine to localhost:rport on server. The tunnel
+    will automatically close when not in use, remaining open
+    for a minimum of timeout seconds for an initial connection.
+
+    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+    as seen from `server`.
+
+    keyfile and password may be specified, but ssh config is checked for defaults.
+
+    Parameters
+    ----------
+
+    lport : int
+        local port for connecting to the tunnel from this machine.
+    rport : int
+        port on the remote machine to connect to.
+    server : str
+        The ssh server to connect to. The full ssh server string will be parsed.
+        user@server:port
+    remoteip : str [Default: 127.0.0.1]
+        The remote ip, specifying the destination of the tunnel.
+        Default is localhost, which means that the tunnel would redirect
+        localhost:lport on this machine to localhost:rport on the *server*.
+
+    keyfile : str; path to public key file
+        This specifies a key to be used in ssh login, default None.
+        Regular default ssh keys will be used without specifying this argument.
+    password : str;
+        Your ssh password to the ssh server. Note that if this is left None,
+        you will be prompted for it if passwordless key based login is unavailable.
+    timeout : int [default: 60]
+        The time (in seconds) after which no activity will result in the tunnel
+        closing. This prevents orphaned tunnels from running forever.
+    """
+    if pexpect is None:
+        msg = "pexpect unavailable, use paramiko_tunnel"
+        raise ImportError(msg)
+    ssh = "ssh "
+    if keyfile:
+        ssh += "-i " + keyfile
+
+    if ":" in server:
+        server, port = server.split(":")
+        ssh += " -p %s" % port
+
+    cmd = f"{ssh} -O check {server}"
+    (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+    if not exitstatus:
+        pid = int(output[output.find(b"(pid=") + 5 : output.find(b")")])
+        cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % (
+            ssh,
+            lport,
+            remoteip,
+            rport,
+            server,
+        )
+        (output, exitstatus) = pexpect.run(cmd, withexitstatus=True)
+        if not exitstatus:
+            atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1))
+            return pid
+    cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
+        ssh,
+        lport,
+        remoteip,
+        rport,
+        server,
+        timeout,
+    )
+
+    # pop SSH_ASKPASS from env
+    env = os.environ.copy()
+    env.pop("SSH_ASKPASS", None)
+
+    ssh_newkey = "Are you sure you want to continue connecting"
+    tunnel = pexpect.spawn(cmd, env=env)
+    failed = False
+    while True:
+        try:
+            i = tunnel.expect([ssh_newkey, _password_pat], timeout=0.1)
+            if i == 0:
+                msg = "The authenticity of the host can't be established."
+                raise SSHException(msg)
+        except pexpect.TIMEOUT:
+            continue
+        except pexpect.EOF as e:
+            tunnel.wait()
+            if tunnel.exitstatus:
+                raise RuntimeError("tunnel '%s' failed to start" % (cmd)) from e
+            else:
+                return tunnel.pid
+        else:
+            if failed:
+                warnings.warn("Password rejected, try again", stacklevel=2)
+                password = None
+            if password is None:
+                password = getpass("%s's password: " % (server))
+            tunnel.sendline(password)
+            failed = True
+
+
+def _stop_tunnel(cmd: Any) -> None:
+    pexpect.run(cmd)
+
+
+def _split_server(server: str) -> tuple[str, str, int]:
+    if "@" in server:
+        username, server = server.split("@", 1)
+    else:
+        username = getuser()
+    if ":" in server:
+        server, port_str = server.split(":")
+        port = int(port_str)
+    else:
+        port = 22
+    return username, server, port
+
+
+def paramiko_tunnel(
+    lport: int,
+    rport: int,
+    server: str,
+    remoteip: str = "127.0.0.1",
+    keyfile: str | None = None,
+    password: str | None = None,
+    timeout: float = 60,
+) -> Process:
+    """launch a tunnel with paramiko in a subprocess. This should only be used
+    when shell ssh is unavailable (e.g. Windows).
+
+    This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
+    as seen from `server`.
+
+    If you are familiar with ssh tunnels, this creates the tunnel:
+
+    ssh server -L localhost:lport:remoteip:rport
+
+    keyfile and password may be specified, but ssh config is checked for defaults.
+
+
+    Parameters
+    ----------
+
+    lport : int
+        local port for connecting to the tunnel from this machine.
+    rport : int
+        port on the remote machine to connect to.
+    server : str
+        The ssh server to connect to. The full ssh server string will be parsed.
+ user@server:port + remoteip : str [Default: 127.0.0.1] + The remote ip, specifying the destination of the tunnel. + Default is localhost, which means that the tunnel would redirect + localhost:lport on this machine to localhost:rport on the *server*. + + keyfile : str; path to public key file + This specifies a key to be used in ssh login, default None. + Regular default ssh keys will be used without specifying this argument. + password : str; + Your ssh password to the ssh server. Note that if this is left None, + you will be prompted for it if passwordless key based login is unavailable. + timeout : int [default: 60] + The time (in seconds) after which no activity will result in the tunnel + closing. This prevents orphaned tunnels from running forever. + + """ + if paramiko is None: + msg = "Paramiko not available" # type:ignore[unreachable] + raise ImportError(msg) + + if password is None and not _try_passwordless_paramiko(server, keyfile): + password = getpass("%s's password: " % (server)) + + p = Process( + target=_paramiko_tunnel, + args=(lport, rport, server, remoteip), + kwargs={"keyfile": keyfile, "password": password}, + ) + p.daemon = True + p.start() + return p + + +def _paramiko_tunnel( + lport: int, + rport: int, + server: str, + remoteip: str, + keyfile: str | None = None, + password: str | None = None, +) -> None: + """Function for actually starting a paramiko tunnel, to be passed + to multiprocessing.Process(target=this), and not called directly. + """ + username, server, port = _split_server(server) + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + + try: + client.connect( + server, + port, + username=username, + key_filename=keyfile, + look_for_keys=True, + password=password, + ) + # except paramiko.AuthenticationException: + # if password is None: + # password = getpass("%s@%s's password: "%(username, server)) + # client.connect(server, port, username=username, password=password) + # else: + # raise + except Exception as e: + warnings.warn("*** Failed to connect to %s:%d: %r" % (server, port, e), stacklevel=2) + sys.exit(1) + + # Don't let SIGINT kill the tunnel subprocess + signal.signal(signal.SIGINT, signal.SIG_IGN) + + try: + forward_tunnel(lport, remoteip, rport, client.get_transport()) + except KeyboardInterrupt: + warnings.warn("SIGINT: Port forwarding stopped cleanly", stacklevel=2) + sys.exit(0) + except Exception as e: + warnings.warn("Port forwarding stopped uncleanly: %s" % e, stacklevel=2) + sys.exit(255) + + +if sys.platform == "win32": + ssh_tunnel = paramiko_tunnel +else: + ssh_tunnel = openssh_tunnel + + +__all__ = [ + "tunnel_connection", + "ssh_tunnel", + "openssh_tunnel", + "paramiko_tunnel", + "try_passwordless_ssh", +] diff --git a/lib/python3.10/site-packages/jupyter_core-5.8.1.dist-info/licenses/LICENSE b/lib/python3.10/site-packages/jupyter_core-5.8.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..a44e199a8af58e5922f8ebb696218312c317dedc --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_core-5.8.1.dist-info/licenses/LICENSE @@ -0,0 +1,30 @@ +BSD 3-Clause License + +- Copyright (c) 2015-, Jupyter Development Team + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bfc3b508cd5f01d3c93042a8cc5c7e9f0eaf08f Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/__main__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82cc64ba4243c45be6f0f1ca18d44d56186c9a8c Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/__main__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/application.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/application.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16a57192a3ed1a2f39060419ab815605b523f4e3 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/application.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/command.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/command.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53951081d3b84a9da4caf07d95e3139117b4efe7 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/command.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/migrate.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/migrate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39f7a236beec441a5191e2017335d5df4fff9341 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/migrate.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/paths.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/paths.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b6a7768ba9731c9a73cdc5339dcd70d8cb9be81 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/paths.cpython-310.pyc 
differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/troubleshoot.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/troubleshoot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..172e68b6b00ede7b4abb5ad0b4a534b236d23395 Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/troubleshoot.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/__pycache__/version.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b207e22a69dd0d23792b7e1a190fb4e095722bc Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/__pycache__/version.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyter_core/utils/__init__.py b/lib/python3.10/site-packages/jupyter_core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..665eac29cde05887402b6e577b78ed0ba2e22c16 --- /dev/null +++ b/lib/python3.10/site-packages/jupyter_core/utils/__init__.py @@ -0,0 +1,204 @@ +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + +import asyncio +import atexit +import errno +import inspect +import sys +import threading +import warnings +from contextvars import ContextVar +from pathlib import Path +from types import FrameType +from typing import Any, Awaitable, Callable, TypeVar, cast + + +def ensure_dir_exists(path: str | Path, mode: int = 0o777) -> None: + """Ensure that a directory exists + + If it doesn't exist, try to create it, protecting against a race condition + if another process is doing the same. + The default permissions are determined by the current umask. + """ + try: + Path(path).mkdir(parents=True, mode=mode) + except OSError as e: + if e.errno != errno.EEXIST: + raise + if not Path(path).is_dir(): + msg = f"{path!r} exists but is not a directory" + raise OSError(msg) + + +def _get_frame(level: int) -> FrameType | None: + """Get the frame at the given stack level.""" + # sys._getframe is much faster than inspect.stack, but isn't guaranteed to + # exist in all python implementations, so we fall back to inspect.stack() + + # We need to add one to level to account for this get_frame call. + if hasattr(sys, "_getframe"): + frame = sys._getframe(level + 1) + else: + frame = inspect.stack(context=0)[level + 1].frame + return frame + + +# This function is from https://github.com/python/cpython/issues/67998 +# (https://bugs.python.org/file39550/deprecated_module_stacklevel.diff) and +# calculates the appropriate stacklevel for deprecations to target the +# deprecation for the caller, no matter how many internal stack frames we have +# added in the process. For example, with the deprecation warning in the +# __init__ below, the appropriate stacklevel will change depending on how deep +# the inheritance hierarchy is. +def _external_stacklevel(internal: list[str]) -> int: + """Find the stacklevel of the first frame that doesn't contain any of the given internal strings + + The depth will be 1 at minimum in order to start checking at the caller of + the function that called this utility method. 
+ """ + # Get the level of my caller's caller + level = 2 + frame = _get_frame(level) + + # Normalize the path separators: + normalized_internal = [str(Path(s)) for s in internal] + + # climb the stack frames while we see internal frames + while frame and any(s in str(Path(frame.f_code.co_filename)) for s in normalized_internal): + level += 1 + frame = frame.f_back + + # Return the stack level from the perspective of whoever called us (i.e., one level up) + return level - 1 + + +def deprecation(message: str, internal: str | list[str] = "jupyter_core/") -> None: + """Generate a deprecation warning targeting the first frame that is not 'internal' + + internal is a string or list of strings, which if they appear in filenames in the + frames, the frames will be considered internal. Changing this can be useful if, for example, + we know that our internal code is calling out to another library. + """ + _internal: list[str] + _internal = [internal] if isinstance(internal, str) else internal + + # stack level of the first external frame from here + stacklevel = _external_stacklevel(_internal) + + # The call to .warn adds one frame, so bump the stacklevel up by one + warnings.warn(message, DeprecationWarning, stacklevel=stacklevel + 1) + + +T = TypeVar("T") + + +class _TaskRunner: + """A task runner that runs an asyncio event loop on a background thread.""" + + def __init__(self) -> None: + self.__io_loop: asyncio.AbstractEventLoop | None = None + self.__runner_thread: threading.Thread | None = None + self.__lock = threading.Lock() + atexit.register(self._close) + + def _close(self) -> None: + if self.__io_loop: + self.__io_loop.stop() + + def _runner(self) -> None: + loop = self.__io_loop + assert loop is not None + try: + loop.run_forever() + finally: + loop.close() + + def run(self, coro: Any) -> Any: + """Synchronously run a coroutine on a background thread.""" + with self.__lock: + name = f"{threading.current_thread().name} - runner" + if self.__io_loop is None: + self.__io_loop = asyncio.new_event_loop() + self.__runner_thread = threading.Thread(target=self._runner, daemon=True, name=name) + self.__runner_thread.start() + fut = asyncio.run_coroutine_threadsafe(coro, self.__io_loop) + return fut.result(None) + + +_runner_map: dict[str, _TaskRunner] = {} +_loop: ContextVar[asyncio.AbstractEventLoop | None] = ContextVar("_loop", default=None) + + +def run_sync(coro: Callable[..., Awaitable[T]]) -> Callable[..., T]: + """Wraps coroutine in a function that blocks until it has executed. + + Parameters + ---------- + coro : coroutine-function + The coroutine-function to be executed. + + Returns + ------- + result : + Whatever the coroutine-function returns. + """ + + assert inspect.iscoroutinefunction(coro) + + def wrapped(*args: Any, **kwargs: Any) -> Any: + name = threading.current_thread().name + inner = coro(*args, **kwargs) + try: + asyncio.get_running_loop() + except RuntimeError: + # No loop running, run the loop for this thread. + loop = ensure_event_loop() + return loop.run_until_complete(inner) + + # Loop is currently running in this thread, + # use a task runner. + if name not in _runner_map: + _runner_map[name] = _TaskRunner() + return _runner_map[name].run(inner) + + wrapped.__doc__ = coro.__doc__ + return wrapped + + +def ensure_event_loop(prefer_selector_loop: bool = False) -> asyncio.AbstractEventLoop: + # Get the loop for this thread, or create a new one. 
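+    # Resolution order: a loop previously cached in the `_loop` ContextVar
+    # (if still open), then a loop already running in this thread, then a
+    # newly created loop, which is installed and cached before returning.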
+ loop = _loop.get() + if loop is not None and not loop.is_closed(): + return loop + try: + loop = asyncio.get_running_loop() + except RuntimeError: + if sys.platform == "win32" and prefer_selector_loop: + loop = asyncio.WindowsSelectorEventLoopPolicy().new_event_loop() + else: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + _loop.set(loop) + return loop + + +async def ensure_async(obj: Awaitable[T] | T) -> T: + """Convert a non-awaitable object to a coroutine if needed, + and await it if it was not already awaited. + + This function is meant to be called on the result of calling a function, + when that function could either be asynchronous or not. + """ + if inspect.isawaitable(obj): + obj = cast(Awaitable[T], obj) + try: + result = await obj + except RuntimeError as e: + if str(e) == "cannot reuse already awaited coroutine": + # obj is already the coroutine's result + return cast(T, obj) + raise + return result + return obj diff --git a/lib/python3.10/site-packages/jupyter_core/utils/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyter_core/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..284200514605552a4da6ed997fb49cdc21a420fc Binary files /dev/null and b/lib/python3.10/site-packages/jupyter_core/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyterlab_widgets-3.0.15.dist-info/licenses/LICENSE b/lib/python3.10/site-packages/jupyterlab_widgets-3.0.15.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..75f346eb7436fd3b5f60de36971f7ab35970cbb6 --- /dev/null +++ b/lib/python3.10/site-packages/jupyterlab_widgets-3.0.15.dist-info/licenses/LICENSE @@ -0,0 +1,292 @@ +Copyright (c) 2015 Project Jupyter Contributors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ + +This package bundles several JavaScript npm packages in the +jupyterlab_widgets/static directory. 
Their licenses (as packaged in their +distributions in the node_modules package installation directory) are copied +below. + +------------------------------------------------------------------------------ +From css-loader/LICENSE: + +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +------------------------------------------------------------------------------ +From style-loader/LICENSE: + +Copyright JS Foundation and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +------------------------------------------------------------------------------ +From backbone/backbone.js + +// (c) 2010-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors +// Backbone may be freely distributed under the MIT license. 
+// For all details and documentation: +// http://backbonejs.org + +------------------------------------------------------------------------------ +From base-64/LICENSE + +The MIT License (MIT) + +Copyright (c) 2014 Jameson Little + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +------------------------------------------------------------------------------ +From lodash/LICENSE + +Copyright OpenJS Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + +------------------------------------------------------------------------------ +From d3-format/LICENSE: + +Copyright 2010-2015 Mike Bostock +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of contributors may be used to + endorse or promote products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------ +From noUISlider/LICENSE.md (https://github.com/leongersen/noUiSlider/blob/eca62f9e56aaf02f0841b36e7993adf8db3721d5/LICENSE.md) + +MIT License + +Copyright (c) 2019 Léon Gersen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +------------------------------------------------------------------ +From jquery/LICENSE.txt + +Copyright JS Foundation and other contributors, https://js.foundation/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +------------------------------------------------------------------ +From semver/LICENSE: + +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +------------------------------------------------------------------ +From underscore/LICENSE + +Copyright (c) 2009-2018 Jeremy Ashkenas, DocumentCloud and Investigative +Reporters & Editors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
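As a usage sketch for the run_sync and ensure_async helpers vendored above in jupyter_core/utils/__init__.py (a minimal example under assumptions: jupyter_core is importable from this tree, and fetch_value/maybe_async are hypothetical names, not part of the package):

import asyncio

from jupyter_core.utils import ensure_async, run_sync


async def fetch_value() -> int:
    # Hypothetical coroutine standing in for real async work.
    await asyncio.sleep(0.1)
    return 42


# run_sync turns a coroutine function into a blocking callable. With no loop
# running in the current thread it drives the coroutine on this thread's loop;
# with a loop already running, it hands the coroutine to a background _TaskRunner.
fetch_value_sync = run_sync(fetch_value)
assert fetch_value_sync() == 42


def maybe_async(x: int) -> int:
    # Hypothetical callback that may or may not be a coroutine function.
    return x * 2


async def caller() -> None:
    # ensure_async awaits the value only when it is awaitable, so callers can
    # treat sync and async callbacks uniformly.
    assert await ensure_async(maybe_async(21)) == 42
    assert await ensure_async(fetch_value()) == 42


asyncio.run(caller())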
diff --git a/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..356db8417879412d417a15eef760b12929ff31dd Binary files /dev/null and b/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/_version.cpython-310.pyc b/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e6b0d9af7bc91fc54eed16d0e77b07396673657 Binary files /dev/null and b/lib/python3.10/site-packages/jupyterlab_widgets/__pycache__/_version.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/kiwisolver/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/kiwisolver/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e52635cbdbee15d60be9f7527656b8e2d931e915 Binary files /dev/null and b/lib/python3.10/site-packages/kiwisolver/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/kiwisolver/__pycache__/exceptions.cpython-310.pyc b/lib/python3.10/site-packages/kiwisolver/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..260f2cb208f96269e5cd1e50f230b510be949033 Binary files /dev/null and b/lib/python3.10/site-packages/kiwisolver/__pycache__/exceptions.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/kiwisolver/_cext.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/kiwisolver/_cext.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0a2972212bece1366bac9f5ac314395408b50d41 --- /dev/null +++ b/lib/python3.10/site-packages/kiwisolver/_cext.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2637add9600ed095e9f7aca3de0cd19c4ad06b4e451c708e9c7f9b92aea37d4a +size 6666480 diff --git a/lib/python3.10/site-packages/lazy_loader/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c115776f830aa997f8c657a8caaa151b042bc6b Binary files /dev/null and b/lib/python3.10/site-packages/lazy_loader/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/__init__.py b/lib/python3.10/site-packages/lazy_loader/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db927a28d166043ee13ad1321951dd0c5c61e672 Binary files /dev/null and b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/import_np_parallel.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/import_np_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ee3f82713e01c94c963ba81e5a76062c6d9f358 Binary files /dev/null and 
b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/import_np_parallel.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/test_lazy_loader.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/test_lazy_loader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d71b8ad0fe152af4b5e44f6c07aeb5d2704a3c1 Binary files /dev/null and b/lib/python3.10/site-packages/lazy_loader/tests/__pycache__/test_lazy_loader.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.py b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..540fd731f20cc99ca8570cc4c4b2bf75a166797e --- /dev/null +++ b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.py @@ -0,0 +1,5 @@ +import lazy_loader as lazy + +__getattr__, __lazy_dir__, __all__ = lazy.attach( + __name__, submod_attrs={"some_func": ["some_func"]} +) diff --git a/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.pyi b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d3349bb667ed72831d7b585cce512209cc533c07 --- /dev/null +++ b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__init__.pyi @@ -0,0 +1 @@ +from .some_func import some_func diff --git a/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..713f872cd9e3040bd17ee4fbe903d370e0b0b0a4 Binary files /dev/null and b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/some_func.cpython-310.pyc b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/some_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64cc76988c66456e1922a1fda9730e10ad976a85 Binary files /dev/null and b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/__pycache__/some_func.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/some_func.py b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/some_func.py new file mode 100644 index 0000000000000000000000000000000000000000..10e99ed0ab8be6458a54c62a9c2cf19a5f99145f --- /dev/null +++ b/lib/python3.10/site-packages/lazy_loader/tests/fake_pkg/some_func.py @@ -0,0 +1,3 @@ +def some_func(): + """Function with same name as submodule.""" + pass diff --git a/lib/python3.10/site-packages/lazy_loader/tests/import_np_parallel.py b/lib/python3.10/site-packages/lazy_loader/tests/import_np_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..50dc0d0081001613247d31162d69273b42a2c761 --- /dev/null +++ b/lib/python3.10/site-packages/lazy_loader/tests/import_np_parallel.py @@ -0,0 +1,13 @@ +import threading +import time + +import lazy_loader as lazy + + +def import_np(): + time.sleep(0.5) + lazy.load("numpy") + + +for _ in range(10): + threading.Thread(target=import_np).start() diff --git a/lib/python3.10/site-packages/lazy_loader/tests/test_lazy_loader.py b/lib/python3.10/site-packages/lazy_loader/tests/test_lazy_loader.py new file mode 100644 index 
0000000000000000000000000000000000000000..19187bab54b2ba6fa80741ab35d9b510886fe474 --- /dev/null +++ b/lib/python3.10/site-packages/lazy_loader/tests/test_lazy_loader.py @@ -0,0 +1,192 @@ +import importlib +import os +import subprocess +import sys +import types +from unittest import mock + +import pytest + +import lazy_loader as lazy + + +def test_lazy_import_basics(): + math = lazy.load("math") + anything_not_real = lazy.load("anything_not_real") + + # Now test that accessing attributes does what it should + assert math.sin(math.pi) == pytest.approx(0, 1e-6) + # Poor man's pytest.raises for testing errors on attribute access + try: + anything_not_real.pi + raise AssertionError() # Should not get here + except ModuleNotFoundError: + pass + assert isinstance(anything_not_real, lazy.DelayedImportErrorModule) + # See if it changes on second access + try: + anything_not_real.pi + raise AssertionError() # Should not get here + except ModuleNotFoundError: + pass + + +def test_lazy_import_subpackages(): + with pytest.warns(RuntimeWarning): + hp = lazy.load("html.parser") + assert "html" in sys.modules + assert type(sys.modules["html"]) == type(pytest) + assert isinstance(hp, importlib.util._LazyModule) + assert "html.parser" in sys.modules + assert sys.modules["html.parser"] == hp + + +def test_lazy_import_impact_on_sys_modules(): + math = lazy.load("math") + anything_not_real = lazy.load("anything_not_real") + + assert isinstance(math, types.ModuleType) + assert "math" in sys.modules + assert isinstance(anything_not_real, lazy.DelayedImportErrorModule) + assert "anything_not_real" not in sys.modules + + # Only do this if numpy is installed + pytest.importorskip("numpy") + np = lazy.load("numpy") + assert isinstance(np, types.ModuleType) + assert "numpy" in sys.modules + + np.pi # trigger load of numpy + + assert isinstance(np, types.ModuleType) + assert "numpy" in sys.modules + + +def test_lazy_import_nonbuiltins(): + np = lazy.load("numpy") + sp = lazy.load("scipy") + if not isinstance(np, lazy.DelayedImportErrorModule): + assert np.sin(np.pi) == pytest.approx(0, 1e-6) + if isinstance(sp, lazy.DelayedImportErrorModule): + try: + sp.pi + raise AssertionError() + except ModuleNotFoundError: + pass + + +def test_lazy_attach(): + name = "mymod" + submods = ["mysubmodule", "anothersubmodule"] + myall = {"not_real_submod": ["some_var_or_func"]} + + locls = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + } + s = "__getattr__, __lazy_dir__, __all__ = attach(name, submods, myall)" + + exec(s, {}, locls) + expected = { + "attach": lazy.attach, + "name": name, + "submods": submods, + "myall": myall, + "__getattr__": None, + "__lazy_dir__": None, + "__all__": None, + } + assert locls.keys() == expected.keys() + for k, v in expected.items(): + if v is not None: + assert locls[k] == v + + +def test_attach_same_module_and_attr_name(): + from lazy_loader.tests import fake_pkg + + # Grab attribute twice, to ensure that importing it does not + # override the function with the module + assert isinstance(fake_pkg.some_func, types.FunctionType) + assert isinstance(fake_pkg.some_func, types.FunctionType) + + # Ensure imports from submodule still work + from lazy_loader.tests.fake_pkg.some_func import some_func + + assert isinstance(some_func, types.FunctionType) + + +FAKE_STUB = """ +from . 
import rank +from ._gaussian import gaussian +from .edges import sobel, scharr, prewitt, roberts +""" + + +def test_stub_loading(tmp_path): + stub = tmp_path / "stub.pyi" + stub.write_text(FAKE_STUB) + _get, _dir, _all = lazy.attach_stub("my_module", str(stub)) + expect = {"gaussian", "sobel", "scharr", "prewitt", "roberts", "rank"} + assert set(_dir()) == set(_all) == expect + + +def test_stub_loading_parity(): + from lazy_loader.tests import fake_pkg + + from_stub = lazy.attach_stub(fake_pkg.__name__, fake_pkg.__file__) + stub_getter, stub_dir, stub_all = from_stub + assert stub_all == fake_pkg.__all__ + assert stub_dir() == fake_pkg.__lazy_dir__() + assert stub_getter("some_func") == fake_pkg.some_func + + +def test_stub_loading_errors(tmp_path): + stub = tmp_path / "stub.pyi" + stub.write_text("from ..mod import func\n") + + with pytest.raises(ValueError, match="Only within-module imports are supported"): + lazy.attach_stub("name", str(stub)) + + with pytest.raises(ValueError, match="Cannot load imports from non-existent stub"): + lazy.attach_stub("name", "not a file") + + stub2 = tmp_path / "stub2.pyi" + stub2.write_text("from .mod import *\n") + with pytest.raises(ValueError, match=".*does not support star import"): + lazy.attach_stub("name", str(stub2)) + + +def test_require_kwarg(): + have_importlib_metadata = importlib.util.find_spec("importlib.metadata") is not None + dot = "." if have_importlib_metadata else "_" + # Test with a module that definitely exists, behavior hinges on requirement + with mock.patch(f"importlib{dot}metadata.version") as version: + version.return_value = "1.0.0" + math = lazy.load("math", require="somepkg >= 2.0") + assert isinstance(math, lazy.DelayedImportErrorModule) + + math = lazy.load("math", require="somepkg >= 1.0") + assert math.sin(math.pi) == pytest.approx(0, 1e-6) + + # We can fail even after a successful import + math = lazy.load("math", require="somepkg >= 2.0") + assert isinstance(math, lazy.DelayedImportErrorModule) + + # When a module can be loaded but the version can't be checked, + # raise a ValueError + with pytest.raises(ValueError): + lazy.load("math", require="somepkg >= 1.0") + + +def test_parallel_load(): + pytest.importorskip("numpy") + + subprocess.run( + [ + sys.executable, + os.path.join(os.path.dirname(__file__), "import_np_parallel.py"), + ] + ) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config-version.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config-version.cmake new file mode 100644 index 0000000000000000000000000000000000000000..766886dfce4f42104942671ee00387a3155babaf --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config-version.cmake @@ -0,0 +1,16 @@ +## Copyright 2009-2021 Intel Corporation +## SPDX-License-Identifier: Apache-2.0 + +SET(PACKAGE_VERSION 4.4.0) + +SET(PACKAGE_VERSION_EXACT 0) +SET(PACKAGE_VERSION_COMPATIBLE 0) + +IF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) + SET(PACKAGE_VERSION_EXACT 1) + SET(PACKAGE_VERSION_COMPATIBLE 1) +ENDIF() + +IF (PACKAGE_FIND_VERSION_MAJOR EQUAL 4 AND PACKAGE_FIND_VERSION VERSION_LESS PACKAGE_VERSION) + SET(PACKAGE_VERSION_COMPATIBLE 1) +ENDIF() diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config.cmake new file mode 100644 index 0000000000000000000000000000000000000000..8e29b83d3b25f5c9166fbd5f28beaee1e9d0e6d0 --- /dev/null +++ 
b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-config.cmake @@ -0,0 +1,112 @@ +## Copyright 2009-2021 Intel Corporation +## SPDX-License-Identifier: Apache-2.0 + +SET(EMBREE_ROOT_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../") +GET_FILENAME_COMPONENT(EMBREE_ROOT_DIR "${EMBREE_ROOT_DIR}" ABSOLUTE) + +SET(EMBREE_INCLUDE_DIRS "${EMBREE_ROOT_DIR}/include") +SET(EMBREE_LIBRARY "${EMBREE_ROOT_DIR}/lib/libembree4.a") +SET(EMBREE_LIBRARIES ${EMBREE_LIBRARY}) + +SET(EMBREE_VERSION 4.4.0) +SET(EMBREE_VERSION_MAJOR 4) +SET(EMBREE_VERSION_MINOR 4) +SET(EMBREE_VERSION_PATCH 0) +SET(EMBREE_VERSION_NOTE "") + +SET(EMBREE_MAX_ISA DEFAULT) +SET(EMBREE_ISA_SSE2 ON) +SET(EMBREE_ISA_SSE42 OFF) +SET(EMBREE_ISA_AVX OFF) +SET(EMBREE_ISA_AVX2 OFF) +SET(EMBREE_ISA_AVX512 OFF) +SET(EMBREE_ISA_AVX512SKX OFF) # just for compatibility +SET(EMBREE_ISA_NEON OFF) +SET(EMBREE_ISA_NEON2X OFF) + +SET(EMBREE_BUILD_TYPE Release) +SET(EMBREE_ISPC_SUPPORT OFF) +SET(EMBREE_STATIC_LIB ON) +SET(EMBREE_SYCL_SUPPORT OFF) +SET(EMBREE_SYCL_GEOMETRY_CALLBACK OFF) +SET(EMBREE_TUTORIALS OFF) + +SET(EMBREE_RAY_MASK ON) +SET(EMBREE_STAT_COUNTERS OFF) +SET(EMBREE_BACKFACE_CULLING OFF) +SET(EMBREE_FILTER_FUNCTION ON) +SET(EMBREE_IGNORE_INVALID_RAYS OFF) +SET(EMBREE_TASKING_SYSTEM INTERNAL) +SET(EMBREE_TBB_COMPONENT tbb) +SET(EMBREE_COMPACT_POLYS OFF) + +SET(EMBREE_GEOMETRY_TRIANGLE ON) +SET(EMBREE_GEOMETRY_QUAD ON) +SET(EMBREE_GEOMETRY_CURVE ON) +SET(EMBREE_GEOMETRY_SUBDIVISION ON) +SET(EMBREE_GEOMETRY_USER ON) +SET(EMBREE_GEOMETRY_INSTANCE ON) +SET(EMBREE_GEOMETRY_INSTANCE_ARRAY ON) +SET(EMBREE_GEOMETRY_GRID ON) +SET(EMBREE_GEOMETRY_POINT ON) + +SET(EMBREE_RAY_PACKETS ON) +SET(EMBREE_MAX_INSTANCE_LEVEL_COUNT 1) +SET(EMBREE_CURVE_SELF_INTERSECTION_AVOIDANCE_FACTOR 2.0) +SET(EMBREE_DISC_POINT_SELF_INTERSECTION_AVOIDANCE ON) +SET(EMBREE_MIN_WIDTH OFF) + +IF (EMBREE_STATIC_LIB AND (EMBREE_TASKING_SYSTEM STREQUAL "TBB")) + INCLUDE(CMakeFindDependencyMacro) + FIND_DEPENDENCY(TBB) +ENDIF() + +IF (EMBREE_STATIC_LIB) + + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/sys-targets.cmake") + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/math-targets.cmake") + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/simd-targets.cmake") + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/lexers-targets.cmake") + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/tasking-targets.cmake") + + IF (EMBREE_ISA_SSE42) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_sse42-targets.cmake") + ENDIF() + + IF (EMBREE_ISA_AVX) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_avx-targets.cmake") + ENDIF() + + IF (EMBREE_ISA_AVX2) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_avx2-targets.cmake") + ENDIF() + + IF (EMBREE_ISA_AVX512) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_avx512-targets.cmake") + ENDIF() + +ENDIF() + +IF (EMBREE_SYCL_SUPPORT) + + SET(EMBREE_SYCL_AOT_DEVICES ) + SET(EMBREE_SYCL_LARGEGRF OFF) + SET(EMBREE_SYCL_RT_VALIDATION_API OFF) + IF (EMBREE_SYCL_RT_VALIDATION_API) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_rthwif_sycl-targets.cmake") + ENDIF() + IF (EMBREE_STATIC_LIB) + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_rthwif-targets.cmake") + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/ze_wrapper-targets.cmake") + ENDIF() + INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree_sycl-targets.cmake") + +ENDIF() + +INCLUDE("${EMBREE_ROOT_DIR}/lib/cmake/embree-4.4.0/embree-targets.cmake") + + +SET(EMBREE_TESTING_ONLY_SYCL_TESTS ) 
+SET(EMBREE_TESTING_INTENSITY 0) +SET(EMBREE_TESTING_MEMCHECK ) +SET(EMBREE_TESTING_BENCHMARK ) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0466b3df647e618ec0859c140feacbc1a576a48d --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "embree" for configuration "Release" +set_property(TARGET embree APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(embree PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libembree4.a" + ) + +list(APPEND _cmake_import_check_targets embree ) +list(APPEND _cmake_import_check_files_for_embree "${_IMPORT_PREFIX}/lib/libembree4.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..0d99bbe678c146b14f18cb04b4ed6d4f938ec49b --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/embree-targets.cmake @@ -0,0 +1,124 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.12") + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.12...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS embree) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target embree +add_library(embree STATIC IMPORTED) + +set_target_properties(embree PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include;${_IMPORT_PREFIX}/include/" + INTERFACE_LINK_LIBRARIES "\$;\$;\$;\$;\$" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "include/" +) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/embree-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# Make sure the targets which have been exported in some other +# export set exist. 
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) +foreach(_target "sys" "math" "simd" "lexers" "tasking" ) + if(NOT TARGET "${_target}" ) + set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}") + endif() +endforeach() + +if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + if(CMAKE_FIND_PACKAGE_NAME) + set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE) + set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + else() + message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + endif() +endif() +unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..72c792e435d7001549c4816f4bbc4bd83d75d387 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "lexers" for configuration "Release" +set_property(TARGET lexers APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(lexers PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/liblexers.a" + ) + +list(APPEND _cmake_import_check_targets lexers ) +list(APPEND _cmake_import_check_files_for_lexers "${_IMPORT_PREFIX}/lib/liblexers.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..2973549de90fb518400e235795728bbf5bdac17f --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/lexers-targets.cmake @@ -0,0 +1,122 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.12") + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.12...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS lexers) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target lexers +add_library(lexers STATIC IMPORTED) + +set_target_properties(lexers PROPERTIES + INTERFACE_LINK_LIBRARIES "sys;math" +) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/lexers-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# Make sure the targets which have been exported in some other +# export set exist. 
+unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) +foreach(_target "sys" "math" ) + if(NOT TARGET "${_target}" ) + set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}") + endif() +endforeach() + +if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + if(CMAKE_FIND_PACKAGE_NAME) + set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE) + set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + else() + message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + endif() +endif() +unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..92ef1c4d6fa48026564309f78f4d44b7232fe353 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "math" for configuration "Release" +set_property(TARGET math APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(math PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libmath.a" + ) + +list(APPEND _cmake_import_check_targets math ) +list(APPEND _cmake_import_check_files_for_math "${_IMPORT_PREFIX}/lib/libmath.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..b50bb211afb43387185bd148e987745aa38c2866 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/math-targets.cmake @@ -0,0 +1,102 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.3") + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.3...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS math) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target math +add_library(math STATIC IMPORTED) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/math-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..6e206b0e5ad67fe151d3cb396081e9bf8865f4ac --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "simd" for configuration "Release" +set_property(TARGET simd APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(simd PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libsimd.a" + ) + +list(APPEND _cmake_import_check_targets simd ) +list(APPEND _cmake_import_check_files_for_simd "${_IMPORT_PREFIX}/lib/libsimd.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..91c19d01c6605e1e77414f15e7f8d330731aeb25 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/simd-targets.cmake @@ -0,0 +1,102 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.3") + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.3...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS simd) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target simd +add_library(simd STATIC IMPORTED) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/simd-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..edf5061a7a247dc24f0a9870b88717f8d742cb7a --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "sys" for configuration "Release" +set_property(TARGET sys APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(sys PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libsys.a" + ) + +list(APPEND _cmake_import_check_targets sys ) +list(APPEND _cmake_import_check_files_for_sys "${_IMPORT_PREFIX}/lib/libsys.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..5ad84f4f60038ed14c6ef043dc4808749a0a0077 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/sys-targets.cmake @@ -0,0 +1,106 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.12") + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.12...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS sys) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target sys +add_library(sys STATIC IMPORTED) + +set_target_properties(sys PROPERTIES + INTERFACE_LINK_LIBRARIES "-lpthread;dl" +) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/sys-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets-release.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d46b8fc09bb4f4588e5b60731a0550fe388f28e1 --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "tasking" for configuration "Release" +set_property(TARGET tasking APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(tasking PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtasking.a" + ) + +list(APPEND _cmake_import_check_targets tasking ) +list(APPEND _cmake_import_check_files_for_tasking "${_IMPORT_PREFIX}/lib/libtasking.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets.cmake b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..26d736051acba3999818c1f02f8f64b02187d17d --- /dev/null +++ b/lib/python3.10/site-packages/lib/cmake/embree-4.4.0/tasking-targets.cmake @@ -0,0 +1,102 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.3") + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.3...3.29) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS tasking) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target tasking +add_library(tasking STATIC IMPORTED) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/tasking-targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/lib/python3.10/site-packages/lib/libembree4.a b/lib/python3.10/site-packages/lib/libembree4.a new file mode 100644 index 0000000000000000000000000000000000000000..b78235c483cee3051d5acf35f301e15faed32c3e --- /dev/null +++ b/lib/python3.10/site-packages/lib/libembree4.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afc7075fd676b39d6fafc889ac49356b98f2c687657608eb4a0d8e7f174cc77f +size 18148084 diff --git a/lib/python3.10/site-packages/lib/liblexers.a b/lib/python3.10/site-packages/lib/liblexers.a new file mode 100644 index 0000000000000000000000000000000000000000..7c59a48b028f9a40f9ade5904ac8821816ff220c --- /dev/null +++ b/lib/python3.10/site-packages/lib/liblexers.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dc16296e04e0ae910f3c59df61f4256196b91480a621df53e20946003468d82 +size 154136 diff --git a/lib/python3.10/site-packages/lib/libsys.a b/lib/python3.10/site-packages/lib/libsys.a new file mode 100644 index 0000000000000000000000000000000000000000..8f749dceda775d33c40eb085b0cdc3a702aabaa0 --- /dev/null +++ b/lib/python3.10/site-packages/lib/libsys.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ede3073d766aa326654bee8919ba98cdde08143404c4fe5e05c34be1aa92e2 +size 154286 diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7d506319b23d7475f04833a679258e09d713bfb Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/_compat.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c45359bda8aa27bb43a839affce47cefe24db22c Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/_compat.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/_punycode.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/_punycode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..269277ca00d4a95eda4fd055f30c9194536cbaec Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/_punycode.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/main.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f57dc62d68cc819dbbec431f71162a94591b237a Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/main.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/parser_block.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_block.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..919d740100b65b6536b9b50d241a8dd894fdb627 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_block.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/parser_core.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_core.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5de1c5676e0cf4c37034e3018dfb9f1c0a9ea155 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_core.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/parser_inline.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_inline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..208eb127729471d2de9e3fbf67df30d06a245649 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/parser_inline.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/renderer.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/renderer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0906476d06ed24154b9252c83efdc0656a8149ae Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/renderer.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/ruler.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/ruler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ace584e65564c743b38aadf407b8be79241fca5 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/ruler.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/token.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/token.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fe6fde50591755462372f3cc32751970ab84c39 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/token.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/tree.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..685fb0a7f0860086b7fb2a0121c9dd19c016aedb Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/tree.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/__pycache__/utils.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8daa255a974857fef75b2478990db4a2af54f49a Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/__pycache__/utils.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/common/utils.py b/lib/python3.10/site-packages/markdown_it/common/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0d11e3e3eec312779e4c98fd2e6e492b9f7bd24c --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/common/utils.py @@ -0,0 +1,318 @@ +"""Utilities for parsing source text +""" +from __future__ import annotations + +import re +from typing import Match, TypeVar + +from .entities import entities + + +def charCodeAt(src: str, pos: int) -> int | None: + """ + Returns the Unicode value of the character at the specified location. + + @param - index The zero-based index of the desired character. + If there is no character at the specified index, NaN is returned. 
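+ + Illustrative example: charCodeAt("abc", 1) returns 98 (ord("b")), while + charCodeAt("abc", 10) returns None (this Python port returns None in + place of JavaScript's NaN).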
+ + This was added for compatibility with python + """ + try: + return ord(src[pos]) + except IndexError: + return None + + +def charStrAt(src: str, pos: int) -> str | None: + """ + Returns the Unicode value of the character at the specified location. + + @param - index The zero-based index of the desired character. + If there is no character at the specified index, NaN is returned. + + This was added for compatibility with python + """ + try: + return src[pos] + except IndexError: + return None + + +_ItemTV = TypeVar("_ItemTV") + + +def arrayReplaceAt( + src: list[_ItemTV], pos: int, newElements: list[_ItemTV] +) -> list[_ItemTV]: + """ + Remove element from array and put another array at those position. + Useful for some operations with tokens + """ + return src[:pos] + newElements + src[pos + 1 :] + + +def isValidEntityCode(c: int) -> bool: + # broken sequence + if c >= 0xD800 and c <= 0xDFFF: + return False + # never used + if c >= 0xFDD0 and c <= 0xFDEF: + return False + if ((c & 0xFFFF) == 0xFFFF) or ((c & 0xFFFF) == 0xFFFE): + return False + # control codes + if c >= 0x00 and c <= 0x08: + return False + if c == 0x0B: + return False + if c >= 0x0E and c <= 0x1F: + return False + if c >= 0x7F and c <= 0x9F: + return False + # out of range + if c > 0x10FFFF: + return False + return True + + +def fromCodePoint(c: int) -> str: + """Convert ordinal to unicode. + + Note, in the original Javascript two string characters were required, + for codepoints larger than `0xFFFF`. + But Python 3 can represent any unicode codepoint in one character. + """ + return chr(c) + + +# UNESCAPE_MD_RE = re.compile(r'\\([!"#$%&\'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])') +# ENTITY_RE_g = re.compile(r'&([a-z#][a-z0-9]{1,31})', re.IGNORECASE) +UNESCAPE_ALL_RE = re.compile( + r'\\([!"#$%&\'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])' + "|" + r"&([a-z#][a-z0-9]{1,31});", + re.IGNORECASE, +) +DIGITAL_ENTITY_BASE10_RE = re.compile(r"#([0-9]{1,8})") +DIGITAL_ENTITY_BASE16_RE = re.compile(r"#x([a-f0-9]{1,8})", re.IGNORECASE) + + +def replaceEntityPattern(match: str, name: str) -> str: + """Convert HTML entity patterns, + see https://spec.commonmark.org/0.30/#entity-references + """ + if name in entities: + return entities[name] + + code: None | int = None + if pat := DIGITAL_ENTITY_BASE10_RE.fullmatch(name): + code = int(pat.group(1), 10) + elif pat := DIGITAL_ENTITY_BASE16_RE.fullmatch(name): + code = int(pat.group(1), 16) + + if code is not None and isValidEntityCode(code): + return fromCodePoint(code) + + return match + + +def unescapeAll(string: str) -> str: + def replacer_func(match: Match[str]) -> str: + escaped = match.group(1) + if escaped: + return escaped + entity = match.group(2) + return replaceEntityPattern(match.group(), entity) + + if "\\" not in string and "&" not in string: + return string + return UNESCAPE_ALL_RE.sub(replacer_func, string) + + +ESCAPABLE = r"""\\!"#$%&'()*+,./:;<=>?@\[\]^`{}|_~-""" +ESCAPE_CHAR = re.compile(r"\\([" + ESCAPABLE + r"])") + + +def stripEscape(string: str) -> str: + """Strip escape \\ characters""" + return ESCAPE_CHAR.sub(r"\1", string) + + +def escapeHtml(raw: str) -> str: + """Replace special characters "&", "<", ">" and '"' to HTML-safe sequences.""" + # like html.escape, but without escaping single quotes + raw = raw.replace("&", "&amp;") # Must be done first!
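+ # Escaping "&" first means the ampersands introduced by the "&lt;", "&gt;" + # and "&quot;" replacements below are not themselves escaped a second time.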
+ raw = raw.replace("<", "&lt;") + raw = raw.replace(">", "&gt;") + raw = raw.replace('"', "&quot;") + return raw + + +# ////////////////////////////////////////////////////////////////////////////// + +REGEXP_ESCAPE_RE = re.compile(r"[.?*+^$[\]\\(){}|-]") + + +def escapeRE(string: str) -> str: + string = REGEXP_ESCAPE_RE.sub("\\$&", string) + return string + + +# ////////////////////////////////////////////////////////////////////////////// + + +def isSpace(code: int | None) -> bool: + """Check if character code is a whitespace.""" + return code in (0x09, 0x20) + + +def isStrSpace(ch: str | None) -> bool: + """Check if character is a whitespace.""" + return ch in ("\t", " ") + + +MD_WHITESPACE = { + 0x09, # \t + 0x0A, # \n + 0x0B, # \v + 0x0C, # \f + 0x0D, # \r + 0x20, # space + 0xA0, + 0x1680, + 0x202F, + 0x205F, + 0x3000, +} + + +def isWhiteSpace(code: int) -> bool: + r"""Zs (unicode class) || [\t\f\v\r\n]""" + if code >= 0x2000 and code <= 0x200A: + return True + return code in MD_WHITESPACE + + +# ////////////////////////////////////////////////////////////////////////////// + +UNICODE_PUNCT_RE = re.compile( + r"[!-#%-\*,-\/:;\?@\[-\]_\{\}\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166D\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4E\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]|\uD800[\uDD00-\uDD02\uDF9F\uDFD0]|\uD801\uDD6F|\uD802[\uDC57\uDD1F\uDD3F\uDE50-\uDE58\uDE7F\uDEF0-\uDEF6\uDF39-\uDF3F\uDF99-\uDF9C]|\uD803[\uDF55-\uDF59]|\uD804[\uDC47-\uDC4D\uDCBB\uDCBC\uDCBE-\uDCC1\uDD40-\uDD43\uDD74\uDD75\uDDC5-\uDDC8\uDDCD\uDDDB\uDDDD-\uDDDF\uDE38-\uDE3D\uDEA9]|\uD805[\uDC4B-\uDC4F\uDC5B\uDC5D\uDCC6\uDDC1-\uDDD7\uDE41-\uDE43\uDE60-\uDE6C\uDF3C-\uDF3E]|\uD806[\uDC3B\uDE3F-\uDE46\uDE9A-\uDE9C\uDE9E-\uDEA2]|\uD807[\uDC41-\uDC45\uDC70\uDC71\uDEF7\uDEF8]|\uD809[\uDC70-\uDC74]|\uD81A[\uDE6E\uDE6F\uDEF5\uDF37-\uDF3B\uDF44]|\uD81B[\uDE97-\uDE9A]|\uD82F\uDC9F|\uD836[\uDE87-\uDE8B]|\uD83A[\uDD5E\uDD5F]" # noqa: E501 +) + + +# Currently without astral characters support. +def isPunctChar(ch: str) -> bool: + """Check if character is a punctuation character.""" + return UNICODE_PUNCT_RE.search(ch) is not None + + +MD_ASCII_PUNCT = { + 0x21, # /* ! */ + 0x22, # /* " */ + 0x23, # /* # */ + 0x24, # /* $ */ + 0x25, # /* % */ + 0x26, # /* & */ + 0x27, # /* ' */ + 0x28, # /* ( */ + 0x29, # /* ) */ + 0x2A, # /* * */ + 0x2B, # /* + */ + 0x2C, # /* , */ + 0x2D, # /* - */ + 0x2E, # /* .
*/ + 0x2F, # /* / */ + 0x3A, # /* : */ + 0x3B, # /* ; */ + 0x3C, # /* < */ + 0x3D, # /* = */ + 0x3E, # /* > */ + 0x3F, # /* ? */ + 0x40, # /* @ */ + 0x5B, # /* [ */ + 0x5C, # /* \ */ + 0x5D, # /* ] */ + 0x5E, # /* ^ */ + 0x5F, # /* _ */ + 0x60, # /* ` */ + 0x7B, # /* { */ + 0x7C, # /* | */ + 0x7D, # /* } */ + 0x7E, # /* ~ */ +} + + +def isMdAsciiPunct(ch: int) -> bool: + """Markdown ASCII punctuation characters. + + :: + + !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \\, ], ^, _, `, {, |, }, or ~ + + See http://spec.commonmark.org/0.15/#ascii-punctuation-character + + Don't confuse with unicode punctuation !!! It lacks some chars in ascii range. + + """ # noqa: E501 + return ch in MD_ASCII_PUNCT + + +def normalizeReference(string: str) -> str: + """Helper to unify [reference labels].""" + # Trim and collapse whitespace + # + string = re.sub(r"\s+", " ", string.strip()) + + # In node v10 'ẞ'.toLowerCase() === 'Ṿ', which is presumed to be a bug + # fixed in v12 (couldn't find any details). + # + # So treat this one as a special case + # (remove this when node v10 is no longer supported). + # + # if ('ẞ'.toLowerCase() === 'Ṿ') { + # str = str.replace(/ẞ/g, 'ß') + # } + + # .toLowerCase().toUpperCase() should get rid of all differences + # between letter variants. + # + # Simple .toLowerCase() doesn't normalize 125 code points correctly, + # and .toUpperCase doesn't normalize 6 of them (list of exceptions: + # İ, ϴ, ẞ, Ω, K, Å - those are already uppercased, but have differently + # uppercased versions). + # + # Here's an example showing how it happens. Lets take greek letter omega: + # uppercase U+0398 (Θ), U+03f4 (ϴ) and lowercase U+03b8 (θ), U+03d1 (ϑ) + # + # Unicode entries: + # 0398;GREEK CAPITAL LETTER THETA;Lu;0;L;;;;;N;;;;03B8 + # 03B8;GREEK SMALL LETTER THETA;Ll;0;L;;;;;N;;;0398;;0398 + # 03D1;GREEK THETA SYMBOL;Ll;0;L;<compat> 03B8;;;;N;GREEK SMALL LETTER SCRIPT THETA;;0398;;0398 + # 03F4;GREEK CAPITAL THETA SYMBOL;Lu;0;L;<compat> 0398;;;;N;;;;03B8 + # + # Case-insensitive comparison should treat all of them as equivalent. + # + # But .toLowerCase() doesn't change ϑ (it's already lowercase), + # and .toUpperCase() doesn't change ϴ (already uppercase). + # + # Applying first lower then upper case normalizes any character: + # '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase() === '\u0398\u0398\u0398\u0398' + # + # Note: this is equivalent to unicode case folding; unicode normalization + # is a different step that is not required here.
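+ # + # The same trick carries over to Python's str.lower()/str.upper(): for + # example both "ẞ".lower().upper() and "ß".lower().upper() give "SS", so + # both spellings of a label collapse to the same key.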
+ # + # Final result should be uppercased, because it's later stored in an object + # (this avoid a conflict with Object.prototype members, + # most notably, `__proto__`) + # + return string.lower().upper() + + +LINK_OPEN_RE = re.compile(r"^<a[>\s]", flags=re.IGNORECASE) +LINK_CLOSE_RE = re.compile(r"^</a\s*>", flags=re.IGNORECASE) + + +def isLinkOpen(string: str) -> bool: + return bool(LINK_OPEN_RE.search(string)) + + +def isLinkClose(string: str) -> bool: + return bool(LINK_CLOSE_RE.search(string)) diff --git a/lib/python3.10/site-packages/markdown_it/helpers/__init__.py b/lib/python3.10/site-packages/markdown_it/helpers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3dbbdd1d480ecc5ace6529f9005d40d5985529ae --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/helpers/__init__.py @@ -0,0 +1,6 @@ +"""Functions for parsing Links """ +__all__ = ("parseLinkLabel", "parseLinkDestination", "parseLinkTitle") +from .parse_link_destination import parseLinkDestination +from .parse_link_label import parseLinkLabel +from .parse_link_title import parseLinkTitle diff --git a/lib/python3.10/site-packages/markdown_it/helpers/parse_link_label.py b/lib/python3.10/site-packages/markdown_it/helpers/parse_link_label.py new file mode 100644 index 0000000000000000000000000000000000000000..01c653c56be8a10a6018e8b18ad64af2542ed1ac --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/helpers/parse_link_label.py @@ -0,0 +1,43 @@ +""" +Parse link label + +this function assumes that first character ("[") already matches +returns the end of the label + +""" +from markdown_it.rules_inline import StateInline + + +def parseLinkLabel(state: StateInline, start: int, disableNested: bool = False) -> int: + labelEnd = -1 + oldPos = state.pos + found = False + + state.pos = start + 1 + level = 1 + + while state.pos < state.posMax: + marker = state.src[state.pos] + if marker == "]": + level -= 1 + if level == 0: + found = True + break + + prevPos = state.pos + state.md.inline.skipToken(state) + if marker == "[": + if prevPos == state.pos - 1: + # increase level if we find text `[`, + # which is not a part of any token + level += 1 + elif disableNested: + state.pos = oldPos + return -1 + if found: + labelEnd = state.pos + + # restore old state + state.pos = oldPos + + return labelEnd diff --git a/lib/python3.10/site-packages/markdown_it/helpers/parse_link_title.py b/lib/python3.10/site-packages/markdown_it/helpers/parse_link_title.py new file mode 100644 index 0000000000000000000000000000000000000000..8f589336f60ea16a7fdf73c023cad2e5092d58e3 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/helpers/parse_link_title.py @@ -0,0 +1,60 @@ +"""Parse link title """ +from ..common.utils import charCodeAt, unescapeAll + + +class _Result: + __slots__ = ("ok", "pos", "lines", "str") + + def __init__(self) -> None: + self.ok = False + self.pos = 0 + self.lines = 0 + self.str = "" + + def __str__(self) -> str: + return self.str + + +def parseLinkTitle(string: str, pos: int, maximum: int) -> _Result: + lines = 0 + start = pos + result = _Result() + + if pos >= maximum: + return result + + marker = charCodeAt(string, pos) + + # /* " */ /* ' */ /* ( */ + if marker != 0x22 and marker != 0x27 and marker != 0x28: + return result + + pos += 1 + + # if opening marker is "(", switch it to closing marker ")" + if marker == 0x28: + marker = 0x29 + + while pos < maximum: + code = charCodeAt(string, pos) + if code == marker: + title = string[start + 1 : pos] + title = unescapeAll(title) + result.pos =
pos + 1 + result.lines = lines + result.str = title + result.ok = True + return result + elif code == 0x28 and marker == 0x29: # /* ( */ /* ) */ + return result + elif code == 0x0A: + lines += 1 + elif code == 0x5C and pos + 1 < maximum: # /* \ */ + pos += 1 + if charCodeAt(string, pos) == 0x0A: + lines += 1 + + pos += 1 + + return result diff --git a/lib/python3.10/site-packages/markdown_it/presets/__init__.py b/lib/python3.10/site-packages/markdown_it/presets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6796a2d29eb524c5f35df79055d0969fca02ba --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/presets/__init__.py @@ -0,0 +1,28 @@ +__all__ = ("commonmark", "default", "zero", "js_default", "gfm_like") + +from . import commonmark, default, zero +from ..utils import PresetType + +js_default = default + + +class gfm_like: # noqa: N801 + """GitHub Flavoured Markdown (GFM) like. + + This adds the linkify, table and strikethrough components to CommmonMark. + + Note, it lacks task-list items and raw HTML filtering, + to meet the the full GFM specification + (see https://github.github.com/gfm/#autolinks-extension-). + """ + + @staticmethod + def make() -> PresetType: + config = commonmark.make() + config["components"]["core"]["rules"].append("linkify") + config["components"]["block"]["rules"].append("table") + config["components"]["inline"]["rules"].extend(["strikethrough", "linkify"]) + config["components"]["inline"]["rules2"].append("strikethrough") + config["options"]["linkify"] = True + config["options"]["html"] = True + return config diff --git a/lib/python3.10/site-packages/markdown_it/presets/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1091b709c5e0ae7dd4bae9f6eec4d7d1ad55023d Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/presets/__pycache__/commonmark.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/commonmark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f00fcb8861391d27c1a05736d3af1002726dae59 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/commonmark.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/presets/__pycache__/default.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/default.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bcaf714808f61702a45f2f646ecb625448f5c1d Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/default.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/presets/__pycache__/zero.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/zero.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d9ea4e1eb300253b50027a56ecdf080c80f9ad9 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/presets/__pycache__/zero.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/presets/commonmark.py b/lib/python3.10/site-packages/markdown_it/presets/commonmark.py new file mode 100644 index 0000000000000000000000000000000000000000..3990d4344aeb9e07449acf8aa749cb27b0a0e66c --- /dev/null +++ 
b/lib/python3.10/site-packages/markdown_it/presets/commonmark.py @@ -0,0 +1,74 @@ +"""Commonmark default options. + +This differs to presets.default, +primarily in that it allows HTML and does not enable components: + +- block: table +- inline: strikethrough +""" +from ..utils import PresetType + + +def make() -> PresetType: + return { + "options": { + "maxNesting": 20, # Internal protection, recursion limit + "html": True, # Enable HTML tags in source, + # this is just a shorthand for .enable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “”‘’ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": True, # Use '/' to close single tags (<br />) + "breaks": False, # Convert '\n' in paragraphs into <br>
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with PresetType: + return { + "options": { + "maxNesting": 100, # Internal protection, recursion limit + "html": False, # Enable HTML tags in source + # this is just a shorthand for .disable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules: + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “â€â€˜â€™ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": False, # Use '/' to close single tags (
) + "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with PresetType: + return { + "options": { + "maxNesting": 20, # Internal protection, recursion limit + "html": False, # Enable HTML tags in source + # this is just a shorthand for .disable(["html_inline", "html_block"]) + # used by the linkify rule: + "linkify": False, # autoconvert URL-like texts to links + # used by the replacements and smartquotes rules: + # Enable some language-neutral replacements + quotes beautification + "typographer": False, + # used by the smartquotes rule: + # Double + single quotes replacement pairs, when typographer enabled, + # and smartquotes on. Could be either a String or an Array. + # For example, you can use '«»„“' for Russian, '„“‚‘' for German, + # and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). + "quotes": "\u201c\u201d\u2018\u2019", # /* “â€â€˜â€™ */ + # Renderer specific; these options are used directly in the HTML renderer + "xhtmlOut": False, # Use '/' to close single tags (
) + "breaks": False, # Convert '\n' in paragraphs into
+ "langPrefix": "language-", # CSS language prefix for fenced blocks + # Highlighter function. Should return escaped HTML, + # or '' if the source string is not changed and should be escaped externally. + # If result starts with bool: + LOGGER.debug( + "entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent + ) + + oldLineMax = state.lineMax + pos = state.bMarks[startLine] + state.tShift[startLine] + max = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + # check the block quote marker + try: + if state.src[pos] != ">": + return False + except IndexError: + return False + pos += 1 + + # we know that it's going to be a valid blockquote, + # so no point trying to find the end of it in silent mode + if silent: + return True + + # set offset past spaces and ">" + initial = offset = state.sCount[startLine] + 1 + + try: + second_char: str | None = state.src[pos] + except IndexError: + second_char = None + + # skip one optional space after '>' + if second_char == " ": + # ' > test ' + # ^ -- position start of line here: + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + spaceAfterMarker = True + elif second_char == "\t": + spaceAfterMarker = True + + if (state.bsCount[startLine] + offset) % 4 == 3: + # ' >\t test ' + # ^ -- position start of line here (tab has width==1) + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + else: + # ' >\t test ' + # ^ -- position start of line here + shift bsCount slightly + # to make extra space appear + adjustTab = True + + else: + spaceAfterMarker = False + + oldBMarks = [state.bMarks[startLine]] + state.bMarks[startLine] = pos + + while pos < max: + ch = state.src[pos] + + if isStrSpace(ch): + if ch == "\t": + offset += ( + 4 + - (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4 + ) + else: + offset += 1 + + else: + break + + pos += 1 + + oldBSCount = [state.bsCount[startLine]] + state.bsCount[startLine] = ( + state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0) + ) + + lastLineEmpty = pos >= max + + oldSCount = [state.sCount[startLine]] + state.sCount[startLine] = offset - initial + + oldTShift = [state.tShift[startLine]] + state.tShift[startLine] = pos - state.bMarks[startLine] + + terminatorRules = state.md.block.ruler.getRules("blockquote") + + oldParentType = state.parentType + state.parentType = "blockquote" + + # Search the end of the block + # + # Block ends with either: + # 1. an empty line outside: + # ``` + # > test + # + # ``` + # 2. an empty line inside: + # ``` + # > + # test + # ``` + # 3. another tag: + # ``` + # > test + # - - - + # ``` + + # for (nextLine = startLine + 1; nextLine < endLine; nextLine++) { + nextLine = startLine + 1 + while nextLine < endLine: + # check if it's outdented, i.e. it's inside list item and indented + # less than said list item: + # + # ``` + # 1. anything + # > current blockquote + # 2. checking this line + # ``` + isOutdented = state.sCount[nextLine] < state.blkIndent + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + max = state.eMarks[nextLine] + + if pos >= max: + # Case 1: line is not inside the blockquote, and this line is empty. + break + + evaluatesTrue = state.src[pos] == ">" and not isOutdented + pos += 1 + if evaluatesTrue: + # This line is inside the blockquote. 
+ + # set offset past spaces and ">" + initial = offset = state.sCount[nextLine] + 1 + + try: + next_char: str | None = state.src[pos] + except IndexError: + next_char = None + + # skip one optional space after '>' + if next_char == " ": + # ' > test ' + # ^ -- position start of line here: + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + spaceAfterMarker = True + elif next_char == "\t": + spaceAfterMarker = True + + if (state.bsCount[nextLine] + offset) % 4 == 3: + # ' >\t test ' + # ^ -- position start of line here (tab has width==1) + pos += 1 + initial += 1 + offset += 1 + adjustTab = False + else: + # ' >\t test ' + # ^ -- position start of line here + shift bsCount slightly + # to make extra space appear + adjustTab = True + + else: + spaceAfterMarker = False + + oldBMarks.append(state.bMarks[nextLine]) + state.bMarks[nextLine] = pos + + while pos < max: + ch = state.src[pos] + + if isStrSpace(ch): + if ch == "\t": + offset += ( + 4 + - ( + offset + + state.bsCount[nextLine] + + (1 if adjustTab else 0) + ) + % 4 + ) + else: + offset += 1 + else: + break + + pos += 1 + + lastLineEmpty = pos >= max + + oldBSCount.append(state.bsCount[nextLine]) + state.bsCount[nextLine] = ( + state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0) + ) + + oldSCount.append(state.sCount[nextLine]) + state.sCount[nextLine] = offset - initial + + oldTShift.append(state.tShift[nextLine]) + state.tShift[nextLine] = pos - state.bMarks[nextLine] + + nextLine += 1 + continue + + # Case 2: line is not inside the blockquote, and the last line was empty. + if lastLineEmpty: + break + + # Case 3: another tag found. + terminate = False + + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + # Quirk to enforce "hard termination mode" for paragraphs; + # normally if you call `tokenize(state, startLine, nextLine)`, + # paragraphs will look below nextLine for paragraph continuation, + # but if blockquote is terminated by another tag, they shouldn't + state.lineMax = nextLine + + if state.blkIndent != 0: + # state.blkIndent was non-zero, we now set it to zero, + # so we need to re-calculate all offsets to appear as + # if indent wasn't changed + oldBMarks.append(state.bMarks[nextLine]) + oldBSCount.append(state.bsCount[nextLine]) + oldTShift.append(state.tShift[nextLine]) + oldSCount.append(state.sCount[nextLine]) + state.sCount[nextLine] -= state.blkIndent + + break + + oldBMarks.append(state.bMarks[nextLine]) + oldBSCount.append(state.bsCount[nextLine]) + oldTShift.append(state.tShift[nextLine]) + oldSCount.append(state.sCount[nextLine]) + + # A negative indentation means that this is a paragraph continuation + # + state.sCount[nextLine] = -1 + + nextLine += 1 + + oldIndent = state.blkIndent + state.blkIndent = 0 + + token = state.push("blockquote_open", "blockquote", 1) + token.markup = ">" + token.map = lines = [startLine, 0] + + state.md.block.tokenize(state, startLine, nextLine) + + token = state.push("blockquote_close", "blockquote", -1) + token.markup = ">" + + state.lineMax = oldLineMax + state.parentType = oldParentType + lines[1] = state.line + + # Restore original tShift; this might not be necessary since the parser + # has already been here, but just to make sure we can do that. 
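+ # The saved lists are parallel to the consumed lines: index i in each of + # them corresponds to source line startLine + i.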
+ for i, item in enumerate(oldTShift): + state.bMarks[i + startLine] = oldBMarks[i] + state.tShift[i + startLine] = item + state.sCount[i + startLine] = oldSCount[i] + state.bsCount[i + startLine] = oldBSCount[i] + + state.blkIndent = oldIndent + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/code.py b/lib/python3.10/site-packages/markdown_it/rules_block/code.py new file mode 100644 index 0000000000000000000000000000000000000000..89db9cec4e0fda2798446940397a965a2083f1a3 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/code.py @@ -0,0 +1,35 @@ +"""Code block (4 spaces padded).""" +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def code(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering code: %s, %s, %s, %s", state, startLine, endLine, silent) + + if not state.is_code_block(startLine): + return False + + last = nextLine = startLine + 1 + + while nextLine < endLine: + if state.isEmpty(nextLine): + nextLine += 1 + continue + + if state.is_code_block(nextLine): + nextLine += 1 + last = nextLine + continue + + break + + state.line = last + + token = state.push("code_block", "code", 0) + token.content = state.getLines(startLine, last, 4 + state.blkIndent, False) + "\n" + token.map = [startLine, state.line] + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/fence.py b/lib/python3.10/site-packages/markdown_it/rules_block/fence.py new file mode 100644 index 0000000000000000000000000000000000000000..263f1b8de8dcdd0dd736eeafab2d9da34ec2c205 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/fence.py @@ -0,0 +1,101 @@ +# fences (``` lang, ~~~ lang) +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering fence: %s, %s, %s, %s", state, startLine, endLine, silent) + + haveEndMarker = False + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + if pos + 3 > maximum: + return False + + marker = state.src[pos] + + if marker not in ("~", "`"): + return False + + # scan marker length + mem = pos + pos = state.skipCharsStr(pos, marker) + + length = pos - mem + + if length < 3: + return False + + markup = state.src[mem:pos] + params = state.src[pos:maximum] + + if marker == "`" and marker in params: + return False + + # Since start is found, we can report success here in validation mode + if silent: + return True + + # search end of block + nextLine = startLine + + while True: + nextLine += 1 + if nextLine >= endLine: + # unclosed block should be autoclosed by end of document. 
+ # also block seems to be autoclosed by end of parent + break + + pos = mem = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + + if pos < maximum and state.sCount[nextLine] < state.blkIndent: + # non-empty line with negative indent should stop the list: + # - ``` + # test + break + + try: + if state.src[pos] != marker: + continue + except IndexError: + break + + if state.is_code_block(nextLine): + continue + + pos = state.skipCharsStr(pos, marker) + + # closing code fence must be at least as long as the opening one + if pos - mem < length: + continue + + # make sure tail has spaces only + pos = state.skipSpaces(pos) + + if pos < maximum: + continue + + haveEndMarker = True + # found! + break + + # If a fence has heading spaces, they should be removed from its inner block + length = state.sCount[startLine] + + state.line = nextLine + (1 if haveEndMarker else 0) + + token = state.push("fence", "code", 0) + token.info = params + token.content = state.getLines(startLine + 1, nextLine, length, True) + token.markup = markup + token.map = [startLine, state.line] + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/heading.py b/lib/python3.10/site-packages/markdown_it/rules_block/heading.py new file mode 100644 index 0000000000000000000000000000000000000000..850ffb504d1b1aa0198672573ec61ff3ab3d0ed7 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/heading.py @@ -0,0 +1,68 @@ +""" Atex heading (#, ##, ...) """ +from __future__ import annotations + +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def heading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering heading: %s, %s, %s, %s", state, startLine, endLine, silent) + + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + ch: str | None = state.src[pos] + + if ch != "#" or pos >= maximum: + return False + + # count heading level + level = 1 + pos += 1 + try: + ch = state.src[pos] + except IndexError: + ch = None + while ch == "#" and pos < maximum and level <= 6: + level += 1 + pos += 1 + try: + ch = state.src[pos] + except IndexError: + ch = None + + if level > 6 or (pos < maximum and not isStrSpace(ch)): + return False + + if silent: + return True + + # Let's cut tails like ' ### ' from the end of string + + maximum = state.skipSpacesBack(maximum, pos) + tmp = state.skipCharsStrBack(maximum, "#", pos) + if tmp > pos and isStrSpace(state.src[tmp - 1]): + maximum = tmp + + state.line = startLine + 1 + + token = state.push("heading_open", "h" + str(level), 1) + token.markup = "########"[:level] + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = state.src[pos:maximum].strip() + token.map = [startLine, state.line] + token.children = [] + + token = state.push("heading_close", "h" + str(level), -1) + token.markup = "########"[:level] + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/hr.py b/lib/python3.10/site-packages/markdown_it/rules_block/hr.py new file mode 100644 index 0000000000000000000000000000000000000000..16df05f2ab2e9e171b4f3f5612bba62aab78b2a3 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/hr.py @@ -0,0 +1,55 @@ +"""Horizontal rule + +At least 3 of these characters on a line * - _ +""" +import logging + +from 
..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def hr(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering hr: %s, %s, %s, %s", state, startLine, endLine, silent) + + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + try: + marker = state.src[pos] + except IndexError: + return False + pos += 1 + + # Check hr marker + if marker not in ("*", "-", "_"): + return False + + # markers can be mixed with spaces, but there should be at least 3 of them + + cnt = 1 + while pos < maximum: + ch = state.src[pos] + pos += 1 + if ch != marker and not isStrSpace(ch): + return False + if ch == marker: + cnt += 1 + + if cnt < 3: + return False + + if silent: + return True + + state.line = startLine + 1 + + token = state.push("hr", "hr", 0) + token.map = [startLine, state.line] + token.markup = marker * (cnt + 1) + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/html_block.py b/lib/python3.10/site-packages/markdown_it/rules_block/html_block.py new file mode 100644 index 0000000000000000000000000000000000000000..3d43f6ee1deb527a42f4d99da40bd052d9b02886 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/html_block.py @@ -0,0 +1,90 @@ +# HTML block +from __future__ import annotations + +import logging +import re + +from ..common.html_blocks import block_names +from ..common.html_re import HTML_OPEN_CLOSE_TAG_STR +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + +# An array of opening and corresponding closing sequences for html tags, +# last argument defines whether it can terminate a paragraph or not +HTML_SEQUENCES: list[tuple[re.Pattern[str], re.Pattern[str], bool]] = [ + ( + re.compile(r"^<(script|pre|style|textarea)(?=(\s|>|$))", re.IGNORECASE), + re.compile(r"<\/(script|pre|style|textarea)>", re.IGNORECASE), + True, + ), + (re.compile(r"^<!--"), re.compile(r"-->"), True), + (re.compile(r"^<\?"), re.compile(r"\?>"), True), + (re.compile(r"^<![A-Z]"), re.compile(r">"), True), + (re.compile(r"^<!\[CDATA\["), re.compile(r"\]\]>"), True), + ( + re.compile("^</?(" + "|".join(block_names) + ")(?=(\\s|/?>|$))", re.IGNORECASE), + re.compile(r"^$"), + True, + ), + (re.compile(HTML_OPEN_CLOSE_TAG_STR + "\\s*$"), re.compile(r"^$"), False), +] + + +def html_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering html_block: %s, %s, %s, %s", state, startLine, endLine, silent + ) + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + if state.is_code_block(startLine): + return False + + if not state.md.options.get("html", None): + return False + + if state.src[pos] != "<": + return False + + lineText = state.src[pos:maximum] + + html_seq = None + for HTML_SEQUENCE in HTML_SEQUENCES: + if HTML_SEQUENCE[0].search(lineText): + html_seq = HTML_SEQUENCE + break + + if not html_seq: + return False + + if silent: + # true if this sequence can be a terminator, false otherwise + return html_seq[2] + + nextLine = startLine + 1 + + # If we are here - we detected HTML block. + # Let's roll down till block end.
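+ # For the last two entries of HTML_SEQUENCES the closing pattern is r"^$", + # so those blocks simply run until the first blank line.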
+ if not html_seq[1].search(lineText): + while nextLine < endLine: + if state.sCount[nextLine] < state.blkIndent: + break + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + lineText = state.src[pos:maximum] + + if html_seq[1].search(lineText): + if len(lineText) != 0: + nextLine += 1 + break + nextLine += 1 + + state.line = nextLine + + token = state.push("html_block", "", 0) + token.map = [startLine, nextLine] + token.content = state.getLines(startLine, nextLine, state.blkIndent, True) + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/lheading.py b/lib/python3.10/site-packages/markdown_it/rules_block/lheading.py new file mode 100644 index 0000000000000000000000000000000000000000..3522207abb680510decdd6c54d0be81401128ad7 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/lheading.py @@ -0,0 +1,86 @@ +# lheading (---, ==) +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def lheading(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering lheading: %s, %s, %s, %s", state, startLine, endLine, silent) + + level = None + nextLine = startLine + 1 + ruler = state.md.block.ruler + terminatorRules = ruler.getRules("paragraph") + + if state.is_code_block(startLine): + return False + + oldParentType = state.parentType + state.parentType = "paragraph" # use paragraph to match terminatorRules + + # jump line-by-line until empty one or EOF + while nextLine < endLine and not state.isEmpty(nextLine): + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.sCount[nextLine] - state.blkIndent > 3: + nextLine += 1 + continue + + # Check for underline in setext header + if state.sCount[nextLine] >= state.blkIndent: + pos = state.bMarks[nextLine] + state.tShift[nextLine] + maximum = state.eMarks[nextLine] + + if pos < maximum: + marker = state.src[pos] + + if marker in ("-", "="): + pos = state.skipCharsStr(pos, marker) + pos = state.skipSpaces(pos) + + # /* = */ + if pos >= maximum: + level = 1 if marker == "=" else 2 + break + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + nextLine += 1 + continue + + # Some tags can terminate paragraph without empty line. 
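+ # (the rules probed here are the block rules registered under the + # "paragraph" alt name, each called in silent validation mode)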
+ terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + if terminate: + break + + nextLine += 1 + + if not level: + # Didn't find valid underline + return False + + content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + + state.line = nextLine + 1 + + token = state.push("heading_open", "h" + str(level), 1) + token.markup = marker + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = content + token.map = [startLine, state.line - 1] + token.children = [] + + token = state.push("heading_close", "h" + str(level), -1) + token.markup = marker + + state.parentType = oldParentType + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/list.py b/lib/python3.10/site-packages/markdown_it/rules_block/list.py new file mode 100644 index 0000000000000000000000000000000000000000..d8070d747035dd6b43f11c4bd88d05533b22bc5b --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/list.py @@ -0,0 +1,345 @@ +# Lists +import logging + +from ..common.utils import isStrSpace +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +# Search `[-+*][\n ]`, returns next pos after marker on success +# or -1 on fail. +def skipBulletListMarker(state: StateBlock, startLine: int) -> int: + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + + try: + marker = state.src[pos] + except IndexError: + return -1 + pos += 1 + + if marker not in ("*", "-", "+"): + return -1 + + if pos < maximum: + ch = state.src[pos] + + if not isStrSpace(ch): + # " -test " - is not a list item + return -1 + + return pos + + +# Search `\d+[.)][\n ]`, returns next pos after marker on success +# or -1 on fail. 
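+# For example, "12. item" succeeds and returns the position just after the +# dot, "12.item" fails because the marker must be followed by a space, and +# markers of 10 or more digits are rejected.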
+def skipOrderedListMarker(state: StateBlock, startLine: int) -> int: + start = state.bMarks[startLine] + state.tShift[startLine] + pos = start + maximum = state.eMarks[startLine] + + # List marker should have at least 2 chars (digit + dot) + if pos + 1 >= maximum: + return -1 + + ch = state.src[pos] + pos += 1 + + ch_ord = ord(ch) + # /* 0 */ /* 9 */ + if ch_ord < 0x30 or ch_ord > 0x39: + return -1 + + while True: + # EOL -> fail + if pos >= maximum: + return -1 + + ch = state.src[pos] + pos += 1 + + # /* 0 */ /* 9 */ + ch_ord = ord(ch) + if ch_ord >= 0x30 and ch_ord <= 0x39: + # List marker should have no more than 9 digits + # (prevents integer overflow in browsers) + if pos - start >= 10: + return -1 + + continue + + # found valid marker + if ch in (")", "."): + break + + return -1 + + if pos < maximum: + ch = state.src[pos] + + if not isStrSpace(ch): + # " 1.test " - is not a list item + return -1 + + return pos + + +def markTightParagraphs(state: StateBlock, idx: int) -> None: + level = state.level + 2 + + i = idx + 2 + length = len(state.tokens) - 2 + while i < length: + if state.tokens[i].level == level and state.tokens[i].type == "paragraph_open": + state.tokens[i + 2].hidden = True + state.tokens[i].hidden = True + i += 2 + i += 1 + + +def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug("entering list: %s, %s, %s, %s", state, startLine, endLine, silent) + + isTerminatingParagraph = False + tight = True + + if state.is_code_block(startLine): + return False + + # Special case: + # - item 1 + # - item 2 + # - item 3 + # - item 4 + # - this one is a paragraph continuation + if ( + state.listIndent >= 0 + and state.sCount[startLine] - state.listIndent >= 4 + and state.sCount[startLine] < state.blkIndent + ): + return False + + # limit conditions when list can interrupt + # a paragraph (validation mode only) + # Next list item should still terminate previous list item + # + # This code can fail if plugins use blkIndent as well as lists, + # but I hope the spec gets fixed long before that happens. + # + if ( + silent + and state.parentType == "paragraph" + and state.sCount[startLine] >= state.blkIndent + ): + isTerminatingParagraph = True + + # Detect list type and position after marker + posAfterMarker = skipOrderedListMarker(state, startLine) + if posAfterMarker >= 0: + isOrdered = True + start = state.bMarks[startLine] + state.tShift[startLine] + markerValue = int(state.src[start : posAfterMarker - 1]) + + # If we're starting a new ordered list right after + # a paragraph, it should start with 1. + if isTerminatingParagraph and markerValue != 1: + return False + else: + posAfterMarker = skipBulletListMarker(state, startLine) + if posAfterMarker >= 0: + isOrdered = False + else: + return False + + # If we're starting a new unordered list right after + # a paragraph, first line should not be empty. + if ( + isTerminatingParagraph + and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine] + ): + return False + + # We should terminate list on style change. Remember first one to compare. 
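+ # Here "style" means the marker character itself: "-", "*" or "+" for + # bullet lists, "." or ")" for ordered lists.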
+ markerChar = state.src[posAfterMarker - 1] + + # For validation mode we can terminate immediately + if silent: + return True + + # Start list + listTokIdx = len(state.tokens) + + if isOrdered: + token = state.push("ordered_list_open", "ol", 1) + if markerValue != 1: + token.attrs = {"start": markerValue} + + else: + token = state.push("bullet_list_open", "ul", 1) + + token.map = listLines = [startLine, 0] + token.markup = markerChar + + # + # Iterate list items + # + + nextLine = startLine + prevEmptyEnd = False + terminatorRules = state.md.block.ruler.getRules("list") + + oldParentType = state.parentType + state.parentType = "list" + + while nextLine < endLine: + pos = posAfterMarker + maximum = state.eMarks[nextLine] + + initial = offset = ( + state.sCount[nextLine] + + posAfterMarker + - (state.bMarks[startLine] + state.tShift[startLine]) + ) + + while pos < maximum: + ch = state.src[pos] + + if ch == "\t": + offset += 4 - (offset + state.bsCount[nextLine]) % 4 + elif ch == " ": + offset += 1 + else: + break + + pos += 1 + + contentStart = pos + + # trimming space in "- \n 3" case, indent is 1 here + indentAfterMarker = 1 if contentStart >= maximum else offset - initial + + # If we have more than 4 spaces, the indent is 1 + # (the rest is just indented code block) + if indentAfterMarker > 4: + indentAfterMarker = 1 + + # " - test" + # ^^^^^ - calculating total length of this thing + indent = initial + indentAfterMarker + + # Run subparser & write tokens + token = state.push("list_item_open", "li", 1) + token.markup = markerChar + token.map = itemLines = [startLine, 0] + if isOrdered: + token.info = state.src[start : posAfterMarker - 1] + + # change current state, then restore it after parser subcall + oldTight = state.tight + oldTShift = state.tShift[startLine] + oldSCount = state.sCount[startLine] + + # - example list + # ^ listIndent position will be here + # ^ blkIndent position will be here + # + oldListIndent = state.listIndent + state.listIndent = state.blkIndent + state.blkIndent = indent + + state.tight = True + state.tShift[startLine] = contentStart - state.bMarks[startLine] + state.sCount[startLine] = offset + + if contentStart >= maximum and state.isEmpty(startLine + 1): + # workaround for this case + # (list item is empty, list terminates before "foo"): + # ~~~~~~~~ + # - + # + # foo + # ~~~~~~~~ + state.line = min(state.line + 2, endLine) + else: + # NOTE in list.js this was: + # state.md.block.tokenize(state, startLine, endLine, True) + # but tokeniz does not take the final parameter + state.md.block.tokenize(state, startLine, endLine) + + # If any of list item is tight, mark list as tight + if (not state.tight) or prevEmptyEnd: + tight = False + + # Item become loose if finish with empty line, + # but we should filter last element, because it means list finish + prevEmptyEnd = (state.line - startLine) > 1 and state.isEmpty(state.line - 1) + + state.blkIndent = state.listIndent + state.listIndent = oldListIndent + state.tShift[startLine] = oldTShift + state.sCount[startLine] = oldSCount + state.tight = oldTight + + token = state.push("list_item_close", "li", -1) + token.markup = markerChar + + nextLine = startLine = state.line + itemLines[1] = nextLine + + if nextLine >= endLine: + break + + contentStart = state.bMarks[startLine] + + # + # Try to check if list is terminated or continued. 
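+ # The item loop only continues while the next line keeps the block indent, + # is not an indented code block, matches no terminator rule, and repeats + # the same marker style; each check below breaks out otherwise.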
+ # + if state.sCount[nextLine] < state.blkIndent: + break + + if state.is_code_block(startLine): + break + + # fail if terminating block found + terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + + # fail if list has another type + if isOrdered: + posAfterMarker = skipOrderedListMarker(state, nextLine) + if posAfterMarker < 0: + break + start = state.bMarks[nextLine] + state.tShift[nextLine] + else: + posAfterMarker = skipBulletListMarker(state, nextLine) + if posAfterMarker < 0: + break + + if markerChar != state.src[posAfterMarker - 1]: + break + + # Finalize list + if isOrdered: + token = state.push("ordered_list_close", "ol", -1) + else: + token = state.push("bullet_list_close", "ul", -1) + + token.markup = markerChar + + listLines[1] = nextLine + state.line = nextLine + + state.parentType = oldParentType + + # mark paragraphs tight if needed + if tight: + markTightParagraphs(state, listTokIdx) + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/paragraph.py b/lib/python3.10/site-packages/markdown_it/rules_block/paragraph.py new file mode 100644 index 0000000000000000000000000000000000000000..5388a4b1468defccb5ed4d7d68f3e5ac1a747178 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/paragraph.py @@ -0,0 +1,65 @@ +"""Paragraph.""" +import logging + +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def paragraph(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering paragraph: %s, %s, %s, %s", state, startLine, endLine, silent + ) + + nextLine = startLine + 1 + ruler = state.md.block.ruler + terminatorRules = ruler.getRules("paragraph") + endLine = state.lineMax + + oldParentType = state.parentType + state.parentType = "paragraph" + + # jump line-by-line until empty one or EOF + while nextLine < endLine: + if state.isEmpty(nextLine): + break + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.sCount[nextLine] - state.blkIndent > 3: + nextLine += 1 + continue + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + nextLine += 1 + continue + + # Some tags can terminate paragraph without empty line. 
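+        # (e.g. a paragraph line followed directly by "# heading" or "> quote":
+        # the heading/blockquote rules are registered as "paragraph" terminators)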
+ terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + + nextLine += 1 + + content = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + + state.line = nextLine + + token = state.push("paragraph_open", "p", 1) + token.map = [startLine, state.line] + + token = state.push("inline", "", 0) + token.content = content + token.map = [startLine, state.line] + token.children = [] + + token = state.push("paragraph_close", "p", -1) + + state.parentType = oldParentType + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/reference.py b/lib/python3.10/site-packages/markdown_it/rules_block/reference.py new file mode 100644 index 0000000000000000000000000000000000000000..b77944b2ee8133727da934c3c7dbe800f2fc28ce --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/reference.py @@ -0,0 +1,215 @@ +import logging + +from ..common.utils import charCodeAt, isSpace, normalizeReference +from .state_block import StateBlock + +LOGGER = logging.getLogger(__name__) + + +def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) -> bool: + LOGGER.debug( + "entering reference: %s, %s, %s, %s", state, startLine, _endLine, silent + ) + + lines = 0 + pos = state.bMarks[startLine] + state.tShift[startLine] + maximum = state.eMarks[startLine] + nextLine = startLine + 1 + + if state.is_code_block(startLine): + return False + + if state.src[pos] != "[": + return False + + # Simple check to quickly interrupt scan on [link](url) at the start of line. + # Can be useful on practice: https:#github.com/markdown-it/markdown-it/issues/54 + while pos < maximum: + # /* ] */ /* \ */ /* : */ + if state.src[pos] == "]" and state.src[pos - 1] != "\\": + if pos + 1 == maximum: + return False + if state.src[pos + 1] != ":": + return False + break + pos += 1 + + endLine = state.lineMax + + # jump line-by-line until empty one or EOF + terminatorRules = state.md.block.ruler.getRules("reference") + + oldParentType = state.parentType + state.parentType = "reference" + + while nextLine < endLine and not state.isEmpty(nextLine): + # this would be a code block normally, but after paragraph + # it's considered a lazy continuation regardless of what's there + if state.sCount[nextLine] - state.blkIndent > 3: + nextLine += 1 + continue + + # quirk for blockquotes, this line should already be checked by that rule + if state.sCount[nextLine] < 0: + nextLine += 1 + continue + + # Some tags can terminate paragraph without empty line. 
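+        # (the same terminator mechanism stops a multi-line reference definition,
+        # e.g. a "---" thematic break on the line after "[label]:")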
+ terminate = False + for terminatorRule in terminatorRules: + if terminatorRule(state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + + nextLine += 1 + + string = state.getLines(startLine, nextLine, state.blkIndent, False).strip() + maximum = len(string) + + labelEnd = None + pos = 1 + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x5B: # /* [ */ + return False + elif ch == 0x5D: # /* ] */ + labelEnd = pos + break + elif ch == 0x0A: # /* \n */ + lines += 1 + elif ch == 0x5C: # /* \ */ + pos += 1 + if pos < maximum and charCodeAt(string, pos) == 0x0A: + lines += 1 + pos += 1 + + if ( + labelEnd is None or labelEnd < 0 or charCodeAt(string, labelEnd + 1) != 0x3A + ): # /* : */ + return False + + # [label]: destination 'title' + # ^^^ skip optional whitespace here + pos = labelEnd + 2 + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x0A: + lines += 1 + elif isSpace(ch): + pass + else: + break + pos += 1 + + # [label]: destination 'title' + # ^^^^^^^^^^^ parse this + res = state.md.helpers.parseLinkDestination(string, pos, maximum) + if not res.ok: + return False + + href = state.md.normalizeLink(res.str) + if not state.md.validateLink(href): + return False + + pos = res.pos + lines += res.lines + + # save cursor state, we could require to rollback later + destEndPos = pos + destEndLineNo = lines + + # [label]: destination 'title' + # ^^^ skipping those spaces + start = pos + while pos < maximum: + ch = charCodeAt(string, pos) + if ch == 0x0A: + lines += 1 + elif isSpace(ch): + pass + else: + break + pos += 1 + + # [label]: destination 'title' + # ^^^^^^^ parse this + res = state.md.helpers.parseLinkTitle(string, pos, maximum) + if pos < maximum and start != pos and res.ok: + title = res.str + pos = res.pos + lines += res.lines + else: + title = "" + pos = destEndPos + lines = destEndLineNo + + # skip trailing spaces until the rest of the line + while pos < maximum: + ch = charCodeAt(string, pos) + if not isSpace(ch): + break + pos += 1 + + if pos < maximum and charCodeAt(string, pos) != 0x0A and title: + # garbage at the end of the line after title, + # but it could still be a valid reference if we roll back + title = "" + pos = destEndPos + lines = destEndLineNo + while pos < maximum: + ch = charCodeAt(string, pos) + if not isSpace(ch): + break + pos += 1 + + if pos < maximum and charCodeAt(string, pos) != 0x0A: + # garbage at the end of the line + return False + + label = normalizeReference(string[1:labelEnd]) + if not label: + # CommonMark 0.20 disallows empty labels + return False + + # Reference can not terminate anything. This check is for safety only. 
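+    # (in validation mode we stop here; a non-silent run instead records the
+    # definition, e.g. `[foo]: /url "title"` ends up under the normalized key
+    # state.env["references"]["FOO"] with href "/url" and title "title")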
+ if silent: + return True + + if "references" not in state.env: + state.env["references"] = {} + + state.line = startLine + lines + 1 + + # note, this is not part of markdown-it JS, but is useful for renderers + if state.md.options.get("inline_definitions", False): + token = state.push("definition", "", 0) + token.meta = { + "id": label, + "title": title, + "url": href, + "label": string[1:labelEnd], + } + token.map = [startLine, state.line] + + if label not in state.env["references"]: + state.env["references"][label] = { + "title": title, + "href": href, + "map": [startLine, state.line], + } + else: + state.env.setdefault("duplicate_refs", []).append( + { + "title": title, + "href": href, + "label": label, + "map": [startLine, state.line], + } + ) + + state.parentType = oldParentType + + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/state_block.py b/lib/python3.10/site-packages/markdown_it/rules_block/state_block.py new file mode 100644 index 0000000000000000000000000000000000000000..445ad265a01e3f1dededf9f72848686a2b5ee901 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/state_block.py @@ -0,0 +1,261 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Literal + +from ..common.utils import isStrSpace +from ..ruler import StateBase +from ..token import Token +from ..utils import EnvType + +if TYPE_CHECKING: + from markdown_it.main import MarkdownIt + + +class StateBlock(StateBase): + def __init__( + self, src: str, md: MarkdownIt, env: EnvType, tokens: list[Token] + ) -> None: + self.src = src + + # link to parser instance + self.md = md + + self.env = env + + # + # Internal state variables + # + + self.tokens = tokens + + self.bMarks: list[int] = [] # line begin offsets for fast jumps + self.eMarks: list[int] = [] # line end offsets for fast jumps + # offsets of the first non-space characters (tabs not expanded) + self.tShift: list[int] = [] + self.sCount: list[int] = [] # indents for each line (tabs expanded) + + # An amount of virtual spaces (tabs expanded) between beginning + # of each line (bMarks) and real beginning of that line. + # + # It exists only as a hack because blockquotes override bMarks + # losing information in the process. + # + # It's used only when expanding tabs, you can think about it as + # an initial tab length, e.g. bsCount=21 applied to string `\t123` + # means first tab should be expanded to 4-21%4 === 3 spaces. + # + self.bsCount: list[int] = [] + + # block parser variables + self.blkIndent = 0 # required block content indent (for example, if we are + # inside a list, it would be positioned after list marker) + self.line = 0 # line index in src + self.lineMax = 0 # lines count + self.tight = False # loose/tight mode for lists + self.ddIndent = -1 # indent of the current dd block (-1 if there isn't any) + self.listIndent = -1 # indent of the current list block (-1 if there isn't any) + + # can be 'blockquote', 'list', 'root', 'paragraph' or 'reference' + # used in lists to determine if they interrupt a paragraph + self.parentType = "root" + + self.level = 0 + + # renderer + self.result = "" + + # Create caches + # Generate markers. 
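+        # e.g. for src == "  a\n\tb\n" this loop produces
+        #     bMarks == [0, 4, 7]   eMarks == [3, 6, 7]
+        #     tShift == [2, 1, 0]   sCount == [2, 4, 0]
+        # (the final entry of each is the fake line appended below)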
+ indent_found = False + + start = pos = indent = offset = 0 + length = len(self.src) + + for pos, character in enumerate(self.src): + if not indent_found: + if isStrSpace(character): + indent += 1 + + if character == "\t": + offset += 4 - offset % 4 + else: + offset += 1 + continue + else: + indent_found = True + + if character == "\n" or pos == length - 1: + if character != "\n": + pos += 1 + self.bMarks.append(start) + self.eMarks.append(pos) + self.tShift.append(indent) + self.sCount.append(offset) + self.bsCount.append(0) + + indent_found = False + indent = 0 + offset = 0 + start = pos + 1 + + # Push fake entry to simplify cache bounds checks + self.bMarks.append(length) + self.eMarks.append(length) + self.tShift.append(0) + self.sCount.append(0) + self.bsCount.append(0) + + self.lineMax = len(self.bMarks) - 1 # don't count last fake line + + # pre-check if code blocks are enabled, to speed up is_code_block method + self._code_enabled = "code" in self.md["block"].ruler.get_active_rules() + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}" + f"(line={self.line},level={self.level},tokens={len(self.tokens)})" + ) + + def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token: + """Push new token to "stream".""" + token = Token(ttype, tag, nesting) + token.block = True + if nesting < 0: + self.level -= 1 # closing tag + token.level = self.level + if nesting > 0: + self.level += 1 # opening tag + self.tokens.append(token) + return token + + def isEmpty(self, line: int) -> bool: + """.""" + return (self.bMarks[line] + self.tShift[line]) >= self.eMarks[line] + + def skipEmptyLines(self, from_pos: int) -> int: + """.""" + while from_pos < self.lineMax: + try: + if (self.bMarks[from_pos] + self.tShift[from_pos]) < self.eMarks[ + from_pos + ]: + break + except IndexError: + pass + from_pos += 1 + return from_pos + + def skipSpaces(self, pos: int) -> int: + """Skip spaces from given position.""" + while True: + try: + current = self.src[pos] + except IndexError: + break + if not isStrSpace(current): + break + pos += 1 + return pos + + def skipSpacesBack(self, pos: int, minimum: int) -> int: + """Skip spaces from given position in reverse.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if not isStrSpace(self.src[pos]): + return pos + 1 + return pos + + def skipChars(self, pos: int, code: int) -> int: + """Skip character code from given position.""" + while True: + try: + current = self.srcCharCode[pos] + except IndexError: + break + if current != code: + break + pos += 1 + return pos + + def skipCharsStr(self, pos: int, ch: str) -> int: + """Skip character string from given position.""" + while True: + try: + current = self.src[pos] + except IndexError: + break + if current != ch: + break + pos += 1 + return pos + + def skipCharsBack(self, pos: int, code: int, minimum: int) -> int: + """Skip character code reverse from given position - 1.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if code != self.srcCharCode[pos]: + return pos + 1 + return pos + + def skipCharsStrBack(self, pos: int, ch: str, minimum: int) -> int: + """Skip character string reverse from given position - 1.""" + if pos <= minimum: + return pos + while pos > minimum: + pos -= 1 + if ch != self.src[pos]: + return pos + 1 + return pos + + def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str: + """Cut lines range from source.""" + line = begin + if begin >= end: + return "" + + queue = [""] * (end - begin) + + i = 1 + 
while line < end: + lineIndent = 0 + lineStart = first = self.bMarks[line] + last = ( + self.eMarks[line] + 1 + if line + 1 < end or keepLastLF + else self.eMarks[line] + ) + + while (first < last) and (lineIndent < indent): + ch = self.src[first] + if isStrSpace(ch): + if ch == "\t": + lineIndent += 4 - (lineIndent + self.bsCount[line]) % 4 + else: + lineIndent += 1 + elif first - lineStart < self.tShift[line]: + lineIndent += 1 + else: + break + first += 1 + + if lineIndent > indent: + # partially expanding tabs in code blocks, e.g '\t\tfoobar' + # with indent=2 becomes ' \tfoobar' + queue[i - 1] = (" " * (lineIndent - indent)) + self.src[first:last] + else: + queue[i - 1] = self.src[first:last] + + line += 1 + i += 1 + + return "".join(queue) + + def is_code_block(self, line: int) -> bool: + """Check if line is a code block, + i.e. the code block rule is enabled and text is indented by more than 3 spaces. + """ + return self._code_enabled and (self.sCount[line] - self.blkIndent) >= 4 diff --git a/lib/python3.10/site-packages/markdown_it/rules_block/table.py b/lib/python3.10/site-packages/markdown_it/rules_block/table.py new file mode 100644 index 0000000000000000000000000000000000000000..4b666c1d5d9ede544b27c38d7ffb5d850edac70b --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_block/table.py @@ -0,0 +1,236 @@ +# GFM table, https://github.github.com/gfm/#tables-extension- +from __future__ import annotations + +import re + +from ..common.utils import charStrAt, isStrSpace +from .state_block import StateBlock + +headerLineRe = re.compile(r"^:?-+:?$") +enclosingPipesRe = re.compile(r"^\||\|$") + + +def getLine(state: StateBlock, line: int) -> str: + pos = state.bMarks[line] + state.tShift[line] + maximum = state.eMarks[line] + + # return state.src.substr(pos, max - pos) + return state.src[pos:maximum] + + +def escapedSplit(string: str) -> list[str]: + result: list[str] = [] + pos = 0 + max = len(string) + isEscaped = False + lastPos = 0 + current = "" + ch = charStrAt(string, pos) + + while pos < max: + if ch == "|": + if not isEscaped: + # pipe separating cells, '|' + result.append(current + string[lastPos:pos]) + current = "" + lastPos = pos + 1 + else: + # escaped pipe, '\|' + current += string[lastPos : pos - 1] + lastPos = pos + + isEscaped = ch == "\\" + pos += 1 + + ch = charStrAt(string, pos) + + result.append(current + string[lastPos:]) + + return result + + +def table(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool: + tbodyLines = None + + # should have at least two lines + if startLine + 2 > endLine: + return False + + nextLine = startLine + 1 + + if state.sCount[nextLine] < state.blkIndent: + return False + + if state.is_code_block(nextLine): + return False + + # first character of the second line should be '|', '-', ':', + # and no other characters are allowed but spaces; + # basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp + + pos = state.bMarks[nextLine] + state.tShift[nextLine] + if pos >= state.eMarks[nextLine]: + return False + first_ch = state.src[pos] + pos += 1 + if first_ch not in ("|", "-", ":"): + return False + + if pos >= state.eMarks[nextLine]: + return False + second_ch = state.src[pos] + pos += 1 + if second_ch not in ("|", "-", ":") and not isStrSpace(second_ch): + return False + + # if first character is '-', then second character must not be a space + # (due to parsing ambiguity with list) + if first_ch == "-" and isStrSpace(second_ch): + return False + + while pos < state.eMarks[nextLine]: + ch = 
state.src[pos] + + if ch not in ("|", "-", ":") and not isStrSpace(ch): + return False + + pos += 1 + + lineText = getLine(state, startLine + 1) + + columns = lineText.split("|") + aligns = [] + for i in range(len(columns)): + t = columns[i].strip() + if not t: + # allow empty columns before and after table, but not in between columns; + # e.g. allow ` |---| `, disallow ` ---||--- ` + if i == 0 or i == len(columns) - 1: + continue + else: + return False + + if not headerLineRe.search(t): + return False + if charStrAt(t, len(t) - 1) == ":": + aligns.append("center" if charStrAt(t, 0) == ":" else "right") + elif charStrAt(t, 0) == ":": + aligns.append("left") + else: + aligns.append("") + + lineText = getLine(state, startLine).strip() + if "|" not in lineText: + return False + if state.is_code_block(startLine): + return False + columns = escapedSplit(lineText) + if columns and columns[0] == "": + columns.pop(0) + if columns and columns[-1] == "": + columns.pop() + + # header row will define an amount of columns in the entire table, + # and align row should be exactly the same (the rest of the rows can differ) + columnCount = len(columns) + if columnCount == 0 or columnCount != len(aligns): + return False + + if silent: + return True + + oldParentType = state.parentType + state.parentType = "table" + + # use 'blockquote' lists for termination because it's + # the most similar to tables + terminatorRules = state.md.block.ruler.getRules("blockquote") + + token = state.push("table_open", "table", 1) + token.map = tableLines = [startLine, 0] + + token = state.push("thead_open", "thead", 1) + token.map = [startLine, startLine + 1] + + token = state.push("tr_open", "tr", 1) + token.map = [startLine, startLine + 1] + + for i in range(len(columns)): + token = state.push("th_open", "th", 1) + if aligns[i]: + token.attrs = {"style": "text-align:" + aligns[i]} + + token = state.push("inline", "", 0) + # note in markdown-it this map was removed in v12.0.0 however, we keep it, + # since it is helpful to propagate to children tokens + token.map = [startLine, startLine + 1] + token.content = columns[i].strip() + token.children = [] + + token = state.push("th_close", "th", -1) + + token = state.push("tr_close", "tr", -1) + token = state.push("thead_close", "thead", -1) + + nextLine = startLine + 2 + while nextLine < endLine: + if state.sCount[nextLine] < state.blkIndent: + break + + terminate = False + for i in range(len(terminatorRules)): + if terminatorRules[i](state, nextLine, endLine, True): + terminate = True + break + + if terminate: + break + lineText = getLine(state, nextLine).strip() + if not lineText: + break + if state.is_code_block(nextLine): + break + columns = escapedSplit(lineText) + if columns and columns[0] == "": + columns.pop(0) + if columns and columns[-1] == "": + columns.pop() + + if nextLine == startLine + 2: + token = state.push("tbody_open", "tbody", 1) + token.map = tbodyLines = [startLine + 2, 0] + + token = state.push("tr_open", "tr", 1) + token.map = [nextLine, nextLine + 1] + + for i in range(columnCount): + token = state.push("td_open", "td", 1) + if aligns[i]: + token.attrs = {"style": "text-align:" + aligns[i]} + + token = state.push("inline", "", 0) + # note in markdown-it this map was removed in v12.0.0 however, we keep it, + # since it is helpful to propagate to children tokens + token.map = [nextLine, nextLine + 1] + try: + token.content = columns[i].strip() if columns[i] else "" + except IndexError: + token.content = "" + token.children = [] + + token = 
state.push("td_close", "td", -1) + + token = state.push("tr_close", "tr", -1) + + nextLine += 1 + + if tbodyLines: + token = state.push("tbody_close", "tbody", -1) + tbodyLines[1] = nextLine + + token = state.push("table_close", "table", -1) + + tableLines[1] = nextLine + state.parentType = oldParentType + state.line = nextLine + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__init__.py b/lib/python3.10/site-packages/markdown_it/rules_core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9c5368c2b694231000626a03594ebad75fe8c71 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/__init__.py @@ -0,0 +1,19 @@ +__all__ = ( + "StateCore", + "normalize", + "block", + "inline", + "replace", + "smartquotes", + "linkify", + "text_join", +) + +from .block import block +from .inline import inline +from .linkify import linkify +from .normalize import normalize +from .replacements import replace +from .smartquotes import smartquotes +from .state_core import StateCore +from .text_join import text_join diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39a5039f7c429bbbc81c49848ebef4ed68ba8660 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7853eff67f5496fd36b793791d4efe4b8c660eeb Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/block.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5048528e3a08ed14ca8893d8765b62c9031ff447 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/inline.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..642423b038121bd56f3c0d305937646f727dbadd Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/linkify.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e75416ab5331a1ded8a5a06687e05baea20a374 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/normalize.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb3b14b8d39fa7cbd4f145e500d6ed2cf77b3cf0 Binary files /dev/null and 
b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/replacements.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57bcd8b00d9eee72d41c7cabb2c7c2aa5ef475c5 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/smartquotes.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf8caed6614d2718176a68eeee03c1ddab603b55 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/state_core.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a484c715131816b1ec51925a3b647609d812c3a Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_core/__pycache__/text_join.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/block.py b/lib/python3.10/site-packages/markdown_it/rules_core/block.py new file mode 100644 index 0000000000000000000000000000000000000000..a6c3bb8d7ae18880fd638690fb5b09beb78b103c --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/block.py @@ -0,0 +1,13 @@ +from ..token import Token +from .state_core import StateCore + + +def block(state: StateCore) -> None: + if state.inlineMode: + token = Token("inline", "", 0) + token.content = state.src + token.map = [0, 1] + token.children = [] + state.tokens.append(token) + else: + state.md.block.parse(state.src, state.md, state.env, state.tokens) diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/inline.py b/lib/python3.10/site-packages/markdown_it/rules_core/inline.py new file mode 100644 index 0000000000000000000000000000000000000000..c3fd0b5e25dda5d8a5a644cc9e460d0f92ae2d1d --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/inline.py @@ -0,0 +1,10 @@ +from .state_core import StateCore + + +def inline(state: StateCore) -> None: + """Parse inlines""" + for token in state.tokens: + if token.type == "inline": + if token.children is None: + token.children = [] + state.md.inline.parse(token.content, state.md, state.env, token.children) diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/linkify.py b/lib/python3.10/site-packages/markdown_it/rules_core/linkify.py new file mode 100644 index 0000000000000000000000000000000000000000..efbc9d4c9b1cbada1c936401b3421d73fbff5b64 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/linkify.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +import re +from typing import Protocol + +from ..common.utils import arrayReplaceAt, isLinkClose, isLinkOpen +from ..token import Token +from .state_core import StateCore + +HTTP_RE = re.compile(r"^http://") +MAILTO_RE = re.compile(r"^mailto:") +TEST_MAILTO_RE = re.compile(r"^mailto:", flags=re.IGNORECASE) + + +def linkify(state: StateCore) -> None: + """Rule for identifying plain-text links.""" + if not state.md.options.linkify: + return + 
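+    # NOTE: the optional linkify-it-py dependency is required here,
+    # e.g. installed via `pip install markdown-it-py[linkify]`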
+ if not state.md.linkify: + raise ModuleNotFoundError("Linkify enabled but not installed.") + + for inline_token in state.tokens: + if inline_token.type != "inline" or not state.md.linkify.pretest( + inline_token.content + ): + continue + + tokens = inline_token.children + + htmlLinkLevel = 0 + + # We scan from the end, to keep position when new tags added. + # Use reversed logic in links start/end match + assert tokens is not None + i = len(tokens) + while i >= 1: + i -= 1 + assert isinstance(tokens, list) + currentToken = tokens[i] + + # Skip content of markdown links + if currentToken.type == "link_close": + i -= 1 + while ( + tokens[i].level != currentToken.level + and tokens[i].type != "link_open" + ): + i -= 1 + continue + + # Skip content of html tag links + if currentToken.type == "html_inline": + if isLinkOpen(currentToken.content) and htmlLinkLevel > 0: + htmlLinkLevel -= 1 + if isLinkClose(currentToken.content): + htmlLinkLevel += 1 + if htmlLinkLevel > 0: + continue + + if currentToken.type == "text" and state.md.linkify.test( + currentToken.content + ): + text = currentToken.content + links: list[_LinkType] = state.md.linkify.match(text) or [] + + # Now split string to nodes + nodes = [] + level = currentToken.level + lastPos = 0 + + # forbid escape sequence at the start of the string, + # this avoids http\://example.com/ from being linkified as + # http://example.com/ + if ( + links + and links[0].index == 0 + and i > 0 + and tokens[i - 1].type == "text_special" + ): + links = links[1:] + + for link in links: + url = link.url + fullUrl = state.md.normalizeLink(url) + if not state.md.validateLink(fullUrl): + continue + + urlText = link.text + + # Linkifier might send raw hostnames like "example.com", where url + # starts with domain name. So we prepend http:// in those cases, + # and remove it afterwards. 
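+                    # (e.g. matched text "example.com" -> href "http://example.com",
+                    # while the visible link text stays "example.com")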
+                    if not link.schema:
+                        urlText = HTTP_RE.sub(
+                            "", state.md.normalizeLinkText("http://" + urlText)
+                        )
+                    elif link.schema == "mailto:" and TEST_MAILTO_RE.search(urlText):
+                        urlText = MAILTO_RE.sub(
+                            "", state.md.normalizeLinkText("mailto:" + urlText)
+                        )
+                    else:
+                        urlText = state.md.normalizeLinkText(urlText)
+
+                    pos = link.index
+
+                    if pos > lastPos:
+                        token = Token("text", "", 0)
+                        token.content = text[lastPos:pos]
+                        token.level = level
+                        nodes.append(token)
+
+                    token = Token("link_open", "a", 1)
+                    token.attrs = {"href": fullUrl}
+                    token.level = level
+                    level += 1
+                    token.markup = "linkify"
+                    token.info = "auto"
+                    nodes.append(token)
+
+                    token = Token("text", "", 0)
+                    token.content = urlText
+                    token.level = level
+                    nodes.append(token)
+
+                    token = Token("link_close", "a", -1)
+                    level -= 1
+                    token.level = level
+                    token.markup = "linkify"
+                    token.info = "auto"
+                    nodes.append(token)
+
+                    lastPos = link.last_index
+
+                if lastPos < len(text):
+                    token = Token("text", "", 0)
+                    token.content = text[lastPos:]
+                    token.level = level
+                    nodes.append(token)
+
+                inline_token.children = tokens = arrayReplaceAt(tokens, i, nodes)
+
+
+class _LinkType(Protocol):
+    url: str
+    text: str
+    index: int
+    last_index: int
+    schema: str | None
diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/normalize.py b/lib/python3.10/site-packages/markdown_it/rules_core/normalize.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9f8d0d5729b2497b5f4b611b0451dfe92872506
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_core/normalize.py
@@ -0,0 +1,18 @@
+"""Normalize input string."""
+import re
+
+from .state_core import StateCore
+
+# https://spec.commonmark.org/0.29/#line-ending
+NEWLINES_RE = re.compile(r"\r\n?|\n")
+NULL_RE = re.compile(r"\0")
+
+
+def normalize(state: StateCore) -> None:
+    # Normalize newlines
+    string = NEWLINES_RE.sub("\n", state.src)
+
+    # Replace NULL characters
+    string = NULL_RE.sub("\uFFFD", string)
+
+    state.src = string
diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/replacements.py b/lib/python3.10/site-packages/markdown_it/rules_core/replacements.py
new file mode 100644
index 0000000000000000000000000000000000000000..14912e17ac8eed885a2fd07c74141804f3f9fa72
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_core/replacements.py
@@ -0,0 +1,126 @@
+"""Simple typographic replacements
+
+* ``(c)``, ``(C)`` → ©
+* ``(tm)``, ``(TM)`` → ™
+* ``(r)``, ``(R)`` → ®
+* ``+-`` → ±
+* ``...`` → …
+* ``?....`` → ?..
+* ``!....`` → !..
+* ``????????`` → ???
+* ``!!!!!`` → !!!
+* ``,,,`` → ,
+* ``--`` → &ndash
+* ``---`` → &mdash
+"""
+from __future__ import annotations
+
+import logging
+import re
+
+from ..token import Token
+from .state_core import StateCore
+
+LOGGER = logging.getLogger(__name__)
+
+# TODO:
+# - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
+# - multiplication 2 x 4 -> 2 × 4
+
+RARE_RE = re.compile(r"\+-|\.\.|\?\?\?\?|!!!!|,,|--")
+
+# Workaround for phantomjs - need regex without /g flag,
+# or root check will fail every second time
+# SCOPED_ABBR_TEST_RE = r"\((c|tm|r)\)"
+
+SCOPED_ABBR_RE = re.compile(r"\((c|tm|r)\)", flags=re.IGNORECASE)
+
+PLUS_MINUS_RE = re.compile(r"\+-")
+
+ELLIPSIS_RE = re.compile(r"\.{2,}")
+
+ELLIPSIS_QUESTION_EXCLAMATION_RE = re.compile(r"([?!])…")
+
+QUESTION_EXCLAMATION_RE = re.compile(r"([?!]){4,}")
+
+COMMA_RE = re.compile(r",{2,}")
+
+EM_DASH_RE = re.compile(r"(^|[^-])---(?=[^-]|$)", flags=re.MULTILINE)
+
+EN_DASH_RE = re.compile(r"(^|\s)--(?=\s|$)", flags=re.MULTILINE)
+
+EN_DASH_INDENT_RE = re.compile(r"(^|[^-\s])--(?=[^-\s]|$)", flags=re.MULTILINE)
+
+
+SCOPED_ABBR = {"c": "©", "r": "®", "tm": "™"}
+
+
+def replaceFn(match: re.Match[str]) -> str:
+    return SCOPED_ABBR[match.group(1).lower()]
+
+
+def replace_scoped(inlineTokens: list[Token]) -> None:
+    inside_autolink = 0
+
+    for token in inlineTokens:
+        if token.type == "text" and not inside_autolink:
+            token.content = SCOPED_ABBR_RE.sub(replaceFn, token.content)
+
+        if token.type == "link_open" and token.info == "auto":
+            inside_autolink -= 1
+
+        if token.type == "link_close" and token.info == "auto":
+            inside_autolink += 1
+
+
+def replace_rare(inlineTokens: list[Token]) -> None:
+    inside_autolink = 0
+
+    for token in inlineTokens:
+        if (
+            token.type == "text"
+            and (not inside_autolink)
+            and RARE_RE.search(token.content)
+        ):
+            # +- -> ±
+            token.content = PLUS_MINUS_RE.sub("±", token.content)
+
+            # .., ..., ....... -> …
+            token.content = ELLIPSIS_RE.sub("…", token.content)
+
+            # but ?..... & !..... -> ?.. & !..
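+            # e.g. "what!....." became "what!…" above and now turns into "what!..";
+            #      "really?????" is capped to "really???"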
+ token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content) + token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content) + + # ,, ,,, ,,,, -> , + token.content = COMMA_RE.sub(",", token.content) + + # em-dash + token.content = EM_DASH_RE.sub("\\1\u2014", token.content) + + # en-dash + token.content = EN_DASH_RE.sub("\\1\u2013", token.content) + token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content) + + if token.type == "link_open" and token.info == "auto": + inside_autolink -= 1 + + if token.type == "link_close" and token.info == "auto": + inside_autolink += 1 + + +def replace(state: StateCore) -> None: + if not state.md.options.typographer: + return + + for token in state.tokens: + if token.type != "inline": + continue + if token.children is None: + continue + + if SCOPED_ABBR_RE.search(token.content): + replace_scoped(token.children) + + if RARE_RE.search(token.content): + replace_rare(token.children) diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/smartquotes.py b/lib/python3.10/site-packages/markdown_it/rules_core/smartquotes.py new file mode 100644 index 0000000000000000000000000000000000000000..c98fbd71e7d2e644ca7c6ac95827962342326059 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/smartquotes.py @@ -0,0 +1,202 @@ +"""Convert straight quotation marks to typographic ones +""" +from __future__ import annotations + +import re +from typing import Any + +from ..common.utils import charCodeAt, isMdAsciiPunct, isPunctChar, isWhiteSpace +from ..token import Token +from .state_core import StateCore + +QUOTE_TEST_RE = re.compile(r"['\"]") +QUOTE_RE = re.compile(r"['\"]") +APOSTROPHE = "\u2019" # ’ + + +def replaceAt(string: str, index: int, ch: str) -> str: + # When the index is negative, the behavior is different from the js version. + # But basically, the index will not be negative. + assert index >= 0 + return string[:index] + ch + string[index + 1 :] + + +def process_inlines(tokens: list[Token], state: StateCore) -> None: + stack: list[dict[str, Any]] = [] + + for i, token in enumerate(tokens): + thisLevel = token.level + + j = 0 + for j in range(len(stack))[::-1]: + if stack[j]["level"] <= thisLevel: + break + else: + # When the loop is terminated without a "break". + # Subtract 1 to get the same index as the js version. 
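+            # (e.g. len(stack) == 3 with no break: the js loop ends at j == -1,
+            # while Python's leaves j == 0, so subtract one to agree)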
+ j -= 1 + + stack = stack[: j + 1] + + if token.type != "text": + continue + + text = token.content + pos = 0 + maximum = len(text) + + while pos < maximum: + goto_outer = False + lastIndex = pos + t = QUOTE_RE.search(text[lastIndex:]) + if not t: + break + + canOpen = canClose = True + pos = t.start(0) + lastIndex + 1 + isSingle = t.group(0) == "'" + + # Find previous character, + # default to space if it's the beginning of the line + lastChar: None | int = 0x20 + + if t.start(0) + lastIndex - 1 >= 0: + lastChar = charCodeAt(text, t.start(0) + lastIndex - 1) + else: + for j in range(i)[::-1]: + if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak": + break + # should skip all tokens except 'text', 'html_inline' or 'code_inline' + if not tokens[j].content: + continue + + lastChar = charCodeAt(tokens[j].content, len(tokens[j].content) - 1) + break + + # Find next character, + # default to space if it's the end of the line + nextChar: None | int = 0x20 + + if pos < maximum: + nextChar = charCodeAt(text, pos) + else: + for j in range(i + 1, len(tokens)): + # nextChar defaults to 0x20 + if tokens[j].type == "softbreak" or tokens[j].type == "hardbreak": + break + # should skip all tokens except 'text', 'html_inline' or 'code_inline' + if not tokens[j].content: + continue + + nextChar = charCodeAt(tokens[j].content, 0) + break + + isLastPunctChar = lastChar is not None and ( + isMdAsciiPunct(lastChar) or isPunctChar(chr(lastChar)) + ) + isNextPunctChar = nextChar is not None and ( + isMdAsciiPunct(nextChar) or isPunctChar(chr(nextChar)) + ) + + isLastWhiteSpace = lastChar is not None and isWhiteSpace(lastChar) + isNextWhiteSpace = nextChar is not None and isWhiteSpace(nextChar) + + if isNextWhiteSpace: # noqa: SIM114 + canOpen = False + elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar): + canOpen = False + + if isLastWhiteSpace: # noqa: SIM114 + canClose = False + elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar): + canClose = False + + if nextChar == 0x22 and t.group(0) == '"': # 0x22: " # noqa: SIM102 + if ( + lastChar is not None and lastChar >= 0x30 and lastChar <= 0x39 + ): # 0x30: 0, 0x39: 9 + # special case: 1"" - count first quote as an inch + canClose = canOpen = False + + if canOpen and canClose: + # Replace quotes in the middle of punctuation sequence, but not + # in the middle of the words, i.e.: + # + # 1. foo " bar " baz - not replaced + # 2. foo-"-bar-"-baz - replaced + # 3. 
foo"bar"baz - not replaced + canOpen = isLastPunctChar + canClose = isNextPunctChar + + if not canOpen and not canClose: + # middle of word + if isSingle: + token.content = replaceAt( + token.content, t.start(0) + lastIndex, APOSTROPHE + ) + continue + + if canClose: + # this could be a closing quote, rewind the stack to get a match + for j in range(len(stack))[::-1]: + item = stack[j] + if stack[j]["level"] < thisLevel: + break + if item["single"] == isSingle and stack[j]["level"] == thisLevel: + item = stack[j] + + if isSingle: + openQuote = state.md.options.quotes[2] + closeQuote = state.md.options.quotes[3] + else: + openQuote = state.md.options.quotes[0] + closeQuote = state.md.options.quotes[1] + + # replace token.content *before* tokens[item.token].content, + # because, if they are pointing at the same token, replaceAt + # could mess up indices when quote length != 1 + token.content = replaceAt( + token.content, t.start(0) + lastIndex, closeQuote + ) + tokens[item["token"]].content = replaceAt( + tokens[item["token"]].content, item["pos"], openQuote + ) + + pos += len(closeQuote) - 1 + if item["token"] == i: + pos += len(openQuote) - 1 + + text = token.content + maximum = len(text) + + stack = stack[:j] + goto_outer = True + break + if goto_outer: + goto_outer = False + continue + + if canOpen: + stack.append( + { + "token": i, + "pos": t.start(0) + lastIndex, + "single": isSingle, + "level": thisLevel, + } + ) + elif canClose and isSingle: + token.content = replaceAt( + token.content, t.start(0) + lastIndex, APOSTROPHE + ) + + +def smartquotes(state: StateCore) -> None: + if not state.md.options.typographer: + return + + for token in state.tokens: + if token.type != "inline" or not QUOTE_RE.search(token.content): + continue + if token.children is not None: + process_inlines(token.children, state) diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/state_core.py b/lib/python3.10/site-packages/markdown_it/rules_core/state_core.py new file mode 100644 index 0000000000000000000000000000000000000000..a938041d992fdf7ae3f2843a2e0f9ef298c45790 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/state_core.py @@ -0,0 +1,25 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from ..ruler import StateBase +from ..token import Token +from ..utils import EnvType + +if TYPE_CHECKING: + from markdown_it import MarkdownIt + + +class StateCore(StateBase): + def __init__( + self, + src: str, + md: MarkdownIt, + env: EnvType, + tokens: list[Token] | None = None, + ) -> None: + self.src = src + self.md = md # link to parser instance + self.env = env + self.tokens: list[Token] = tokens or [] + self.inlineMode = False diff --git a/lib/python3.10/site-packages/markdown_it/rules_core/text_join.py b/lib/python3.10/site-packages/markdown_it/rules_core/text_join.py new file mode 100644 index 0000000000000000000000000000000000000000..d54ccbbc376e7c50cf95227a36a11000b9d80496 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_core/text_join.py @@ -0,0 +1,34 @@ +"""Join raw text tokens with the rest of the text + +This is set as a separate rule to provide an opportunity for plugins +to run text replacements after text join, but before escape join. + +For example, `\\:)` shouldn't be replaced with an emoji. 
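+
+For instance, the children text("foo"), text_special(":"), text(")") collapse
+into a single text token with content "foo:)".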
+""" +from __future__ import annotations + +from ..token import Token +from .state_core import StateCore + + +def text_join(state: StateCore) -> None: + """Join raw text for escape sequences (`text_special`) tokens with the rest of the text""" + + for inline_token in state.tokens[:]: + if inline_token.type != "inline": + continue + + # convert text_special to text and join all adjacent text nodes + new_tokens: list[Token] = [] + for child_token in inline_token.children or []: + if child_token.type == "text_special": + child_token.type = "text" + if ( + child_token.type == "text" + and new_tokens + and new_tokens[-1].type == "text" + ): + new_tokens[-1].content += child_token.content + else: + new_tokens.append(child_token) + inline_token.children = new_tokens diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__init__.py b/lib/python3.10/site-packages/markdown_it/rules_inline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a8026ec3a7afd6f35140bb074780da05c6e86af --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/__init__.py @@ -0,0 +1,31 @@ +__all__ = ( + "StateInline", + "text", + "fragments_join", + "link_pairs", + "linkify", + "escape", + "newline", + "backtick", + "emphasis", + "image", + "link", + "autolink", + "entity", + "html_inline", + "strikethrough", +) +from . import emphasis, strikethrough +from .autolink import autolink +from .backticks import backtick +from .balance_pairs import link_pairs +from .entity import entity +from .escape import escape +from .fragments_join import fragments_join +from .html_inline import html_inline +from .image import image +from .link import link +from .linkify import linkify +from .newline import newline +from .state_inline import StateInline +from .text import text diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b14a525f0d62f51ade73cdf2c79916c7786be299 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c7769534e28eebdd84ca129bd9398e5108a167f Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/autolink.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78570462117c167a743180519ef2fe67da262fbd Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/backticks.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..819deb998b808e4c51c7772752ed77ca20115b96 Binary files /dev/null and 
b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/balance_pairs.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b8d47400a9bc1a9f9e1ce1be941f1b81553d708 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/emphasis.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7eee1aca4289d9a438704d9635d0fd37a14e826d Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/entity.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..228822ca0d9955d21ff6af45de585eea3b6ab655 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/escape.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c0adfad11c8a55a6d2231240f4a7448b85ebe53 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/fragments_join.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2974e5174309b9836251dd535b9bc60bc5bd1a2 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/html_inline.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a734d1f13861515d8702f4de35b4ba6d7cbf5cd Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/image.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5b1e3509bdf30071bd6255b097d88115434d332 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/link.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..600167229929aa210c6ecdb5a8dc33a82932ea61 Binary files /dev/null and 
b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/linkify.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d18137c2555777427b4a7454df9342831687df2 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/newline.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e408c8bbfe3263208c7158c3b4199917aa15903b Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/state_inline.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23364a6ad285f141aabaa3a017978606b60195c3 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/strikethrough.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dafb0a771dde28e464486605f073077692376af3 Binary files /dev/null and b/lib/python3.10/site-packages/markdown_it/rules_inline/__pycache__/text.cpython-310.pyc differ diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/autolink.py b/lib/python3.10/site-packages/markdown_it/rules_inline/autolink.py new file mode 100644 index 0000000000000000000000000000000000000000..295d963f39254e6ddfe9dc36af2bfa5e534c0827 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/autolink.py @@ -0,0 +1,77 @@ +# Process autolinks '' +import re + +from .state_inline import StateInline + +EMAIL_RE = re.compile( + r"^([a-zA-Z0-9.!#$%&\'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$" # noqa: E501 +) +AUTOLINK_RE = re.compile(r"^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$") + + +def autolink(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if state.src[pos] != "<": + return False + + start = state.pos + maximum = state.posMax + + while True: + pos += 1 + if pos >= maximum: + return False + + ch = state.src[pos] + + if ch == "<": + return False + if ch == ">": + break + + url = state.src[start + 1 : pos] + + if AUTOLINK_RE.search(url) is not None: + fullUrl = state.md.normalizeLink(url) + if not state.md.validateLink(fullUrl): + return False + + if not silent: + token = state.push("link_open", "a", 1) + token.attrs = {"href": fullUrl} + token.markup = "autolink" + token.info = "auto" + + token = state.push("text", "", 0) + token.content = state.md.normalizeLinkText(url) + + token = state.push("link_close", "a", -1) + token.markup = "autolink" + token.info = "auto" + + state.pos += len(url) + 2 + return True + + if EMAIL_RE.search(url) is not None: + fullUrl = state.md.normalizeLink("mailto:" + url) + if not state.md.validateLink(fullUrl): + 
return False + + if not silent: + token = state.push("link_open", "a", 1) + token.attrs = {"href": fullUrl} + token.markup = "autolink" + token.info = "auto" + + token = state.push("text", "", 0) + token.content = state.md.normalizeLinkText(url) + + token = state.push("link_close", "a", -1) + token.markup = "autolink" + token.info = "auto" + + state.pos += len(url) + 2 + return True + + return False diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/backticks.py b/lib/python3.10/site-packages/markdown_it/rules_inline/backticks.py new file mode 100644 index 0000000000000000000000000000000000000000..fc60d6b15cdfa7012a05bcf1ccbb06f44d870dfd --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/backticks.py @@ -0,0 +1,72 @@ +# Parse backticks +import re + +from .state_inline import StateInline + +regex = re.compile("^ (.+) $") + + +def backtick(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if state.src[pos] != "`": + return False + + start = pos + pos += 1 + maximum = state.posMax + + # scan marker length + while pos < maximum and (state.src[pos] == "`"): + pos += 1 + + marker = state.src[start:pos] + openerLength = len(marker) + + if state.backticksScanned and state.backticks.get(openerLength, 0) <= start: + if not silent: + state.pending += marker + state.pos += openerLength + return True + + matchStart = matchEnd = pos + + # Nothing found in the cache, scan until the end of the line (or until marker is found) + while True: + try: + matchStart = state.src.index("`", matchEnd) + except ValueError: + break + matchEnd = matchStart + 1 + + # scan marker length + while matchEnd < maximum and (state.src[matchEnd] == "`"): + matchEnd += 1 + + closerLength = matchEnd - matchStart + + if closerLength == openerLength: + # Found matching closer length. 
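+            # e.g. in "`code`" both backtick runs have length 1, so the text
+            # between them becomes a single code_inline token below
+            # (whereas "``x`" keeps scanning, since 2 != 1)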
+ if not silent: + token = state.push("code_inline", "code", 0) + token.markup = marker + token.content = state.src[pos:matchStart].replace("\n", " ") + if ( + token.content.startswith(" ") + and token.content.endswith(" ") + and len(token.content.strip()) > 0 + ): + token.content = token.content[1:-1] + state.pos = matchEnd + return True + + # Some different length found, put it in cache as upper limit of where closer can be found + state.backticks[closerLength] = matchStart + + # Scanned through the end, didn't find anything + state.backticksScanned = True + + if not silent: + state.pending += marker + state.pos += openerLength + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/balance_pairs.py b/lib/python3.10/site-packages/markdown_it/rules_inline/balance_pairs.py new file mode 100644 index 0000000000000000000000000000000000000000..bbb2101c7e1614dde2323d3a8a42b388f354789e --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/balance_pairs.py @@ -0,0 +1,137 @@ +"""Balance paired characters (*, _, etc) in inline tokens.""" +from __future__ import annotations + +from .state_inline import Delimiter, StateInline + + +def processDelimiters(state: StateInline, delimiters: list[Delimiter]) -> None: + """For each opening emphasis-like marker find a matching closing one.""" + if not delimiters: + return + + openersBottom = {} + maximum = len(delimiters) + + # headerIdx is the first delimiter of the current (where closer is) delimiter run + headerIdx = 0 + lastTokenIdx = -2 # needs any value lower than -1 + jumps: list[int] = [] + closerIdx = 0 + while closerIdx < maximum: + closer = delimiters[closerIdx] + + jumps.append(0) + + # markers belong to same delimiter run if: + # - they have adjacent tokens + # - AND markers are the same + # + if ( + delimiters[headerIdx].marker != closer.marker + or lastTokenIdx != closer.token - 1 + ): + headerIdx = closerIdx + lastTokenIdx = closer.token + + # Length is only used for emphasis-specific "rule of 3", + # if it's not defined (in strikethrough or 3rd party plugins), + # we can default it to 0 to disable those checks. + # + closer.length = closer.length or 0 + + if not closer.close: + closerIdx += 1 + continue + + # Previously calculated lower bounds (previous fails) + # for each marker, each delimiter length modulo 3, + # and for whether this closer can be an opener; + # https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460 + if closer.marker not in openersBottom: + openersBottom[closer.marker] = [-1, -1, -1, -1, -1, -1] + + minOpenerIdx = openersBottom[closer.marker][ + (3 if closer.open else 0) + (closer.length % 3) + ] + + openerIdx = headerIdx - jumps[headerIdx] - 1 + + newMinOpenerIdx = openerIdx + + while openerIdx > minOpenerIdx: + opener = delimiters[openerIdx] + + if opener.marker != closer.marker: + openerIdx -= jumps[openerIdx] + 1 + continue + + if opener.open and opener.end < 0: + isOddMatch = False + + # from spec: + # + # If one of the delimiters can both open and close emphasis, then the + # sum of the lengths of the delimiter runs containing the opening and + # closing delimiters must not be a multiple of 3 unless both lengths + # are multiples of 3. + # + if ( + (opener.close or closer.open) + and ((opener.length + closer.length) % 3 == 0) + and (opener.length % 3 != 0 or closer.length % 3 != 0) + ): + isOddMatch = True + + if not isOddMatch: + # If previous delimiter cannot be an opener, we can safely skip + # the entire sequence in future checks. 
+                    # sure algorithm has linear complexity (see *_*_*_*_*_... case).
+                    #
+                    if openerIdx > 0 and not delimiters[openerIdx - 1].open:
+                        lastJump = jumps[openerIdx - 1] + 1
+                    else:
+                        lastJump = 0
+
+                    jumps[closerIdx] = closerIdx - openerIdx + lastJump
+                    jumps[openerIdx] = lastJump
+
+                    closer.open = False
+                    opener.end = closerIdx
+                    opener.close = False
+                    newMinOpenerIdx = -1
+
+                    # treat next token as start of run,
+                    # it optimizes skips in **<...>**a**<...>** pathological case
+                    lastTokenIdx = -2
+
+                    break
+
+            openerIdx -= jumps[openerIdx] + 1
+
+        if newMinOpenerIdx != -1:
+            # If match for this delimiter run failed, we want to set lower bound for
+            # future lookups. This is required to make sure algorithm has linear
+            # complexity.
+            #
+            # See details here:
+            # https://github.com/commonmark/cmark/issues/178#issuecomment-270417442
+            #
+            openersBottom[closer.marker][
+                (3 if closer.open else 0) + ((closer.length or 0) % 3)
+            ] = newMinOpenerIdx
+
+        closerIdx += 1
+
+
+def link_pairs(state: StateInline) -> None:
+    tokens_meta = state.tokens_meta
+    maximum = len(state.tokens_meta)
+
+    processDelimiters(state, state.delimiters)
+
+    curr = 0
+    while curr < maximum:
+        curr_meta = tokens_meta[curr]
+        if curr_meta and "delimiters" in curr_meta:
+            processDelimiters(state, curr_meta["delimiters"])
+        curr += 1
diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/emphasis.py b/lib/python3.10/site-packages/markdown_it/rules_inline/emphasis.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a98f9e216c94db0217e986270aaaa72fcc99f7f
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_inline/emphasis.py
@@ -0,0 +1,102 @@
+# Process *this* and _that_
+#
+from __future__ import annotations
+
+from .state_inline import Delimiter, StateInline
+
+
+def tokenize(state: StateInline, silent: bool) -> bool:
+    """Insert each marker as a separate text token, and add it to delimiter list"""
+    start = state.pos
+    marker = state.src[start]
+
+    if silent:
+        return False
+
+    if marker not in ("_", "*"):
+        return False
+
+    scanned = state.scanDelims(state.pos, marker == "*")
+
+    for _ in range(scanned.length):
+        token = state.push("text", "", 0)
+        token.content = marker
+        state.delimiters.append(
+            Delimiter(
+                marker=ord(marker),
+                length=scanned.length,
+                token=len(state.tokens) - 1,
+                end=-1,
+                open=scanned.can_open,
+                close=scanned.can_close,
+            )
+        )
+
+    state.pos += scanned.length
+
+    return True
+
+
+def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None:
+    i = len(delimiters) - 1
+    while i >= 0:
+        startDelim = delimiters[i]
+
+        # /* _ */ /* * */
+        if startDelim.marker != 0x5F and startDelim.marker != 0x2A:
+            i -= 1
+            continue
+
+        # Process only opening markers
+        if startDelim.end == -1:
+            i -= 1
+            continue
+
+        endDelim = delimiters[startDelim.end]
+
+        # If the previous delimiter has the same marker and is adjacent to this one,
+        # merge those into one strong delimiter.
+ # + # `whatever` -> `whatever` + # + isStrong = ( + i > 0 + and delimiters[i - 1].end == startDelim.end + 1 + # check that first two markers match and adjacent + and delimiters[i - 1].marker == startDelim.marker + and delimiters[i - 1].token == startDelim.token - 1 + # check that last two markers are adjacent (we can safely assume they match) + and delimiters[startDelim.end + 1].token == endDelim.token + 1 + ) + + ch = chr(startDelim.marker) + + token = state.tokens[startDelim.token] + token.type = "strong_open" if isStrong else "em_open" + token.tag = "strong" if isStrong else "em" + token.nesting = 1 + token.markup = ch + ch if isStrong else ch + token.content = "" + + token = state.tokens[endDelim.token] + token.type = "strong_close" if isStrong else "em_close" + token.tag = "strong" if isStrong else "em" + token.nesting = -1 + token.markup = ch + ch if isStrong else ch + token.content = "" + + if isStrong: + state.tokens[delimiters[i - 1].token].content = "" + state.tokens[delimiters[startDelim.end + 1].token].content = "" + i -= 1 + + i -= 1 + + +def postProcess(state: StateInline) -> None: + """Walk through delimiter list and replace text tokens with tags.""" + _postProcess(state, state.delimiters) + + for token in state.tokens_meta: + if token and "delimiters" in token: + _postProcess(state, token["delimiters"]) diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/entity.py b/lib/python3.10/site-packages/markdown_it/rules_inline/entity.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9d39650e5bc533e694d3d6699677068d22c69f --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/entity.py @@ -0,0 +1,53 @@ +# Process html entity - {, ¯, ", ... +import re + +from ..common.entities import entities +from ..common.utils import fromCodePoint, isValidEntityCode +from .state_inline import StateInline + +DIGITAL_RE = re.compile(r"^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));", re.IGNORECASE) +NAMED_RE = re.compile(r"^&([a-z][a-z0-9]{1,31});", re.IGNORECASE) + + +def entity(state: StateInline, silent: bool) -> bool: + pos = state.pos + maximum = state.posMax + + if state.src[pos] != "&": + return False + + if pos + 1 >= maximum: + return False + + if state.src[pos + 1] == "#": + if match := DIGITAL_RE.search(state.src[pos:]): + if not silent: + match1 = match.group(1) + code = ( + int(match1[1:], 16) if match1[0].lower() == "x" else int(match1, 10) + ) + + token = state.push("text_special", "", 0) + token.content = ( + fromCodePoint(code) + if isValidEntityCode(code) + else fromCodePoint(0xFFFD) + ) + token.markup = match.group(0) + token.info = "entity" + + state.pos += len(match.group(0)) + return True + + else: + if (match := NAMED_RE.search(state.src[pos:])) and match.group(1) in entities: + if not silent: + token = state.push("text_special", "", 0) + token.content = entities[match.group(1)] + token.markup = match.group(0) + token.info = "entity" + + state.pos += len(match.group(0)) + return True + + return False diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/escape.py b/lib/python3.10/site-packages/markdown_it/rules_inline/escape.py new file mode 100644 index 0000000000000000000000000000000000000000..9f68b5dfad17fdcfe164ab4a8ef0761051b46efb --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/escape.py @@ -0,0 +1,92 @@ +""" +Process escaped chars and hardbreaks +""" +from ..common.utils import isStrSpace +from .state_inline import StateInline + + +def escape(state: StateInline, silent: bool) -> bool: 
+ """Process escaped chars and hardbreaks.""" + pos = state.pos + maximum = state.posMax + + if state.src[pos] != "\\": + return False + + pos += 1 + + # '\' at the end of the inline block + if pos >= maximum: + return False + + ch1 = state.src[pos] + ch1_ord = ord(ch1) + if ch1 == "\n": + if not silent: + state.push("hardbreak", "br", 0) + pos += 1 + # skip leading whitespaces from next line + while pos < maximum: + ch = state.src[pos] + if not isStrSpace(ch): + break + pos += 1 + + state.pos = pos + return True + + escapedStr = state.src[pos] + + if ch1_ord >= 0xD800 and ch1_ord <= 0xDBFF and pos + 1 < maximum: + ch2 = state.src[pos + 1] + ch2_ord = ord(ch2) + if ch2_ord >= 0xDC00 and ch2_ord <= 0xDFFF: + escapedStr += ch2 + pos += 1 + + origStr = "\\" + escapedStr + + if not silent: + token = state.push("text_special", "", 0) + token.content = escapedStr if ch1 in _ESCAPED else origStr + token.markup = origStr + token.info = "escape" + + state.pos = pos + 1 + return True + + +_ESCAPED = { + "!", + '"', + "#", + "$", + "%", + "&", + "'", + "(", + ")", + "*", + "+", + ",", + "-", + ".", + "/", + ":", + ";", + "<", + "=", + ">", + "?", + "@", + "[", + "\\", + "]", + "^", + "_", + "`", + "{", + "|", + "}", + "~", +} diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/fragments_join.py b/lib/python3.10/site-packages/markdown_it/rules_inline/fragments_join.py new file mode 100644 index 0000000000000000000000000000000000000000..f795c1364b8ac098b7a17f34cd31d7070280cf36 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/fragments_join.py @@ -0,0 +1,43 @@ +from .state_inline import StateInline + + +def fragments_join(state: StateInline) -> None: + """ + Clean up tokens after emphasis and strikethrough postprocessing: + merge adjacent text nodes into one and re-calculate all token levels + + This is necessary because initially emphasis delimiter markers (``*, _, ~``) + are treated as their own separate text tokens. Then emphasis rule either + leaves them as text (needed to merge with adjacent text) or turns them + into opening/closing tags (which messes up levels inside). 
+ """ + level = 0 + maximum = len(state.tokens) + + curr = last = 0 + while curr < maximum: + # re-calculate levels after emphasis/strikethrough turns some text nodes + # into opening/closing tags + if state.tokens[curr].nesting < 0: + level -= 1 # closing tag + state.tokens[curr].level = level + if state.tokens[curr].nesting > 0: + level += 1 # opening tag + + if ( + state.tokens[curr].type == "text" + and curr + 1 < maximum + and state.tokens[curr + 1].type == "text" + ): + # collapse two adjacent text nodes + state.tokens[curr + 1].content = ( + state.tokens[curr].content + state.tokens[curr + 1].content + ) + else: + if curr != last: + state.tokens[last] = state.tokens[curr] + last += 1 + curr += 1 + + if curr != last: + del state.tokens[last:] diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/html_inline.py b/lib/python3.10/site-packages/markdown_it/rules_inline/html_inline.py new file mode 100644 index 0000000000000000000000000000000000000000..9065e1d034da76270f7d3f1ba528132c8d57d341 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/html_inline.py @@ -0,0 +1,43 @@ +# Process html tags +from ..common.html_re import HTML_TAG_RE +from ..common.utils import isLinkClose, isLinkOpen +from .state_inline import StateInline + + +def isLetter(ch: int) -> bool: + lc = ch | 0x20 # to lower case + # /* a */ and /* z */ + return (lc >= 0x61) and (lc <= 0x7A) + + +def html_inline(state: StateInline, silent: bool) -> bool: + pos = state.pos + + if not state.md.options.get("html", None): + return False + + # Check start + maximum = state.posMax + if state.src[pos] != "<" or pos + 2 >= maximum: + return False + + # Quick fail on second char + ch = state.src[pos + 1] + if ch not in ("!", "?", "/") and not isLetter(ord(ch)): # /* / */ + return False + + match = HTML_TAG_RE.search(state.src[pos:]) + if not match: + return False + + if not silent: + token = state.push("html_inline", "", 0) + token.content = state.src[pos : pos + len(match.group(0))] + + if isLinkOpen(token.content): + state.linkLevel += 1 + if isLinkClose(token.content): + state.linkLevel -= 1 + + state.pos += len(match.group(0)) + return True diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/image.py b/lib/python3.10/site-packages/markdown_it/rules_inline/image.py new file mode 100644 index 0000000000000000000000000000000000000000..b4a32a9f5880f3977c81f6b94d18751f93fa8561 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/image.py @@ -0,0 +1,148 @@ +# Process ![image]( "title") +from __future__ import annotations + +from ..common.utils import isStrSpace, normalizeReference +from ..token import Token +from .state_inline import StateInline + + +def image(state: StateInline, silent: bool) -> bool: + label = None + href = "" + oldPos = state.pos + max = state.posMax + + if state.src[state.pos] != "!": + return False + + if state.pos + 1 < state.posMax and state.src[state.pos + 1] != "[": + return False + + labelStart = state.pos + 2 + labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, False) + + # parser failed to find ']', so it's not a valid link + if labelEnd < 0: + return False + + pos = labelEnd + 1 + + if pos < max and state.src[pos] == "(": + # + # Inline link + # + + # [link]( "title" ) + # ^^ skipping these spaces + pos += 1 + while pos < max: + ch = state.src[pos] + if not isStrSpace(ch) and ch != "\n": + break + pos += 1 + + if pos >= max: + return False + + # [link]( "title" ) + # ^^^^^^ parsing link destination + start = pos + res = 
+        if res.ok:
+            href = state.md.normalizeLink(res.str)
+            if state.md.validateLink(href):
+                pos = res.pos
+            else:
+                href = ""
+
+        # [link](  <href>  "title"  )
+        #                ^^ skipping these spaces
+        start = pos
+        while pos < max:
+            ch = state.src[pos]
+            if not isStrSpace(ch) and ch != "\n":
+                break
+            pos += 1
+
+        # [link](  <href>  "title"  )
+        #                  ^^^^^^^ parsing link title
+        res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
+        if pos < max and start != pos and res.ok:
+            title = res.str
+            pos = res.pos
+
+            # [link](  <href>  "title"  )
+            #                         ^^ skipping these spaces
+            while pos < max:
+                ch = state.src[pos]
+                if not isStrSpace(ch) and ch != "\n":
+                    break
+                pos += 1
+        else:
+            title = ""
+
+        if pos >= max or state.src[pos] != ")":
+            state.pos = oldPos
+            return False
+
+        pos += 1
+
+    else:
+        #
+        # Link reference
+        #
+        if "references" not in state.env:
+            return False
+
+        # /* [ */
+        if pos < max and state.src[pos] == "[":
+            start = pos + 1
+            pos = state.md.helpers.parseLinkLabel(state, pos)
+            if pos >= 0:
+                label = state.src[start:pos]
+                pos += 1
+            else:
+                pos = labelEnd + 1
+        else:
+            pos = labelEnd + 1
+
+        # covers label == '' and label == undefined
+        # (collapsed reference link and shortcut reference link respectively)
+        if not label:
+            label = state.src[labelStart:labelEnd]
+
+        label = normalizeReference(label)
+
+        ref = state.env["references"].get(label, None)
+        if not ref:
+            state.pos = oldPos
+            return False
+
+        href = ref["href"]
+        title = ref["title"]
+
+    #
+    # We found the end of the link, and know for a fact it's a valid link
+    # so all that's left to do is to call tokenizer.
+    #
+    if not silent:
+        content = state.src[labelStart:labelEnd]
+
+        tokens: list[Token] = []
+        state.md.inline.parse(content, state.md, state.env, tokens)
+
+        token = state.push("image", "img", 0)
+        token.attrs = {"src": href, "alt": ""}
+        token.children = tokens or None
+        token.content = content
+
+        if title:
+            token.attrSet("title", title)
+
+        # note, this is not part of markdown-it JS, but is useful for renderers
+        if label and state.md.options.get("store_labels", False):
+            token.meta["label"] = label
+
+    state.pos = pos
+    state.posMax = max
+    return True
diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/link.py b/lib/python3.10/site-packages/markdown_it/rules_inline/link.py
new file mode 100644
index 0000000000000000000000000000000000000000..78cf9122f3b4ad740769cb7edd3e4cc49af46e00
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_inline/link.py
@@ -0,0 +1,151 @@
+# Process [link](<to> "stuff")
+
+from ..common.utils import isStrSpace, normalizeReference
+from .state_inline import StateInline
+
+
+def link(state: StateInline, silent: bool) -> bool:
+    href = ""
+    title = ""
+    label = None
+    oldPos = state.pos
+    maximum = state.posMax
+    start = state.pos
+    parseReference = True
+
+    if state.src[state.pos] != "[":
+        return False
+
+    labelStart = state.pos + 1
+    labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, True)
+
+    # parser failed to find ']', so it's not a valid link
+    if labelEnd < 0:
+        return False
+
+    pos = labelEnd + 1
+
+    if pos < maximum and state.src[pos] == "(":
+        #
+        # Inline link
+        #
+
+        # might have found a valid shortcut link, disable reference parsing
+        parseReference = False
+
+        # [link](  <href>  "title"  )
+        #        ^^ skipping these spaces
+        pos += 1
+        while pos < maximum:
+            ch = state.src[pos]
+            if not isStrSpace(ch) and ch != "\n":
+                break
+            pos += 1
+
+        if pos >= maximum:
+            return False
+
+        # [link](  <href>  "title"  )
+        #          ^^^^^^ parsing link destination
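+        # (a link destination is either a <...>-bracketed string or a run of
+        # non-space characters with balanced parentheses)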
+        start = pos
+        res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
+        if res.ok:
+            href = state.md.normalizeLink(res.str)
+            if state.md.validateLink(href):
+                pos = res.pos
+            else:
+                href = ""
+
+        # [link](  <href>  "title"  )
+        #                ^^ skipping these spaces
+        start = pos
+        while pos < maximum:
+            ch = state.src[pos]
+            if not isStrSpace(ch) and ch != "\n":
+                break
+            pos += 1
+
+        # [link](  <href>  "title"  )
+        #                  ^^^^^^^ parsing link title
+        res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
+        if pos < maximum and start != pos and res.ok:
+            title = res.str
+            pos = res.pos
+
+            # [link](  <href>  "title"  )
+            #                         ^^ skipping these spaces
+            while pos < maximum:
+                ch = state.src[pos]
+                if not isStrSpace(ch) and ch != "\n":
+                    break
+                pos += 1
+
+        if pos >= maximum or state.src[pos] != ")":
+            # parsing a valid shortcut link failed, fallback to reference
+            parseReference = True
+
+        pos += 1
+
+    if parseReference:
+        #
+        # Link reference
+        #
+        if "references" not in state.env:
+            return False
+
+        if pos < maximum and state.src[pos] == "[":
+            start = pos + 1
+            pos = state.md.helpers.parseLinkLabel(state, pos)
+            if pos >= 0:
+                label = state.src[start:pos]
+                pos += 1
+            else:
+                pos = labelEnd + 1
+
+        else:
+            pos = labelEnd + 1
+
+        # covers label == '' and label == undefined
+        # (collapsed reference link and shortcut reference link respectively)
+        if not label:
+            label = state.src[labelStart:labelEnd]
+
+        label = normalizeReference(label)
+
+        ref = (
+            state.env["references"][label] if label in state.env["references"] else None
+        )
+        if not ref:
+            state.pos = oldPos
+            return False
+
+        href = ref["href"]
+        title = ref["title"]
+
+    #
+    # We found the end of the link, and know for a fact it's a valid link
+    # so all that's left to do is to call tokenizer.
+    #
+    if not silent:
+        state.pos = labelStart
+        state.posMax = labelEnd
+
+        token = state.push("link_open", "a", 1)
+        token.attrs = {"href": href}
+
+        if title:
+            token.attrSet("title", title)
+
+        # note, this is not part of markdown-it JS, but is useful for renderers
+        if label and state.md.options.get("store_labels", False):
+            token.meta["label"] = label
+
+        state.linkLevel += 1
+        state.md.inline.tokenize(state)
+        state.linkLevel -= 1
+
+        token = state.push("link_close", "a", -1)
+
+    state.pos = pos
+    state.posMax = maximum
+    return True
diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/linkify.py b/lib/python3.10/site-packages/markdown_it/rules_inline/linkify.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8a181537d78867f8dfdf552b70f83be09bb2dd5
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_inline/linkify.py
@@ -0,0 +1,61 @@
+"""Process links like https://example.org/"""
+import re
+
+from .state_inline import StateInline
+
+# RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
+SCHEME_RE = re.compile(r"(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$", re.IGNORECASE)
+
+
+def linkify(state: StateInline, silent: bool) -> bool:
+    """Rule for identifying plain-text links."""
+    if not state.md.options.linkify:
+        return False
+    if state.linkLevel > 0:
+        return False
+    if not state.md.linkify:
+        raise ModuleNotFoundError("Linkify enabled but not installed.")
+
+    pos = state.pos
+    maximum = state.posMax
+
+    if (
+        (pos + 3) > maximum
+        or state.src[pos] != ":"
+        or state.src[pos + 1] != "/"
+        or state.src[pos + 2] != "/"
+    ):
+        return False
+
+    if not (match := SCHEME_RE.match(state.pending)):
+        return False
+
+    proto = match.group(1)
+    if not (link := state.md.linkify.match_at_start(state.src[pos - len(proto) :])):
+        return False
+    url: str = link.url
+
+    # disallow '*' at the end of the link (conflicts with emphasis)
+    url = url.rstrip("*")
+
+    full_url = state.md.normalizeLink(url)
+    if not state.md.validateLink(full_url):
+        return False
+
+    if not silent:
+        state.pending = state.pending[: -len(proto)]
+
+        token = state.push("link_open", "a", 1)
+        token.attrs = {"href": full_url}
+        token.markup = "linkify"
+        token.info = "auto"
+
+        token = state.push("text", "", 0)
+        token.content = state.md.normalizeLinkText(url)
+
+        token = state.push("link_close", "a", -1)
+        token.markup = "linkify"
+        token.info = "auto"
+
+    state.pos += len(url) - len(proto)
+    return True
diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/newline.py b/lib/python3.10/site-packages/markdown_it/rules_inline/newline.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca8f1db02da07b023aa9fdb08ee7af326f773da8
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_inline/newline.py
@@ -0,0 +1,43 @@
+"""Process '\n'."""
+from ..common.utils import charStrAt, isStrSpace
+from .state_inline import StateInline
+
+
+def newline(state: StateInline, silent: bool) -> bool:
+    pos = state.pos
+
+    if state.src[pos] != "\n":
+        return False
+
+    pmax = len(state.pending) - 1
+    maximum = state.posMax
+
+    # '  \n' -> hardbreak
+    # Lookup in pending chars is bad practice! Don't copy to other rules!
+    # Pending string is stored in concat mode, indexed lookups will cause
+    # conversion to flat mode.
+    if not silent:
+        if pmax >= 0 and charStrAt(state.pending, pmax) == " ":
+            if pmax >= 1 and charStrAt(state.pending, pmax - 1) == " ":
+                # Find whitespaces tail of pending chars.
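+                # (two or more trailing spaces before the newline produce a
+                # hardbreak; the whole run of trailing spaces is removed)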
+                ws = pmax - 1
+                while ws >= 1 and charStrAt(state.pending, ws - 1) == " ":
+                    ws -= 1
+                state.pending = state.pending[:ws]
+
+                state.push("hardbreak", "br", 0)
+            else:
+                state.pending = state.pending[:-1]
+                state.push("softbreak", "br", 0)
+
+        else:
+            state.push("softbreak", "br", 0)
+
+    pos += 1
+
+    # skip heading spaces for next line
+    while pos < maximum and isStrSpace(state.src[pos]):
+        pos += 1
+
+    state.pos = pos
+    return True
diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/state_inline.py b/lib/python3.10/site-packages/markdown_it/rules_inline/state_inline.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c491c4b7c9ae4117d60f447fdbf3c742f66f48
--- /dev/null
+++ b/lib/python3.10/site-packages/markdown_it/rules_inline/state_inline.py
@@ -0,0 +1,166 @@
+from __future__ import annotations
+
+from collections import namedtuple
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Literal
+
+from .._compat import DATACLASS_KWARGS
+from ..common.utils import isMdAsciiPunct, isPunctChar, isWhiteSpace
+from ..ruler import StateBase
+from ..token import Token
+from ..utils import EnvType
+
+if TYPE_CHECKING:
+    from markdown_it import MarkdownIt
+
+
+@dataclass(**DATACLASS_KWARGS)
+class Delimiter:
+    # Char code of the starting marker (number).
+    marker: int
+
+    # Total length of these series of delimiters.
+    length: int
+
+    # A position of the token this delimiter corresponds to.
+    token: int
+
+    # If this delimiter is matched as a valid opener, `end` will be
+    # equal to its position, otherwise it's `-1`.
+    end: int
+
+    # Boolean flags that determine if this delimiter could open or close
+    # an emphasis.
+    open: bool
+    close: bool
+
+    level: bool | None = None
+
+
+Scanned = namedtuple("Scanned", ["can_open", "can_close", "length"])
+
+
+class StateInline(StateBase):
+    def __init__(
+        self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token]
+    ) -> None:
+        self.src = src
+        self.env = env
+        self.md = md
+        self.tokens = outTokens
+        self.tokens_meta: list[dict[str, Any] | None] = [None] * len(outTokens)
+
+        self.pos = 0
+        self.posMax = len(self.src)
+        self.level = 0
+        self.pending = ""
+        self.pendingLevel = 0
+
+        # Stores { start: end } pairs. Useful for backtrack
+        # optimization of pairs parse (emphasis, strikes).
+        self.cache: dict[int, int] = {}
+
+        # List of emphasis-like delimiters for current tag
+        self.delimiters: list[Delimiter] = []
+
+        # Stack of delimiter lists for upper level tags
+        self._prev_delimiters: list[list[Delimiter]] = []
+
+        # backtick length => last seen position
+        self.backticks: dict[int, int] = {}
+        self.backticksScanned = False
+
+        # Counter used to disable inline linkify-it execution
+        # inside <a> and markdown links
+        self.linkLevel = 0
+
+    def __repr__(self) -> str:
+        return (
+            f"{self.__class__.__name__}"
+            f"(pos=[{self.pos} of {self.posMax}], token={len(self.tokens)})"
+        )
+
+    def pushPending(self) -> Token:
+        token = Token("text", "", 0)
+        token.content = self.pending
+        token.level = self.pendingLevel
+        self.tokens.append(token)
+        self.pending = ""
+        return token
+
+    def push(self, ttype: str, tag: str, nesting: Literal[-1, 0, 1]) -> Token:
+        """Push new token to "stream".
+ If pending text exists - flush it as text token + """ + if self.pending: + self.pushPending() + + token = Token(ttype, tag, nesting) + token_meta = None + + if nesting < 0: + # closing tag + self.level -= 1 + self.delimiters = self._prev_delimiters.pop() + + token.level = self.level + + if nesting > 0: + # opening tag + self.level += 1 + self._prev_delimiters.append(self.delimiters) + self.delimiters = [] + token_meta = {"delimiters": self.delimiters} + + self.pendingLevel = self.level + self.tokens.append(token) + self.tokens_meta.append(token_meta) + return token + + def scanDelims(self, start: int, canSplitWord: bool) -> Scanned: + """ + Scan a sequence of emphasis-like markers, and determine whether + it can start an emphasis sequence or end an emphasis sequence. + + - start - position to scan from (it should point at a valid marker); + - canSplitWord - determine if these markers can be found inside a word + + """ + pos = start + maximum = self.posMax + marker = self.src[start] + + # treat beginning of the line as a whitespace + lastChar = self.src[start - 1] if start > 0 else " " + + while pos < maximum and self.src[pos] == marker: + pos += 1 + + count = pos - start + + # treat end of the line as a whitespace + nextChar = self.src[pos] if pos < maximum else " " + + isLastPunctChar = isMdAsciiPunct(ord(lastChar)) or isPunctChar(lastChar) + isNextPunctChar = isMdAsciiPunct(ord(nextChar)) or isPunctChar(nextChar) + + isLastWhiteSpace = isWhiteSpace(ord(lastChar)) + isNextWhiteSpace = isWhiteSpace(ord(nextChar)) + + left_flanking = not ( + isNextWhiteSpace + or (isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar)) + ) + right_flanking = not ( + isLastWhiteSpace + or (isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar)) + ) + + if not canSplitWord: + can_open = left_flanking and ((not right_flanking) or isLastPunctChar) + can_close = right_flanking and ((not left_flanking) or isNextPunctChar) + else: + can_open = left_flanking + can_close = right_flanking + + return Scanned(can_open, can_close, count) diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/strikethrough.py b/lib/python3.10/site-packages/markdown_it/rules_inline/strikethrough.py new file mode 100644 index 0000000000000000000000000000000000000000..ec816281d49b23d0774bf91db6600d996aaf8b06 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/strikethrough.py @@ -0,0 +1,127 @@ +# ~~strike through~~ +from __future__ import annotations + +from .state_inline import Delimiter, StateInline + + +def tokenize(state: StateInline, silent: bool) -> bool: + """Insert each marker as a separate text token, and add it to delimiter list""" + start = state.pos + ch = state.src[start] + + if silent: + return False + + if ch != "~": + return False + + scanned = state.scanDelims(state.pos, True) + length = scanned.length + + if length < 2: + return False + + if length % 2: + token = state.push("text", "", 0) + token.content = ch + length -= 1 + + i = 0 + while i < length: + token = state.push("text", "", 0) + token.content = ch + ch + state.delimiters.append( + Delimiter( + marker=ord(ch), + length=0, # disable "rule of 3" length checks meant for emphasis + token=len(state.tokens) - 1, + end=-1, + open=scanned.can_open, + close=scanned.can_close, + ) + ) + + i += 2 + + state.pos += scanned.length + + return True + + +def _postProcess(state: StateInline, delimiters: list[Delimiter]) -> None: + loneMarkers = [] + maximum = len(delimiters) + + i = 0 + while i < maximum: + startDelim = 
delimiters[i] + + if startDelim.marker != 0x7E: # /* ~ */ + i += 1 + continue + + if startDelim.end == -1: + i += 1 + continue + + endDelim = delimiters[startDelim.end] + + token = state.tokens[startDelim.token] + token.type = "s_open" + token.tag = "s" + token.nesting = 1 + token.markup = "~~" + token.content = "" + + token = state.tokens[endDelim.token] + token.type = "s_close" + token.tag = "s" + token.nesting = -1 + token.markup = "~~" + token.content = "" + + if ( + state.tokens[endDelim.token - 1].type == "text" + and state.tokens[endDelim.token - 1].content == "~" + ): + loneMarkers.append(endDelim.token - 1) + + i += 1 + + # If a marker sequence has an odd number of characters, it's split + # like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the + # start of the sequence. + # + # So, we have to move all those markers after subsequent s_close tags. + # + while loneMarkers: + i = loneMarkers.pop() + j = i + 1 + + while (j < len(state.tokens)) and (state.tokens[j].type == "s_close"): + j += 1 + + j -= 1 + + if i != j: + token = state.tokens[j] + state.tokens[j] = state.tokens[i] + state.tokens[i] = token + + +def postProcess(state: StateInline) -> None: + """Walk through delimiter list and replace text tokens with tags.""" + tokens_meta = state.tokens_meta + maximum = len(state.tokens_meta) + _postProcess(state, state.delimiters) + + curr = 0 + while curr < maximum: + try: + curr_meta = tokens_meta[curr] + except IndexError: + pass + else: + if curr_meta and "delimiters" in curr_meta: + _postProcess(state, curr_meta["delimiters"]) + curr += 1 diff --git a/lib/python3.10/site-packages/markdown_it/rules_inline/text.py b/lib/python3.10/site-packages/markdown_it/rules_inline/text.py new file mode 100644 index 0000000000000000000000000000000000000000..f306b2e4cecde67aa3d363d3844e8b40a70a48b1 --- /dev/null +++ b/lib/python3.10/site-packages/markdown_it/rules_inline/text.py @@ -0,0 +1,53 @@ +# Skip text characters for text token, place those to pending buffer +# and increment current pos +from .state_inline import StateInline + +# Rule to skip pure text +# '{}$%@~+=:' reserved for extensions + +# !!!! 
Don't confuse with "Markdown ASCII Punctuation" chars +# http://spec.commonmark.org/0.15/#ascii-punctuation-character + + +_TerminatorChars = { + "\n", + "!", + "#", + "$", + "%", + "&", + "*", + "+", + "-", + ":", + "<", + "=", + ">", + "@", + "[", + "\\", + "]", + "^", + "_", + "`", + "{", + "}", + "~", +} + + +def text(state: StateInline, silent: bool) -> bool: + pos = state.pos + posMax = state.posMax + while (pos < posMax) and state.src[pos] not in _TerminatorChars: + pos += 1 + + if pos == state.pos: + return False + + if not silent: + state.pending += state.src[state.pos : pos] + + state.pos = pos + + return True diff --git a/lib/python3.10/site-packages/matplotlib/_c_internal_utils.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/matplotlib/_c_internal_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..03718036c23f393f29019779998c8d162ac44c33 --- /dev/null +++ b/lib/python3.10/site-packages/matplotlib/_c_internal_utils.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:643c208f3cb14042e0b6f3d82aa31fb094fc56f61beecee0aab0001cc44580ab +size 251392 diff --git a/lib/python3.10/site-packages/matplotlib/_path.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/matplotlib/_path.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..eed83acff1dc8adbe29d1e751cb5e22c6f280e38 --- /dev/null +++ b/lib/python3.10/site-packages/matplotlib/_path.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a28a124af2791c5ebdeb7360abcb1d9bc1a701a63e042b8d215e68ff25ae555 +size 486904 diff --git a/lib/python3.10/site-packages/matplotlib/_qhull.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/matplotlib/_qhull.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e06676aa9abdd332814e39bc3bed18e6d9df44fb --- /dev/null +++ b/lib/python3.10/site-packages/matplotlib/_qhull.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f112fb6986667e038b368cfabb7ceee479b43c5cfdec025e22db3ab10caa7f15 +size 745280 diff --git a/lib/python3.10/site-packages/matplotlib/_tri.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/matplotlib/_tri.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2022275c4dfbe74929ff6e463ce4d7682725ccb2 --- /dev/null +++ b/lib/python3.10/site-packages/matplotlib/_tri.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ec117a7bee59ea2cf47311cc05170252bbdc1b64851ee15fd80da777fc3550a +size 411008 diff --git a/lib/python3.10/site-packages/matplotlib/ft2font.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/matplotlib/ft2font.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3eace9ad0d8bdfd95e160d524055fdc76a299d09 --- /dev/null +++ b/lib/python3.10/site-packages/matplotlib/ft2font.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cae6451c7ad01ba1ff966f6cc68bb278b963017c1df0c7786eae8aed81064e0b +size 1423392 diff --git a/lib/python3.10/site-packages/mkl/_py_mkl_service.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/mkl/_py_mkl_service.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..dd5797668b3170db49ce034e79f8b24394390e72 --- /dev/null +++ b/lib/python3.10/site-packages/mkl/_py_mkl_service.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6f7b8eab50ea6529085a514f6d504c7f785e9adf5e6b73f036063ded9c1a223 +size 687024 diff --git a/lib/python3.10/site-packages/mkl_fft/_pydfti.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/mkl_fft/_pydfti.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1b4afe01d92f4784727722df93718a46ec1f20c7 --- /dev/null +++ b/lib/python3.10/site-packages/mkl_fft/_pydfti.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0c4b3e44c954fc897bb85c6ecc89c4b07dbf5bc53d85c6ab3c7390395e0ad20 +size 2291640 diff --git a/lib/python3.10/site-packages/mkl_random/mklrand.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/mkl_random/mklrand.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ed0b8cc2f8f6ce397af134b006e8e51c28dec447 --- /dev/null +++ b/lib/python3.10/site-packages/mkl_random/mklrand.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19dcb258593c02a876e226e07ba925138c5af8e166b1d547d67c1d2f789a9cc9 +size 4375752 diff --git a/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 b/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..c67fba89d7868adf8cda19d759510b7b4a34c6ab --- /dev/null +++ b/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a7eaece7231911be2fbb1b3e7cb4cb5c2e0f84e0c2bb73739472b5a1feec8c1 +size 168193 diff --git a/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 b/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..32aeee9a92e063a830909443ffa10a61f8eb4896 --- /dev/null +++ b/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6090048eccc763522c12ef016f81da6b627cb3a044f55cf0479a839c41c0980 +size 2833617 diff --git a/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 b/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..47f28619b3aab72df5480d0140d9f43ea3859d30 --- /dev/null +++ b/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14afb3129b1a8b50bc40a3b0820c7f1152ea9bc10121aab152943f7057472886 +size 2686065 diff --git a/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c22de.so.0.0.0 b/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c22de.so.0.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..4e1eb5101254f9fcebd5d9b3fec1f6413b0f931c --- /dev/null +++ b/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99-934c22de.so.0.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ed5137f412781ad7863439fb543613f620b43c32b63292a0029246162f5bbc6 +size 250985 diff --git 
a/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 b/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..c4b4570cb08e531daffd8e9ca6cf501eb500c067 --- /dev/null +++ b/lib/python3.10/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:934c22ded0e7d169c4d4678876c96051adf3d94545da962f60b41659b075da3b +size 247609 diff --git a/lib/python3.10/site-packages/scipy.libs/libscipy_openblas-68440149.so b/lib/python3.10/site-packages/scipy.libs/libscipy_openblas-68440149.so new file mode 100644 index 0000000000000000000000000000000000000000..949095f115ca7dd9bd6a6947284062392f130904 --- /dev/null +++ b/lib/python3.10/site-packages/scipy.libs/libscipy_openblas-68440149.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e739a2435af49d2d938749f316aa338c65f41c9ae6615406e7cf83feb662f46 +size 22211841 diff --git a/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1a9fd108fa9935a846f2d6b658bf2e06acb6a680 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/_lib/_ccallback_c.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67db05b965333dc69af741b19817e261e224c3859baf70e6138280c585ce30e0 +size 100512 diff --git a/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..24e30f5b4e7ba8ffc26c4e648e632c908082b099 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc4c80366bfd88576e37abc8a148c58279be489675700d00695601d3c6fae317 +size 108752 diff --git a/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a3fee39790b0ebfc74d97e2124cf6685a4c2ff60 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/cluster/_hierarchy.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f835153a0db10a37518e208087ee32022882ada83228c99cbb25c34864b87d0a +size 376800 diff --git a/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e18d84aeb24399c08ed58a68ab40475381354b42 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/cluster/_optimal_leaf_ordering.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7725dfb1108ff3ac0485ea3bd405877154f1da70a3ac9aa7b34761c74e9d230 +size 317632 diff --git a/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..79eea4e6bb79d5b26237e2c7d4dc4c0ce2bfe4e1 --- /dev/null +++ 
b/lib/python3.10/site-packages/scipy/cluster/_vq.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d2c3c9c374395ff25c3ccce470225d5ee997441a6b3aa5e966a9a106a52609b +size 122552 diff --git a/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f699b32fea77005449387790abf007d75cbb9962 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d069d0d98fc4ed493bd674fe76ab3ded481e46ae8d8e0a4479185efc2c3a7e67 +size 234728 diff --git a/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0120fbf963f4d92059e2b15c4d20c69a6e98a478 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/integrate/_dop.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:242d526ce4648b1065346d97326308a35c90f69ec54d99ec221196b50f28b7f9 +size 100800 diff --git a/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e50e57a1f4123fa9d1b3d8b4cd288b3d20440a31 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/integrate/_test_odeint_banded.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:052fbb9d2846364987cf92e191114b339f8b2074e6ab79d0548a1b44a44d6de0 +size 516585 diff --git a/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..47417cfc78da093460060ec2fe131a8b52e20c87 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/integrate/_vode.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd8aa38295e3ed43385f79aa51e71a675ea2c3764f2e8ff60d976d3bdac7170d +size 147512 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8f92d83ca083bc7702e0fbfd57b88d4acda3d8b2 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_bspl.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9366c450df169ca18ae2070c5babb9527c960ef55b852b93044219e3d75c1e3c +size 267496 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..29bcece52581e442266eeb5d47a146e076869bbb --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_dfitpack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d78de9577af66eb9f72fe9ce3df2a33e0bcee6eb28458041c9185c3b6223aeda +size 
325856 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_interpnd.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_interpnd.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..19f896ddba7cc5456ee5eb34df4001949d3dd307 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_interpnd.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0c9b3f4edf0b6cd4591e24fa3efade0d9d5d3b565fb3d43c39ccea782e75383 +size 399936 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e07d13451be90c1321c2a17034d6a7d54c5cc1bc --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_ppoly.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec3a9c7ddd8492c6ef77faa179007437c3f1ddf074a189a6c905d3bb244b47a +size 419736 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c7406a648695a0ade4055b1d4ea1b527d780df1e --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_rbfinterp_pythran.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b2cab6467ab9fdf6b2734d86d901254a8d7ae580b9d0d12c0db18bbad5005bf +size 219960 diff --git a/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..95cbce16ef7c51ded63c723f12d52366b72726b9 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/interpolate/_rgi_cython.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c072f8c3c2650f39c7bfae1db258b05bc8749e64f1c77ed9b8ee942c851a7d59 +size 257360 diff --git a/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0da4c95588daf0fc0cd2234e82f9cd5ea7d09ec3 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7fe04a36b36b1e0a5d36761da511d300f6e19c3effd0e3d774c9d7f75b18ee9 +size 575616 diff --git a/lib/python3.10/site-packages/scipy/linalg/_decomp_interpolative.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_decomp_interpolative.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..5f33cc410221680f7d901dd79f705bc62636d018 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_decomp_interpolative.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bd07a193689540c55c60c822f57aea02e4936bbd32989f57248bf102a7d2cef +size 901392 diff --git 
a/lib/python3.10/site-packages/scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..44428a0c7b17da7596f91850490efabfbd146614 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_decomp_lu_cython.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eee666d8360b302eb0ecf1bce907d9dba5b8d8845fb3ad9a7e0c4478bde3df3 +size 232536 diff --git a/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..82dabf388658b808bce305612dfa72ff3e71add8 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2a13d38ac2578fba8322e0e3d3c9a46f0ef51716440e430402bec2fb7799064 +size 335672 diff --git a/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..410a74b2cad1f8054a7a649f54d887580563514f --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84b104dd77854d56a080828718c39eb1967a6e77f8e96c85eadb5aa7354b01d2 +size 621344 diff --git a/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..73a8a73ecdc14a97cdd1f6106bbde377fad5551d --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc3c6299b7a23fcc34baf3817ae194e3d38b223423135ffd89ff972b5d2fffe7 +size 2130016 diff --git a/lib/python3.10/site-packages/scipy/linalg/_linalg_pythran.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_linalg_pythran.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..77cbad566f0db6baa950d6e48d74b26654763f7f --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_linalg_pythran.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ddbbcf214fb16e6f79ccaf9a3b9a47cbd35827fcf445c4c44800cd5a8e0ef6b +size 108048 diff --git a/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e8a0291ed27ddcf91f013870ce702dcc293ebb74 --- /dev/null +++ b/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm_triu.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae1a2eeb629ac97a7df9d9433cfc40efa45f129d8de5ee44204ddd1ce677f34a +size 242720 diff --git a/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so new file 
mode 100644
index 0000000000000000000000000000000000000000..89dcc7ee8be3207599757e39f07d0db58af5e8c8
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abd2530d68702f783d818bc029387f3cb3e0ad845da1d75d4983bb504983e7e5
+size 262280
diff --git a/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..455d34afd1d26c5d52054e671f82c51bc629734b
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0244b3d11ef72f77ac1fdd71825c983d292390ecf7cb90dc26f0c16f0cafc24a
+size 299192
diff --git a/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..a61e54fac9f66ee0e883e37d6e6d5071382fa6e9
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2eca7a709764e692f05948b41854c2776c21ce214575b4f9c277148889f084d4
+size 752016
diff --git a/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..8f09472a75fd111c0610cb983be8207a72f195cd
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/ndimage/_nd_image.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a1adef53aa39e0b795aa6b570d96a52c1ecbc188cead613896787568a972232
+size 137680
diff --git a/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3c831258674724c4ca8dd19dcc838b2c357b8a57
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/ndimage/_ni_label.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba681f973f4dd7523a84136de5beae96a779760aa2976738875c8b9f862fd3ec
+size 398672
diff --git a/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..19cad76d1a55f45174f984d29453954ce6ef8b05
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/odr/__odrpack.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b278525eb1212441ba606b9eac8f1c6cd293689c0b6a38a2d46f5e6d6076f10
+size 221432
diff --git a/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..6901601d1a046f8a855c81941e699cfdef28bb3b
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/optimize/_bglu_dense.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa46c70c7caf60c2e03bef0b7b09281439b972d860146e226e819e2612fa2df
+size 313832
diff --git a/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..de723cada89b5ded4baac04accdb505e0853becf
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/optimize/_cython_nnls.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02427ffbbffed1b3a15b79dcb97cdd46e80f4a53a834fd6531c5632dd73d1270
+size 103568
diff --git a/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d7bce9f064465cdd7432d50de7ea22ff710c9795
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/optimize/_moduleTNC.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:492096f7fedccf52dbce6b6a3b0fe6242494ee7841de76224c2db3d2d4c46b79
+size 142816
diff --git a/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b39c6d7de30a8c7cd9be5c639ff2bc4b094ebaeb
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:818a1274771c89b2a5bda945f162d7295fbcca6fdc1bd5d6f52907ccfdd810f6
+size 171208
diff --git a/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ae1a58cf968ce25516fc06e75288d7363c85b25a
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30fbeabd154f01ed2bb14f452af1d3ba00ee07fa6435a95315135b3b9275e63a
+size 259376
diff --git a/lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b96d2fa12621d0985a3cc6d5a3db3a01c42a9a22
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/signal/_sosfilt.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2db413f2f78df4a70d58932f44e47d5b3ea96db9794f6fd5a0fbf1b0def4b15b
+size 269312
diff --git a/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..50de6dba86691d361bab34a7c8ca0a84bbcb945b
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7a2e4103f0f357c0099a448128a99cc39fb0006f6bbb56dde053c245a7415c7
+size 345344
diff --git a/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..5405da392621d45f5f19d58ad6df89fe3476b454
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb4aff188499e9ea24e7243fc1556cb43f533daf1d6147a0668fe68c43af5b7f
+size 671264
diff --git a/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..63c0819b36dc2aa64820a7b5086a1c8bc58a3ea4
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7205cde761b3ff7da27efd06a8745ad920aadab1b1f24a4e14360c976dd3e3ef
+size 4370896
diff --git a/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..4938e90042402b2f093e4dd8a3ee365fbd489e50
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_ckdtree.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b02e640cb8fe1fdda2cbe53db9839ebbb26ebe2f9ccbbc7a5845b1ce09d1cd34
+size 806672
diff --git a/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f3476a3cd9a9d674980e2e192cd5b987b1d43092
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_distance_pybind.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15c9454a4a703b717a5148357df69116e98412771794f947194aefe506d427e2
+size 619360
diff --git a/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..20d4fc40db4a283519cb230c1ae67fa633c2252c
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_distance_wrap.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dab094c8c2dc6794eebbb8e1432d48d847f345bae95a06d7d8cc1f1079fa61cc
+size 108584
diff --git a/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7d586e6d536fbc4ee3adfbacf2a6dfe56e4674a6
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_hausdorff.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa56b83a2af9539ca9aae36775dd54d288819a2d2b28de1165c81ba1e7aaccfb
+size 211728
diff --git a/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e61cdad36d3ef2522a4e66ba783ea2dfc7b705aa
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_qhull.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:839c7f0f1d54e829e694e95a94f4234eb7b13bc383f739b558a6a93669f56888
+size 1021080
diff --git a/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f301ba6e04e4b7edb0dc0e327aaa99383f3ebf49
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/spatial/_voronoi.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c047026c5786e344c10cbfc90a07da8a46907537b6cbd676358a99d26bf7f562
+size 202608
diff --git a/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..31a88d9de0e798bdaa37e9ed42d7c65f394d7b94
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_ellip_harm_2.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e82b59fdf33141825501f3b7e1d331ed69cd49115c2efbf981fdd589cfb5af1f
+size 104792
diff --git a/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..65513dda8b7e597326faf45dc04f15af51c38701
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_gufuncs.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cedbb15dd81a743b536b6010313fa50730f9b2ce1415829ace7eb798b5741163
+size 699320
diff --git a/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..27b4d85ed7fda373f4774f112f1a4a645e5146a0
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_specfun.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0c436a1d4f95631724f71510b74cd651399886ed2fea461df41a175a2d1202c
+size 261848
diff --git a/lib/python3.10/site-packages/scipy/special/_special_ufuncs.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_special_ufuncs.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..51417587622da6baaaadd6f7d0ff3f8c5375d6cf
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_special_ufuncs.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c959adcbac9dee3fe60d8aa54f51a5ae228e1026479f5880c0a6aa6a5ef5045
+size 1389712
diff --git a/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..8a44a0560cbd5236cebe656fe01b7f95c95c9c4e
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_test_internal.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e79981d76d0d8a96f938c497a0ec5cb79dba27878bef9a21edc6ec0d19d20f7
+size 259384
diff --git a/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..07006ae8dc53e3764eeab1bfb041dad51860c051
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_ufuncs.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fe9b6f2d57425bc40201be7fb8c5368449bbce1bb2b77cfef84bed119b27d09
+size 1076080
diff --git a/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..65c56a81434e9e72ab26b85ec1efc3ae19cd8dd3
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/_ufuncs_cxx.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e33972009810bde5ecc7501a59c775e6e1e52b1304ed90075f4e41a8c4425b77
+size 1689008
diff --git a/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0f162c4ba5f204e438a5bed335c8f70c6454e254
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97fcf0e184f0ec6b359a7b74a60a8b7fff0cf531b9fb6ca7c52d1bd41ba82694
+size 2899376
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efbc9acd19d25e1b11ca80cb4fa8e08dd6de81ea
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_continuous_distns.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f01f49a1ffa7606b6f8737900e676f94bc35b6a0c113df4ce0a92d6978de71c9
+size 386233
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f6ce53a95f05d9519dd5b2861821685224a5165
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_distn_infrastructure.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fea7a0d99c496defcc0938e1055b382b4866fd8984ac2b840258e4f0ad13e96
+size 125765
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_distribution_infrastructure.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_distribution_infrastructure.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8355b124315007b513e5ce9cfcdc450278f966d5
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_distribution_infrastructure.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf3de006b76106a39a445e972d73c3f9a5a0421d00dc380b03bf8c79e9c154ef
+size 154036
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d32ba47022b7438006e1d73951b3c3f43da29e79
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_morestats.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e3d2cf5e357ad179e2f130fba0c1a21422cfdc9d52ba7297a0ea9f063af84be
+size 153642
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e6e8e8ba9d3656e72bab40225c3162153bd36a0
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_mstats_basic.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50cf823635b1907a3ad6cdabd17aa1e6182a51568ff1f30ae3b00a040c4edd15
+size 111345
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..720c5dd4b3f4e3f26a0f92a4483d918f1034dd33
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_multivariate.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:858387cbfd454a4cb931e97a617144aa817896f1117780f1aa14653a57a99b98
+size 224497
diff --git a/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70ecfeab3d5e7e13dfada565e785da4275f82f92
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/__pycache__/_stats_py.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf72b885664c7a93ff7fc086cd3ac5046ff476bc888ac8c9f50455f29a7e72a5
+size 358486
diff --git a/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f120eee005226268bd3183da9a220facb29e0e03
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_ansari_swilk_statistics.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fca84c47448e1597a80e171571c708ce3347c5b71a0069ce3b1c4e273b75e258
+size 239520
diff --git a/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..239b4d3ef62c40996f94118b9f53505b0be2e8c5
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd5baf60ad08572d1a01415e67ffc70581965015c474eb83c1da3edaaa42daf
+size 185240
diff --git a/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b721e38acbe29d2327af30dcf2cdad8a266d4625
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_qmc_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5662abff25639843becd5e0511efdd3a5ccf7634e7d7a41e6289095e5f7c566a
+size 256248
diff --git a/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9d6a85a84b2876cfe5a7f19561c74604be0d0ed8
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3dfbfd26640509a1b5b73ab802f3ae57af2de7a3a28186945e20c7ac696e020
+size 220424
diff --git a/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..0c6afdfe3f1f0628d2956ca8c4ee1df306b641cc
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_sobol.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8f11c714855b65b63fddee1db387ad3ff490f99799e86b76cc70fd57543b070
+size 357680
diff --git a/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz b/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz
new file mode 100644
index 0000000000000000000000000000000000000000..44f1f1e9ebd1eb188289ca9adb8027855c1a23b6
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4859931147d42ce465b8605cb277f957d98b839d03194fdf06579357906d193b
+size 589334
diff --git a/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c41421a11861b15c94702017d11338c524ede370
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_stats.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3e8e9daa7bbc5218c92bd554b9684c7cfb857ae58171b10c00b8773ee770492
+size 658472
diff --git a/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..9d9a9b43fca491a25ecae6c916c36dc660b30b76
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b41d61d4f1238566fd0a22b50717f2abc6b5540d370264b524bd036e87242ef
+size 139608
diff --git a/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..11e667d5af46e67c909b077e730416585f4842d0
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0b0b223fef6c5bab07a1d8db62baa2dfec8e3f01baab7a91ec0b6c8181d4973
+size 803192
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2458b52094d92d6c5f6cf04b52902642ae35b7b4
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_distributions.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e236fd113444f5951bf3729e7f90b3afb2a56188984826560172b78090c53d12
+size 296284
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4227dd5c153bae9e4829dd5b8c662001892c3fd
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_morestats.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c366afe0bc4462f7f7231549e0e8301c4c8883fa32c90cdf871e2815aa955cac
+size 109430
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fee9bc2ef06f68b3d981ec2c12a344146af7bf9d
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_multivariate.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:154fc35d12192cc620d2980c1e38396d15f98bdb083c7bc9a55bbdc9b5b41f79
+size 117756
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc09da2ec597e4d14c5072a8c3b111391d2e3681
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f4a57bf395d4dfee50db8fd3cbc5734215f4f741a90ae6b7c7ed7fbcf6e31bd
+size 301361
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy b/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..721749bcd853fa5c5efe5a1f5ba6e105658395dc
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:254d2dee4a4d547b9331c60243c6fcfcaffd26c8b104d08d4f6045a7645b3bba
+size 4064
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..adda664a7b5442fc0977ddbaa572c864ddd31f08
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-cdf-sample-data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf18c1f2d65a232bf2c7121282df31bf2a8be827afafc4ed810ed37457ee898a
+size 183728
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6c41166721b891a801cdc6828804c6da7233d625
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-Z1-pdf-sample-data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fee99512bab4ccc6569b47b924e4b034e1cdbab5624fafc7e120648bd5f7a128
+size 183688
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
new file mode 100644
index 0000000000000000000000000000000000000000..0a1460e407521836a9b73a081609af4ccdb6deae
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3c719edd5431fb9e7b9ecb6d19e3ca7a9095298bd19f226685b0fca40f0c073
+size 9328
diff --git a/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy b/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy
new file mode 100644
index 0000000000000000000000000000000000000000..80dde74dcda9a23dcdbf9a2f677eb9c98337b0a7
--- /dev/null
+++ b/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eef4dc702dd8c6e31c18c74e1f81284c3e9ca2ab50282de39c9ad30b7bb8e76d
+size 38624
diff --git a/lib/python3.10/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-310.pyc b/lib/python3.10/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..074206dff68d179b052efccb7a2640905cf7ded0
--- /dev/null
+++ b/lib/python3.10/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62f1272dbd7afb7147f5a5be3eeff92472da4de4005aa6b132f9be224ae51d7c
+size 100312
diff --git a/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc b/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9808f67c920efbe0efa5244a5a8536eb15dda604
--- /dev/null
+++ b/lib/python3.10/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d30df143c31ac5d03f46d45517ed03e20b1153c988f571f4e1a59d847e7f18f
+size 138269
diff --git a/lib/python3.10/site-packages/share/doc/libigl/readme.pdf b/lib/python3.10/site-packages/share/doc/libigl/readme.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..5e6c0128d88b56a93a42e017bf5ef595e58db33e
--- /dev/null
+++ b/lib/python3.10/site-packages/share/doc/libigl/readme.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b05a6b8f83f8453f65a0a5c2c6da836dea438f35c7967346e80b60412bd8c06
+size 2321438
diff --git a/lib/python3.10/site-packages/skimage/_shared/geometry.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/_shared/geometry.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c12d23d8976f17b9847829612e5876818c937ea3
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/_shared/geometry.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e3d6a734fa578f10867813eadb30ccd22cc9ad0bc9aa802328ea5bd080aa828
+size 198288
diff --git a/lib/python3.10/site-packages/skimage/_shared/transform.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/_shared/transform.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..be8e79839f85a06746573286bba5ab213b3268fa
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/_shared/transform.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4be6e1e8ddec2ae6c2d6d63e7ef559dbadd7bb314dd18979786226fb2ed101e9
+size 198440
diff --git a/lib/python3.10/site-packages/skimage/data/astronaut.png b/lib/python3.10/site-packages/skimage/data/astronaut.png
new file mode 100644
index 0000000000000000000000000000000000000000..e8da3848f89e2a8766a0ff8d19872dc9fc3c5bd8
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/astronaut.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88431cd9653ccd539741b555fb0a46b61558b301d4110412b5bc28b5e3ea6cb5
+size 791555
diff --git a/lib/python3.10/site-packages/skimage/data/brick.png b/lib/python3.10/site-packages/skimage/data/brick.png
new file mode 100644
index 0000000000000000000000000000000000000000..659de406229bafe2d042bbef66da40a7d6493c04
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/brick.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7966caf324f6ba843118d98f7a07746d22f6a343430add0233eca5f6eaaa8fcf
+size 106634
diff --git a/lib/python3.10/site-packages/skimage/data/camera.png b/lib/python3.10/site-packages/skimage/data/camera.png
new file mode 100644
index 0000000000000000000000000000000000000000..e20fc533ad09bd11eeb31db1e577a796ceb96d0f
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/camera.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0793d2adda0fa6ae899c03989482bff9a42d3d5690fc7e3648f2795d730c23a
+size 139512
diff --git a/lib/python3.10/site-packages/skimage/data/chelsea.png b/lib/python3.10/site-packages/skimage/data/chelsea.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff097fff1876603f5661f764eaf35fbdcef3fd76
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/chelsea.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:596aa1e7cb875eb79f437e310381d26b338a81c2da23439704a73c4651e8c4bb
+size 240512
diff --git a/lib/python3.10/site-packages/skimage/data/coffee.png b/lib/python3.10/site-packages/skimage/data/coffee.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5c6b2e72e4260fc4a4d58dbe0828d901b43e38e
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/coffee.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc02f8ca188b167c775a7101b5d767d1e71792cf762c33d6fa15a4599b5a8de7
+size 466706
diff --git a/lib/python3.10/site-packages/skimage/data/grass.png b/lib/python3.10/site-packages/skimage/data/grass.png
new file mode 100644
index 0000000000000000000000000000000000000000..05d9d08857ba83ce95e65f60896fb55aafd8788b
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/grass.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6b6022426b38936c43a4ac09635cd78af074e90f42ffa8227ac8b7452d39f89
+size 217893
diff --git a/lib/python3.10/site-packages/skimage/data/gravel.png b/lib/python3.10/site-packages/skimage/data/gravel.png
new file mode 100644
index 0000000000000000000000000000000000000000..360b21723fddd631c60f5c1c17300a6274de11ef
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/gravel.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c48615b451bf1e606fbd72c0aa9f8cc0f068ab7111ef7d93bb9b0f2586440c12
+size 194247
diff --git a/lib/python3.10/site-packages/skimage/data/hubble_deep_field.jpg b/lib/python3.10/site-packages/skimage/data/hubble_deep_field.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..317c8954681939567967639a0f8c9837f0f7daab
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/hubble_deep_field.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a19c5dd8a927a9334bb1229a6d63711b1c0c767fb27e2286e7c84a3e2c2f5f4
+size 527940
diff --git a/lib/python3.10/site-packages/skimage/data/ihc.png b/lib/python3.10/site-packages/skimage/data/ihc.png
new file mode 100644
index 0000000000000000000000000000000000000000..a8b9a514464d3a03802ff3dfd895cec77337792d
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/ihc.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8dd1aa387ddd1f49d8ad13b50921b237df8e9b262606d258770687b0ef93cef
+size 477916
diff --git a/lib/python3.10/site-packages/skimage/data/lfw_subset.npy b/lib/python3.10/site-packages/skimage/data/lfw_subset.npy
new file mode 100644
index 0000000000000000000000000000000000000000..05b12d88d30184ac0860359a9efac502373e5b03
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/lfw_subset.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9560ec2f5edfac01973f63a8a99d00053fecd11e21877e18038fbe500f8e872c
+size 1000080
diff --git a/lib/python3.10/site-packages/skimage/data/logo.png b/lib/python3.10/site-packages/skimage/data/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..d1614bc702d3e03aa4b9b9d3e95f1a858e2edefb
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/logo.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2c57fe8af089f08b5ba523d95573c26e62904ac5967f4c8851b27d033690168
+size 179723
diff --git a/lib/python3.10/site-packages/skimage/data/motorcycle_disp.npz b/lib/python3.10/site-packages/skimage/data/motorcycle_disp.npz
new file mode 100644
index 0000000000000000000000000000000000000000..3d9adb1e5f2fe414638d633435e18de3e0238bbf
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/motorcycle_disp.npz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e49c8cebff3fa20359a0cc6880c82e1c03bbb106da81a177218281bc2f113d7
+size 1146173
diff --git a/lib/python3.10/site-packages/skimage/data/motorcycle_left.png b/lib/python3.10/site-packages/skimage/data/motorcycle_left.png
new file mode 100644
index 0000000000000000000000000000000000000000..9ed4bb5dcffd84e2697349f3a4eef338c0e1678f
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/motorcycle_left.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db18e9c4157617403c3537a6ba355dfeafe9a7eabb6b9b94cb33f6525dd49179
+size 644701
diff --git a/lib/python3.10/site-packages/skimage/data/motorcycle_right.png b/lib/python3.10/site-packages/skimage/data/motorcycle_right.png
new file mode 100644
index 0000000000000000000000000000000000000000..71789e194cfe4e7b00b7605c3d96e3385c9cf468
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/motorcycle_right.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fc913ae870e42a4b662314bc904d1786bcad8e2f0b9b67dba5a229406357797
+size 640373
diff --git a/lib/python3.10/site-packages/skimage/data/retina.jpg b/lib/python3.10/site-packages/skimage/data/retina.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c50886ca27ecb948d2cdb8cc28560adf2db3c1cf
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/retina.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38a07f36f27f095e818aea7b96d34202c05176d30253c66733f2e00379e9e0e6
+size 269564
diff --git a/lib/python3.10/site-packages/skimage/data/rocket.jpg b/lib/python3.10/site-packages/skimage/data/rocket.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..07db9b546c88ab35293e5e00c3941316368c5d2c
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/data/rocket.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2dd0de7c538df8d111e479619b129464d0269d0ae5fd18ca91d33a7fdfea95c
+size 112525
diff --git a/lib/python3.10/site-packages/skimage/draw/_draw.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/draw/_draw.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c4b07186c0d60d75a11abb096e96ed97bc275f9f
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/draw/_draw.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d77923a0de4a05abe46b72935c071b20cac4560df4cb3e8dc976dd0427a8e2a1
+size 410080
diff --git a/lib/python3.10/site-packages/skimage/feature/_canny_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_canny_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1309b75395fbd26bfd1939028387c51d34492991
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_canny_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbae13e4f2f29c23ed927912d8373388dd3e24e20c606bb58ddbf134bdcff389
+size 281520
diff --git a/lib/python3.10/site-packages/skimage/feature/_cascade.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_cascade.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7a52080c2a455769d34c6d5cd9c9ad8ff3151956
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_cascade.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95241aa45239816d6902cf27bcc1120085edc2bd9864d1fb59359d8dd400066d
+size 363672
diff --git a/lib/python3.10/site-packages/skimage/feature/_haar.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_haar.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..5cc18c9b21895881aaae43f6fc299d3722459523
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_haar.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3014877442a97135dae69c82d51e547a39de56c59ed3b799e72432649e7a19f4
+size 571056
diff --git a/lib/python3.10/site-packages/skimage/feature/_hoghistogram.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_hoghistogram.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b0b1b10f6d466987bfa7ab949d0906a7e1286024
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_hoghistogram.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbb3e1a66f40c2b2588e42f7fdbac07fa4244a236b7ea15a62fa4f42e4cd4dc6
+size 298968
diff --git a/lib/python3.10/site-packages/skimage/feature/_sift.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_sift.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b6d4b9dc592929949ecdf77ca1a6d02eb2ae8669
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_sift.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:521e4131f155b157dedb805e70dccba386ef5ff95c56e1cdae4d22ddaf2e3e94
+size 358520
diff --git a/lib/python3.10/site-packages/skimage/feature/_texture.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/_texture.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..aad1987d893e3102d85f46be8124009f49ce8d30
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/_texture.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6350c1ac464192df8c1ef8017b23b84ace7983f978dc92f9c495c2629c06a89
+size 398936
diff --git a/lib/python3.10/site-packages/skimage/feature/brief_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/brief_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e7efd3889762b88245fd0d09b856dd5c5fa44379
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/brief_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:721c8ca10b7516be2792a52035c591dc24bba69dc904a54f90fd8c782e8817b6
+size 236344
diff --git a/lib/python3.10/site-packages/skimage/feature/censure_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/censure_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..1d737e5b33522dfd850a5dd243f1ea296cadb756
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/censure_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d561e8a6ab7193d9abae5828d95945250299ee16ceddef186cf4e8bb914f0e0
+size 235688
diff --git a/lib/python3.10/site-packages/skimage/feature/corner_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/corner_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..761a6377c202689dbc4b8447820cfdaf733c6f62
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/corner_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78ce58e14c802194a89062ab65cfccb3391a494abf5d7aa41a98319c85557689
+size 377448
diff --git a/lib/python3.10/site-packages/skimage/feature/orb_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/feature/orb_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2184fbf8137794119b0b8ec6d01433313660d71b
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/feature/orb_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d08c4673d9445b662e218c3248fc215b352d614af73ca0b4073362b3214870d1
+size 282768
diff --git a/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ac4b261869be988452b572456da6080c08419872
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/_multiotsu.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1952be27b56a0bb6d6ad745f3a405b0e20ec84d58d87749676f9c1d99568ec0
+size 267568
diff --git a/lib/python3.10/site-packages/skimage/filters/rank/bilateral_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/rank/bilateral_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..2952eb4e816d25874c6663da922613f400a2a0a6
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/rank/bilateral_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b47ab650a62ae0854e1a71a73ad6553d3182f013549902675f3374b407a67e82
+size 577496
diff --git a/lib/python3.10/site-packages/skimage/filters/rank/core_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/rank/core_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f44c89845ff8e25c566beb87d6cdafaa1d5b674f
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/rank/core_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a952d9ec7c5e9c5564b8fd1e077e43a368c94c7a951e4f94bdd5c1aab3eae4f
+size 608488
diff --git a/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..57490c810aa56aea8b5b0d20a79eed278a7a176c
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/rank/core_cy_3d.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3af1c934f7122f6feb29fd0cd2d904ddd358a1e2e7a7e787288e63f80e16f56
+size 362952
diff --git a/lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..cfaf1fcf2de9b37a276131baf1627a2db47ddbd6
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/rank/generic_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1911347cda39f3d7b5060cc5be713d3d1c67a4062e2305777438f4dacbb29257
+size 3972808
diff --git a/lib/python3.10/site-packages/skimage/filters/rank/percentile_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/filters/rank/percentile_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..38a4696d9aee56f86b07f84615586579b1ae05c0
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/filters/rank/percentile_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ed5f874db3abe5db07a6f8170a2b20332ab73b18e47fd2c4f5476ba1fc85d7a
+size 1247280
diff --git a/lib/python3.10/site-packages/skimage/graph/_mcp.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/graph/_mcp.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..45d558cdc957621f0a55a0d13ba834eb777649eb
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/graph/_mcp.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fe784eeac29066773b0cb4eaf8fa0854625ac1a317f54866ce28c7c20d812e8
+size 545712
diff --git a/lib/python3.10/site-packages/skimage/graph/_ncut_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/graph/_ncut_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..65968206207b8fd63bd61afa5a8ff42794f2fdb2
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/graph/_ncut_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f6686384cd838d375d7be095bc3dbe16ff9fa8b713e2cd60c0d6ef6b41fcd78
+size 299592
diff --git a/lib/python3.10/site-packages/skimage/graph/_spath.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/graph/_spath.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..7d531a004fc8424c736e6d45b4dea657c3a1a015
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/graph/_spath.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da1faab997d92073607e6ff0bc2e2b105529e94e14e8362c959080321f30957d
+size 260320
diff --git a/lib/python3.10/site-packages/skimage/graph/heap.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/graph/heap.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c8a813cf990933eec5d0c457c2e796785b261f96
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/graph/heap.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:173ffaa721061222365daec4b03896a6a6b33b66e2f46ef2a64f657397facf59
+size 138360
diff --git a/lib/python3.10/site-packages/skimage/measure/_ccomp.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/measure/_ccomp.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fd3ffeb419349c47b8a596f429aef2b945dc0537
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/measure/_ccomp.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868593714f266d29ba055bc84834bc385a52656a055c47a4ee1ef522b3fa0db1
+size 132936
diff --git a/lib/python3.10/site-packages/skimage/measure/_find_contours_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/measure/_find_contours_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b0892b4cb331578fa67aa535f524ebb8a06905e4
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/measure/_find_contours_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c93ab38e8938413325d4cf19a10f8dbc6ace86a40d586d15db6451f551703ea
+size 244488
diff --git a/lib/python3.10/site-packages/skimage/measure/_marching_cubes_lewiner_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/measure/_marching_cubes_lewiner_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c5b41e6bbb99b0c8baf19c49705406165fe963a1
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/measure/_marching_cubes_lewiner_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:954da02662e57517739f427acec718aef6f13d9e97f8cce2600a67e710310f70
+size 385072
diff --git a/lib/python3.10/site-packages/skimage/measure/_moments_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/measure/_moments_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3c60cd0402a9714585bf6b6d9a53d5e7dc783e0a
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/measure/_moments_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bb7849080de8f131177cd31d6cdb3e509f3717394c1979b03ecab3afa0faddf
+size 277144
diff --git a/lib/python3.10/site-packages/skimage/measure/_pnpoly.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/measure/_pnpoly.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..8db3f34711a0cbfacc43109f4f843505a959fc49
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/measure/_pnpoly.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d08ef0bdc9243e81a2fbd52344d3fbd0191a6c5e083e943dbaa627363a9cdbe
+size 259736
diff --git a/lib/python3.10/site-packages/skimage/morphology/_convex_hull.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_convex_hull.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..88a587a35119dc4581f93bcaca09a1b25b0c0a02
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_convex_hull.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc171a18e771af6779e73c07d1d8e99c1b2b9af297415cc8b80ae7b5ad72a97f
+size 236312
diff --git a/lib/python3.10/site-packages/skimage/morphology/_extrema_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_extrema_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b20dc8672dfd07b670a3f23e7fa538bfec8cf588
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_extrema_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27b63f4fae145e457a15a486ee39f1ea58ec1430a984ea8e0b336861138cc11e
+size 338728
diff --git a/lib/python3.10/site-packages/skimage/morphology/_flood_fill_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_flood_fill_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e98841ffa44b0b710b66339d27e61b1e62bcf171
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_flood_fill_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b4e5f9b0c045601a192473e5b9ae67cf5a30cf7982be5838fd0381fd3ecafbb
+size 410264
diff --git a/lib/python3.10/site-packages/skimage/morphology/_grayreconstruct.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_grayreconstruct.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..f4790dc8efcf57eaeedb0b64f89cb72a7bc863c8
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_grayreconstruct.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ebe3a3249a3e49f28d5147c96a2968e07e90fb526293e9a6e6d5eb66c69fab
+size 294480
diff --git a/lib/python3.10/site-packages/skimage/morphology/_max_tree.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_max_tree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..246647ae690583052831f62447138ace1d9b4b53
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_max_tree.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ecad2adda322c2a369615a6308daa8fc9e10b24b0907af951be89e525d4bc24
+size 924168
diff --git a/lib/python3.10/site-packages/skimage/morphology/_misc_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_misc_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..410d08c058134a253b7954bdaacfd3367739a16c
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_misc_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9956019a093876b0ad7b2e9a2eb0619e79560e6b9606e7d273b8e6473450a88f
+size 341440
diff --git a/lib/python3.10/site-packages/skimage/morphology/_skeletonize_lee_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_skeletonize_lee_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..18e13c53dda3da07206ccd6439360ba135c0df63
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_skeletonize_lee_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62ff506a3fbc42d28f18d545073a1d87f226664a21fc4003e7d9876c176f6b51
+size 254424
diff --git a/lib/python3.10/site-packages/skimage/morphology/_skeletonize_various_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/morphology/_skeletonize_various_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..e397691e09006626a53dd5506bbc6749d181ddb0
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/_skeletonize_various_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ed17980cc9fa3ea8c7e9beada6e11bfa71350178eed12c34d7f08f1330de405
+size 259584
diff --git a/lib/python3.10/site-packages/skimage/morphology/ball_decompositions.npy b/lib/python3.10/site-packages/skimage/morphology/ball_decompositions.npy
new file mode 100644
index 0000000000000000000000000000000000000000..c9cac4b69aa817fcda30ef426f65018453a0b286
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/ball_decompositions.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f9eb51f361fd7d7d22d342dec7bb98178a21aa9c944507f25898b8ca213e54d
+size 431
diff --git a/lib/python3.10/site-packages/skimage/morphology/disk_decompositions.npy b/lib/python3.10/site-packages/skimage/morphology/disk_decompositions.npy
new file mode 100644
index 0000000000000000000000000000000000000000..b91800468ecb8bf69110053fc81b3e27e4c01e1a
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/morphology/disk_decompositions.npy
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:769739e7b3d8c7061993682cdf45a0c048940952fb78e31b172d830f7e78e7ee
+size 881
diff --git a/lib/python3.10/site-packages/skimage/restoration/_denoise_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_denoise_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..75a75cd5b8df304c2a8bc1c6e1b1adc2923fe0ad
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_denoise_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f326e54a57fddef59a4153ac46844ca3288d49f60a8f5908d9b4f73c809daa
+size 371536
diff --git a/lib/python3.10/site-packages/skimage/restoration/_inpaint.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_inpaint.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..58392801f68dccb9ca67f8174fd0a383f22491d2
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_inpaint.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a3d452686c1ee2172b45beca85d112d3f6e0f64a2a87d5a91458297bcf27b90
+size 281040
diff --git a/lib/python3.10/site-packages/skimage/restoration/_nl_means_denoising.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_nl_means_denoising.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d0a738059809d6d255f7464ff49df00f1b28d3f1
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_nl_means_denoising.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96905ac92d19f913cfdb8dc649673977c2ec37c0075456e0efd7e15cb8f60c9c
+size 635280
diff --git a/lib/python3.10/site-packages/skimage/restoration/_rolling_ball_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_rolling_ball_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..380423240b193c32efa18e11178ad31daa17c3b5
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_rolling_ball_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74e290157380662036bb56ba61f3ab26b98636fc2b6cd03a6e7f6a7dc3f068a2
+size 327960
diff --git a/lib/python3.10/site-packages/skimage/restoration/_unwrap_1d.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_unwrap_1d.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c83b26b4f6735d568e36523b1d354fa98cc87a1b
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_unwrap_1d.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf4c388b32c19e6c0c5f300169aec2b2d872b5ec02ecbecd0d134f398f77c80b
+size 227056
diff --git a/lib/python3.10/site-packages/skimage/restoration/_unwrap_2d.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_unwrap_2d.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d30696c1d85b4f1c867d51d78306725c4407edd7
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_unwrap_2d.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c83929f34fb8c3277b084441942c62006473dfd10b8e470dcf9ad2f01906717
+size 244520
diff --git a/lib/python3.10/site-packages/skimage/restoration/_unwrap_3d.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/restoration/_unwrap_3d.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..d2522f31a1b8941c71d9e320d8b98f1ffb0b15a3
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/restoration/_unwrap_3d.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c13e996500a1b8cc0e339a1516afa42e2201cecd5d4a6e3ab4165cc8ea693b7
+size 261104
diff --git a/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..59e74ad83335038e0f6e47478854df3e83c5904e
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/segmentation/_felzenszwalb_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:172d5c3540105cc851659327c36e51d95dcc88f704a53c9b86d7bd7a54b9d2f0
+size 182264
diff --git a/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..c9dbc015c85f2cd66788be6e340448c1edf2a246
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/segmentation/_quickshift_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82b383ba6f13de01805da6ca2129e502f54c6a4083623b3d9c46bb981ce48e68
+size 326120
diff --git a/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..ac84f1077d31ad63bad1b040d0e19b18069176f6
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/segmentation/_slic.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b97d85f2a0ae76f81fffe005ec59de0b62ef89f8e0ad785bcade9eebc1d11696
+size 340408
diff --git a/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bcc73eb9c313ac86dfe9963da3324362bef19925
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/segmentation/_watershed_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6484f9adbe82ec0a41ea4d7b8d0fa9c63f90724133853e1f88d0d56adc312dd6
+size 342240
diff --git a/lib/python3.10/site-packages/skimage/transform/_hough_transform.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/transform/_hough_transform.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..21fbc6be8f3ce975665e0d5af440b9cf961556c0
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/transform/_hough_transform.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c77bd876dffa0d17c479952306ff46b12bdcc949564fcc5a9452cb094260f3b2
+size 354160
diff --git a/lib/python3.10/site-packages/skimage/transform/_radon_transform.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/transform/_radon_transform.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..bf31d1c6ba3d6277b645411f2d637bb175f6dd96
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/transform/_radon_transform.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b65ff1cdfed8dd2bc1a30a63b62b8302ead61f982f16214e7f8b48e070113d9
+size 294088
diff --git a/lib/python3.10/site-packages/skimage/transform/_warps_cy.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/transform/_warps_cy.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3f25076c0b53256e15bd67b38b5733b4482b139d
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/transform/_warps_cy.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71f8b4da5721938c2cf7a200532ba7f1a57f4546af71a9e960dd18b782d6e4e
+size 309000
diff --git a/lib/python3.10/site-packages/skimage/util/_remap.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/skimage/util/_remap.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..b2be4c5702d2f0b6483069699797605ce9f000c7
--- /dev/null
+++ b/lib/python3.10/site-packages/skimage/util/_remap.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63d33f34af12892317bdeb33e84444f9e967a16a6a1c99d0491053beacdb0158
+size 863048
diff --git a/lib/python3.10/site-packages/tensorboard/webfiles.zip b/lib/python3.10/site-packages/tensorboard/webfiles.zip
new file mode 100644
index 0000000000000000000000000000000000000000..4982ecdbd6d8814f2f52c822033f7139cb3cfa46
--- /dev/null
+++ b/lib/python3.10/site-packages/tensorboard/webfiles.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42fb3437df2398e0904a400cc2d84ba7122014151c0e9540504f6a844c44c39b
+size 4497284