content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
import functools\nimport itertools\nimport platform\nimport sys\n\nimport pytest\n\nfrom mpl_toolkits.mplot3d import Axes3D, axes3d, proj3d, art3d\nfrom mpl_toolkits.mplot3d.axes3d import _Quaternion as Quaternion\nimport matplotlib as mpl\nfrom matplotlib.backend_bases import (MouseButton, MouseEvent,\n NavigationToolbar2)\nfrom matplotlib import cm\nfrom matplotlib import colors as mcolors, patches as mpatch\nfrom matplotlib.testing.decorators import image_comparison, check_figures_equal\nfrom matplotlib.testing.widgets import mock_event\nfrom matplotlib.collections import LineCollection, PolyCollection\nfrom matplotlib.patches import Circle, PathPatch\nfrom matplotlib.path import Path\nfrom matplotlib.text import Text\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nmpl3d_image_comparison = functools.partial(\n image_comparison, remove_text=True, style='default')\n\n\ndef plot_cuboid(ax, scale):\n # plot a rectangular cuboid with side lengths given by scale (x, y, z)\n r = [0, 1]\n pts = itertools.combinations(np.array(list(itertools.product(r, r, r))), 2)\n for start, end in pts:\n if np.sum(np.abs(start - end)) == r[1] - r[0]:\n ax.plot3D(*zip(start*np.array(scale), end*np.array(scale)))\n\n\n@check_figures_equal(extensions=["png"])\ndef test_invisible_axes(fig_test, fig_ref):\n ax = fig_test.subplots(subplot_kw=dict(projection='3d'))\n ax.set_visible(False)\n\n\n@mpl3d_image_comparison(['grid_off.png'], style='mpl20')\ndef test_grid_off():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.grid(False)\n\n\n@mpl3d_image_comparison(['invisible_ticks_axis.png'], style='mpl20')\ndef test_invisible_ticks_axis():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n for axis in [ax.xaxis, ax.yaxis, ax.zaxis]:\n axis.line.set_visible(False)\n\n\n@mpl3d_image_comparison(['axis_positions.png'], remove_text=False, style='mpl20')\ndef test_axis_positions():\n positions = 
['upper', 'lower', 'both', 'none']\n fig, axs = plt.subplots(2, 2, subplot_kw={'projection': '3d'})\n for ax, pos in zip(axs.flatten(), positions):\n for axis in ax.xaxis, ax.yaxis, ax.zaxis:\n axis.set_label_position(pos)\n axis.set_ticks_position(pos)\n title = f'{pos}'\n ax.set(xlabel='x', ylabel='y', zlabel='z', title=title)\n\n\n@mpl3d_image_comparison(['aspects.png'], remove_text=False, style='mpl20')\ndef test_aspects():\n aspects = ('auto', 'equal', 'equalxy', 'equalyz', 'equalxz', 'equal')\n _, axs = plt.subplots(2, 3, subplot_kw={'projection': '3d'})\n\n for ax in axs.flatten()[0:-1]:\n plot_cuboid(ax, scale=[1, 1, 5])\n # plot a cube as well to cover github #25443\n plot_cuboid(axs[1][2], scale=[1, 1, 1])\n\n for i, ax in enumerate(axs.flatten()):\n ax.set_title(aspects[i])\n ax.set_box_aspect((3, 4, 5))\n ax.set_aspect(aspects[i], adjustable='datalim')\n axs[1][2].set_title('equal (cube)')\n\n\n@mpl3d_image_comparison(['aspects_adjust_box.png'],\n remove_text=False, style='mpl20')\ndef test_aspects_adjust_box():\n aspects = ('auto', 'equal', 'equalxy', 'equalyz', 'equalxz')\n fig, axs = plt.subplots(1, len(aspects), subplot_kw={'projection': '3d'},\n figsize=(11, 3))\n\n for i, ax in enumerate(axs):\n plot_cuboid(ax, scale=[4, 3, 5])\n ax.set_title(aspects[i])\n ax.set_aspect(aspects[i], adjustable='box')\n\n\ndef test_axes3d_repr():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.set_label('label')\n ax.set_title('title')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n assert repr(ax) == (\n "<Axes3D: label='label', "\n "title={'center': 'title'}, xlabel='x', ylabel='y', zlabel='z'>")\n\n\n@mpl3d_image_comparison(['axes3d_primary_views.png'], style='mpl20',\n tol=0.05 if sys.platform == "darwin" else 0)\ndef test_axes3d_primary_views():\n # (elev, azim, roll)\n views = [(90, -90, 0), # XY\n (0, -90, 0), # XZ\n (0, 0, 0), # YZ\n (-90, 90, 0), # -XY\n (0, 90, 0), # -XZ\n (0, 180, 0)] # -YZ\n # When viewing primary 
planes, draw the two visible axes so they intersect\n # at their low values\n fig, axs = plt.subplots(2, 3, subplot_kw={'projection': '3d'})\n for i, ax in enumerate(axs.flat):\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.set_proj_type('ortho')\n ax.view_init(elev=views[i][0], azim=views[i][1], roll=views[i][2])\n plt.tight_layout()\n\n\n@mpl3d_image_comparison(['bar3d.png'], style='mpl20')\ndef test_bar3d():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):\n xs = np.arange(20)\n ys = np.arange(20)\n cs = [c] * len(xs)\n cs[0] = 'c'\n ax.bar(xs, ys, zs=z, zdir='y', align='edge', color=cs, alpha=0.8)\n\n\ndef test_bar3d_colors():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n for c in ['red', 'green', 'blue', 'yellow']:\n xs = np.arange(len(c))\n ys = np.zeros_like(xs)\n zs = np.zeros_like(ys)\n # Color names with same length as xs/ys/zs should not be split into\n # individual letters.\n ax.bar3d(xs, ys, zs, 1, 1, 1, color=c)\n\n\n@mpl3d_image_comparison(['bar3d_shaded.png'], style='mpl20')\ndef test_bar3d_shaded():\n x = np.arange(4)\n y = np.arange(5)\n x2d, y2d = np.meshgrid(x, y)\n x2d, y2d = x2d.ravel(), y2d.ravel()\n z = x2d + y2d + 1 # Avoid triggering bug with zero-depth boxes.\n\n views = [(30, -60, 0), (30, 30, 30), (-30, 30, -90), (300, -30, 0)]\n fig = plt.figure(figsize=plt.figaspect(1 / len(views)))\n axs = fig.subplots(\n 1, len(views),\n subplot_kw=dict(projection='3d')\n )\n for ax, (elev, azim, roll) in zip(axs, views):\n ax.bar3d(x2d, y2d, x2d * 0, 1, 1, z, shade=True)\n ax.view_init(elev=elev, azim=azim, roll=roll)\n fig.canvas.draw()\n\n\n@mpl3d_image_comparison(['bar3d_notshaded.png'], style='mpl20')\ndef test_bar3d_notshaded():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n x = np.arange(4)\n y = np.arange(5)\n x2d, y2d = np.meshgrid(x, y)\n x2d, y2d = x2d.ravel(), y2d.ravel()\n z = x2d + y2d\n ax.bar3d(x2d, y2d, x2d * 
0, 1, 1, z, shade=False)\n fig.canvas.draw()\n\n\ndef test_bar3d_lightsource():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection="3d")\n\n ls = mcolors.LightSource(azdeg=0, altdeg=90)\n\n length, width = 3, 4\n area = length * width\n\n x, y = np.meshgrid(np.arange(length), np.arange(width))\n x = x.ravel()\n y = y.ravel()\n dz = x + y\n\n color = [cm.coolwarm(i/area) for i in range(area)]\n\n collection = ax.bar3d(x=x, y=y, z=0,\n dx=1, dy=1, dz=dz,\n color=color, shade=True, lightsource=ls)\n\n # Testing that the custom 90° lightsource produces different shading on\n # the top facecolors compared to the default, and that those colors are\n # precisely (within floating point rounding errors of 4 ULP) the colors\n # from the colormap, due to the illumination parallel to the z-axis.\n np.testing.assert_array_max_ulp(color, collection._facecolor3d[1::6], 4)\n\n\n@mpl3d_image_comparison(['contour3d.png'], style='mpl20',\n tol=0 if platform.machine() == 'x86_64' else 0.002)\ndef test_contour3d():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)\n ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)\n ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)\n ax.axis(xmin=-40, xmax=40, ymin=-40, ymax=40, zmin=-100, zmax=100)\n\n\n@mpl3d_image_comparison(['contour3d_extend3d.png'], style='mpl20')\ndef test_contour3d_extend3d():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm, extend3d=True)\n ax.set_xlim(-30, 30)\n ax.set_ylim(-20, 40)\n ax.set_zlim(-80, 80)\n\n\n@mpl3d_image_comparison(['contourf3d.png'], style='mpl20')\ndef test_contourf3d():\n 
plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)\n ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)\n ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)\n ax.set_xlim(-40, 40)\n ax.set_ylim(-40, 40)\n ax.set_zlim(-100, 100)\n\n\n@mpl3d_image_comparison(['contourf3d_fill.png'], style='mpl20')\ndef test_contourf3d_fill():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y = np.meshgrid(np.arange(-2, 2, 0.25), np.arange(-2, 2, 0.25))\n Z = X.clip(0, 0)\n # This produces holes in the z=0 surface that causes rendering errors if\n # the Poly3DCollection is not aware of path code information (issue #4784)\n Z[::5, ::5] = 0.1\n ax.contourf(X, Y, Z, offset=0, levels=[-0.1, 0], cmap=cm.coolwarm)\n ax.set_xlim(-2, 2)\n ax.set_ylim(-2, 2)\n ax.set_zlim(-1, 1)\n\n\n@pytest.mark.parametrize('extend, levels', [['both', [2, 4, 6]],\n ['min', [2, 4, 6, 8]],\n ['max', [0, 2, 4, 6]]])\n@check_figures_equal(extensions=["png"])\ndef test_contourf3d_extend(fig_test, fig_ref, extend, levels):\n X, Y = np.meshgrid(np.arange(-2, 2, 0.25), np.arange(-2, 2, 0.25))\n # Z is in the range [0, 8]\n Z = X**2 + Y**2\n\n # Manually set the over/under colors to be the end of the colormap\n cmap = mpl.colormaps['viridis'].copy()\n cmap.set_under(cmap(0))\n cmap.set_over(cmap(255))\n # Set vmin/max to be the min/max values plotted on the reference image\n kwargs = {'vmin': 1, 'vmax': 7, 'cmap': cmap}\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.contourf(X, Y, Z, levels=[0, 2, 4, 6, 8], **kwargs)\n\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.contourf(X, Y, Z, levels, extend=extend, **kwargs)\n\n for ax in [ax_ref, ax_test]:\n ax.set_xlim(-2, 2)\n ax.set_ylim(-2, 2)\n 
ax.set_zlim(-10, 10)\n\n\n@mpl3d_image_comparison(['tricontour.png'], tol=0.02, style='mpl20')\ndef test_tricontour():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n\n np.random.seed(19680801)\n x = np.random.rand(1000) - 0.5\n y = np.random.rand(1000) - 0.5\n z = -(x**2 + y**2)\n\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.tricontour(x, y, z)\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.tricontourf(x, y, z)\n\n\ndef test_contour3d_1d_input():\n # Check that 1D sequences of different length for {x, y} doesn't error\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n nx, ny = 30, 20\n x = np.linspace(-10, 10, nx)\n y = np.linspace(-10, 10, ny)\n z = np.random.randint(0, 2, [ny, nx])\n ax.contour(x, y, z, [0.5])\n\n\n@mpl3d_image_comparison(['lines3d.png'], style='mpl20')\ndef test_lines3d():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\n z = np.linspace(-2, 2, 100)\n r = z ** 2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n ax.plot(x, y, z)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_plot_scalar(fig_test, fig_ref):\n ax1 = fig_test.add_subplot(projection='3d')\n ax1.plot([1], [1], "o")\n ax2 = fig_ref.add_subplot(projection='3d')\n ax2.plot(1, 1, "o")\n\n\ndef test_invalid_line_data():\n with pytest.raises(RuntimeError, match='x must be'):\n art3d.Line3D(0, [], [])\n with pytest.raises(RuntimeError, match='y must be'):\n art3d.Line3D([], 0, [])\n with pytest.raises(RuntimeError, match='z must be'):\n art3d.Line3D([], [], 0)\n\n line = art3d.Line3D([], [], [])\n with pytest.raises(RuntimeError, match='x must be'):\n line.set_data_3d(0, [], [])\n with pytest.raises(RuntimeError, match='y must be'):\n line.set_data_3d([], 0, [])\n with pytest.raises(RuntimeError, match='z must be'):\n line.set_data_3d([], [], 0)\n\n\n@mpl3d_image_comparison(['mixedsubplot.png'], style='mpl20')\ndef 
test_mixedsubplots():\n def f(t):\n return np.cos(2*np.pi*t) * np.exp(-t)\n\n t1 = np.arange(0.0, 5.0, 0.1)\n t2 = np.arange(0.0, 5.0, 0.02)\n\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure(figsize=plt.figaspect(2.))\n ax = fig.add_subplot(2, 1, 1)\n ax.plot(t1, f(t1), 'bo', t2, f(t2), 'k--', markerfacecolor='green')\n ax.grid(True)\n\n ax = fig.add_subplot(2, 1, 2, projection='3d')\n X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25))\n R = np.hypot(X, Y)\n Z = np.sin(R)\n\n ax.plot_surface(X, Y, Z, rcount=40, ccount=40,\n linewidth=0, antialiased=False)\n\n ax.set_zlim3d(-1, 1)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_tight_layout_text(fig_test, fig_ref):\n # text is currently ignored in tight layout. So the order of text() and\n # tight_layout() calls should not influence the result.\n ax1 = fig_test.add_subplot(projection='3d')\n ax1.text(.5, .5, .5, s='some string')\n fig_test.tight_layout()\n\n ax2 = fig_ref.add_subplot(projection='3d')\n fig_ref.tight_layout()\n ax2.text(.5, .5, .5, s='some string')\n\n\n@mpl3d_image_comparison(['scatter3d.png'], style='mpl20')\ndef test_scatter3d():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(np.arange(10), np.arange(10), np.arange(10),\n c='r', marker='o')\n x = y = z = np.arange(10, 20)\n ax.scatter(x, y, z, c='b', marker='^')\n z[-1] = 0 # Check that scatter() copies the data.\n # Ensure empty scatters do not break.\n ax.scatter([], [], [], c='r', marker='X')\n\n\n@mpl3d_image_comparison(['scatter3d_color.png'], style='mpl20')\ndef test_scatter3d_color():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n # Check that 'none' color works; these two should overlay to produce the\n # same as setting just `color`.\n ax.scatter(np.arange(10), 
np.arange(10), np.arange(10),\n facecolor='r', edgecolor='none', marker='o')\n ax.scatter(np.arange(10), np.arange(10), np.arange(10),\n facecolor='none', edgecolor='r', marker='o')\n\n ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20),\n color='b', marker='s')\n\n\n@mpl3d_image_comparison(['scatter3d_linewidth.png'], style='mpl20')\ndef test_scatter3d_linewidth():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n # Check that array-like linewidth can be set\n ax.scatter(np.arange(10), np.arange(10), np.arange(10),\n marker='o', linewidth=np.arange(10))\n\n\n@check_figures_equal(extensions=['png'])\ndef test_scatter3d_linewidth_modification(fig_ref, fig_test):\n # Changing Path3DCollection linewidths with array-like post-creation\n # should work correctly.\n ax_test = fig_test.add_subplot(projection='3d')\n c = ax_test.scatter(np.arange(10), np.arange(10), np.arange(10),\n marker='o')\n c.set_linewidths(np.arange(10))\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.scatter(np.arange(10), np.arange(10), np.arange(10), marker='o',\n linewidths=np.arange(10))\n\n\n@check_figures_equal(extensions=['png'])\ndef test_scatter3d_modification(fig_ref, fig_test):\n # Changing Path3DCollection properties post-creation should work correctly.\n ax_test = fig_test.add_subplot(projection='3d')\n c = ax_test.scatter(np.arange(10), np.arange(10), np.arange(10),\n marker='o')\n c.set_facecolor('C1')\n c.set_edgecolor('C2')\n c.set_alpha([0.3, 0.7] * 5)\n assert c.get_depthshade()\n c.set_depthshade(False)\n assert not c.get_depthshade()\n c.set_sizes(np.full(10, 75))\n c.set_linewidths(3)\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.scatter(np.arange(10), np.arange(10), np.arange(10), marker='o',\n facecolor='C1', edgecolor='C2', alpha=[0.3, 0.7] * 5,\n depthshade=False, s=75, linewidths=3)\n\n\n@pytest.mark.parametrize('depthshade', [True, False])\n@check_figures_equal(extensions=['png'])\ndef test_scatter3d_sorting(fig_ref, 
fig_test, depthshade):\n """Test that marker properties are correctly sorted."""\n\n y, x = np.mgrid[:10, :10]\n z = np.arange(x.size).reshape(x.shape)\n\n sizes = np.full(z.shape, 25)\n sizes[0::2, 0::2] = 100\n sizes[1::2, 1::2] = 100\n\n facecolors = np.full(z.shape, 'C0')\n facecolors[:5, :5] = 'C1'\n facecolors[6:, :4] = 'C2'\n facecolors[6:, 6:] = 'C3'\n\n edgecolors = np.full(z.shape, 'C4')\n edgecolors[1:5, 1:5] = 'C5'\n edgecolors[5:9, 1:5] = 'C6'\n edgecolors[5:9, 5:9] = 'C7'\n\n linewidths = np.full(z.shape, 2)\n linewidths[0::2, 0::2] = 5\n linewidths[1::2, 1::2] = 5\n\n x, y, z, sizes, facecolors, edgecolors, linewidths = (\n a.flatten()\n for a in [x, y, z, sizes, facecolors, edgecolors, linewidths]\n )\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n sets = (np.unique(a) for a in [sizes, facecolors, edgecolors, linewidths])\n for s, fc, ec, lw in itertools.product(*sets):\n subset = (\n (sizes != s) |\n (facecolors != fc) |\n (edgecolors != ec) |\n (linewidths != lw)\n )\n subset = np.ma.masked_array(z, subset, dtype=float)\n\n # When depth shading is disabled, the colors are passed through as\n # single-item lists; this triggers single path optimization. The\n # following reshaping is a hack to disable that, since the optimization\n # would not occur for the full scatter which has multiple colors.\n fc = np.repeat(fc, sum(~subset.mask))\n\n ax_ref.scatter(x, y, subset, s=s, fc=fc, ec=ec, lw=lw, alpha=1,\n depthshade=depthshade)\n\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.scatter(x, y, z, s=sizes, fc=facecolors, ec=edgecolors,\n lw=linewidths, alpha=1, depthshade=depthshade)\n\n\n@pytest.mark.parametrize('azim', [-50, 130]) # yellow first, blue first\n@check_figures_equal(extensions=['png'])\ndef test_marker_draw_order_data_reversed(fig_test, fig_ref, azim):\n """\n Test that the draw order does not depend on the data point order.\n\n For the given viewing angle at azim=-50, the yellow marker should be in\n front. 
For azim=130, the blue marker should be in front.\n """\n x = [-1, 1]\n y = [1, -1]\n z = [0, 0]\n color = ['b', 'y']\n ax = fig_test.add_subplot(projection='3d')\n ax.scatter(x, y, z, s=3500, c=color)\n ax.view_init(elev=0, azim=azim, roll=0)\n ax = fig_ref.add_subplot(projection='3d')\n ax.scatter(x[::-1], y[::-1], z[::-1], s=3500, c=color[::-1])\n ax.view_init(elev=0, azim=azim, roll=0)\n\n\n@check_figures_equal(extensions=['png'])\ndef test_marker_draw_order_view_rotated(fig_test, fig_ref):\n """\n Test that the draw order changes with the direction.\n\n If we rotate *azim* by 180 degrees and exchange the colors, the plot\n plot should look the same again.\n """\n azim = 130\n x = [-1, 1]\n y = [1, -1]\n z = [0, 0]\n color = ['b', 'y']\n ax = fig_test.add_subplot(projection='3d')\n # axis are not exactly invariant under 180 degree rotation -> deactivate\n ax.set_axis_off()\n ax.scatter(x, y, z, s=3500, c=color)\n ax.view_init(elev=0, azim=azim, roll=0)\n ax = fig_ref.add_subplot(projection='3d')\n ax.set_axis_off()\n ax.scatter(x, y, z, s=3500, c=color[::-1]) # color reversed\n ax.view_init(elev=0, azim=azim - 180, roll=0) # view rotated by 180 deg\n\n\n@mpl3d_image_comparison(['plot_3d_from_2d.png'], tol=0.019, style='mpl20')\ndef test_plot_3d_from_2d():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n xs = np.arange(0, 5)\n ys = np.arange(5, 10)\n ax.plot(xs, ys, zs=0, zdir='x')\n ax.plot(xs, ys, zs=0, zdir='y')\n\n\n@mpl3d_image_comparison(['fill_between_quad.png'], style='mpl20')\ndef test_fill_between_quad():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n theta = np.linspace(0, 2*np.pi, 50)\n\n x1 = np.cos(theta)\n y1 = np.sin(theta)\n z1 = 0.1 * np.sin(6 * theta)\n\n x2 = 0.6 * np.cos(theta)\n y2 = 0.6 * np.sin(theta)\n z2 = 2\n\n where = (theta < np.pi/2) | (theta > 3*np.pi/2)\n\n # Since none of x1 == x2, y1 == y2, or z1 == z2 is True, the fill_between\n # mode will map to 'quad'\n ax.fill_between(x1, y1, z1, x2, y2, 
z2,\n where=where, mode='auto', alpha=0.5, edgecolor='k')\n\n\n@mpl3d_image_comparison(['fill_between_polygon.png'], style='mpl20')\ndef test_fill_between_polygon():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n theta = np.linspace(0, 2*np.pi, 50)\n\n x1 = x2 = theta\n y1 = y2 = 0\n z1 = np.cos(theta)\n z2 = z1 + 1\n\n where = (theta < np.pi/2) | (theta > 3*np.pi/2)\n\n # Since x1 == x2 and y1 == y2, the fill_between mode will be 'polygon'\n ax.fill_between(x1, y1, z1, x2, y2, z2,\n where=where, mode='auto', edgecolor='k')\n\n\n@mpl3d_image_comparison(['surface3d.png'], style='mpl20')\ndef test_surface3d():\n # Remove this line when this test image is regenerated.\n plt.rcParams['pcolormesh.snap'] = False\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X = np.arange(-5, 5, 0.25)\n Y = np.arange(-5, 5, 0.25)\n X, Y = np.meshgrid(X, Y)\n R = np.hypot(X, Y)\n Z = np.sin(R)\n surf = ax.plot_surface(X, Y, Z, rcount=40, ccount=40, cmap=cm.coolwarm,\n lw=0, antialiased=False)\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_zlim(-1.01, 1.01)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n\n@image_comparison(['surface3d_label_offset_tick_position.png'], style='mpl20')\ndef test_surface3d_label_offset_tick_position():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax = plt.figure().add_subplot(projection="3d")\n\n x, y = np.mgrid[0:6 * np.pi:0.25, 0:4 * np.pi:0.25]\n z = np.sqrt(np.abs(np.cos(x) + np.cos(y)))\n\n ax.plot_surface(x * 1e5, y * 1e6, z * 1e8, cmap='autumn', cstride=2, rstride=2)\n ax.set_xlabel("X label")\n ax.set_ylabel("Y label")\n ax.set_zlabel("Z label")\n\n\n@mpl3d_image_comparison(['surface3d_shaded.png'], style='mpl20')\ndef test_surface3d_shaded():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X = np.arange(-5, 5, 0.25)\n Y = np.arange(-5, 5, 0.25)\n X, Y = np.meshgrid(X, Y)\n R = np.sqrt(X ** 2 + Y ** 2)\n Z = np.sin(R)\n 
ax.plot_surface(X, Y, Z, rstride=5, cstride=5,\n color=[0.25, 1, 0.25], lw=1, antialiased=False)\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_zlim(-1.01, 1.01)\n\n\n@mpl3d_image_comparison(['surface3d_masked.png'], style='mpl20')\ndef test_surface3d_masked():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n y = [1, 2, 3, 4, 5, 6, 7, 8]\n\n x, y = np.meshgrid(x, y)\n matrix = np.array(\n [\n [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [-1, 1, 2, 3, 4, 4, 4, 3, 2, 1, 1],\n [-1, -1., 4, 5, 6, 8, 6, 5, 4, 3, -1.],\n [-1, -1., 7, 8, 11, 12, 11, 8, 7, -1., -1.],\n [-1, -1., 8, 9, 10, 16, 10, 9, 10, 7, -1.],\n [-1, -1., -1., 12, 16, 20, 16, 12, 11, -1., -1.],\n [-1, -1., -1., -1., 22, 24, 22, 20, 18, -1., -1.],\n [-1, -1., -1., -1., -1., 28, 26, 25, -1., -1., -1.],\n ]\n )\n z = np.ma.masked_less(matrix, 0)\n norm = mcolors.Normalize(vmax=z.max(), vmin=z.min())\n colors = mpl.colormaps["plasma"](norm(z))\n ax.plot_surface(x, y, z, facecolors=colors)\n ax.view_init(30, -80, 0)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_plot_scatter_masks(fig_test, fig_ref):\n x = np.linspace(0, 10, 100)\n y = np.linspace(0, 10, 100)\n z = np.sin(x) * np.cos(y)\n mask = z > 0\n\n z_masked = np.ma.array(z, mask=mask)\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.scatter(x, y, z_masked)\n ax_test.plot(x, y, z_masked)\n\n x[mask] = y[mask] = z[mask] = np.nan\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.scatter(x, y, z)\n ax_ref.plot(x, y, z)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_plot_surface_None_arg(fig_test, fig_ref):\n x, y = np.meshgrid(np.arange(5), np.arange(5))\n z = x + y\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.plot_surface(x, y, z, facecolors=None)\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.plot_surface(x, y, z)\n\n\n@mpl3d_image_comparison(['surface3d_masked_strides.png'], style='mpl20')\ndef 
test_surface3d_masked_strides():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n x, y = np.mgrid[-6:6.1:1, -6:6.1:1]\n z = np.ma.masked_less(x * y, 2)\n\n ax.plot_surface(x, y, z, rstride=4, cstride=4)\n ax.view_init(60, -45, 0)\n\n\n@mpl3d_image_comparison(['text3d.png'], remove_text=False, style='mpl20')\ndef test_text3d():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))\n xs = (2, 6, 4, 9, 7, 2)\n ys = (6, 4, 8, 7, 2, 2)\n zs = (4, 2, 5, 6, 1, 7)\n\n for zdir, x, y, z in zip(zdirs, xs, ys, zs):\n label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)\n ax.text(x, y, z, label, zdir)\n\n ax.text(1, 1, 1, "red", color='red')\n ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_xlim3d(0, 10)\n ax.set_ylim3d(0, 10)\n ax.set_zlim3d(0, 10)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n\n\n@check_figures_equal(extensions=['png'])\ndef test_text3d_modification(fig_ref, fig_test):\n # Modifying the Text position after the fact should work the same as\n # setting it directly.\n zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))\n xs = (2, 6, 4, 9, 7, 2)\n ys = (6, 4, 8, 7, 2, 2)\n zs = (4, 2, 5, 6, 1, 7)\n\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.set_xlim3d(0, 10)\n ax_test.set_ylim3d(0, 10)\n ax_test.set_zlim3d(0, 10)\n for zdir, x, y, z in zip(zdirs, xs, ys, zs):\n t = ax_test.text(0, 0, 0, f'({x}, {y}, {z}), dir={zdir}')\n t.set_position_3d((x, y, z), zdir=zdir)\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.set_xlim3d(0, 10)\n ax_ref.set_ylim3d(0, 10)\n ax_ref.set_zlim3d(0, 10)\n for zdir, x, y, z in zip(zdirs, xs, ys, zs):\n ax_ref.text(x, y, z, f'({x}, {y}, {z}), dir={zdir}', zdir=zdir)\n\n\n@mpl3d_image_comparison(['trisurf3d.png'], tol=0.061, style='mpl20')\ndef test_trisurf3d():\n n_angles = 36\n n_radii = 8\n radii = 
np.linspace(0.125, 1.0, n_radii)\n angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\n angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\n angles[:, 1::2] += np.pi/n_angles\n\n x = np.append(0, (radii*np.cos(angles)).flatten())\n y = np.append(0, (radii*np.sin(angles)).flatten())\n z = np.sin(-x*y)\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n\n\n@mpl3d_image_comparison(['trisurf3d_shaded.png'], tol=0.03, style='mpl20')\ndef test_trisurf3d_shaded():\n n_angles = 36\n n_radii = 8\n radii = np.linspace(0.125, 1.0, n_radii)\n angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\n angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\n angles[:, 1::2] += np.pi/n_angles\n\n x = np.append(0, (radii*np.cos(angles)).flatten())\n y = np.append(0, (radii*np.sin(angles)).flatten())\n z = np.sin(-x*y)\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.plot_trisurf(x, y, z, color=[1, 0.5, 0], linewidth=0.2)\n\n\n@mpl3d_image_comparison(['wireframe3d.png'], style='mpl20')\ndef test_wireframe3d():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.plot_wireframe(X, Y, Z, rcount=13, ccount=13)\n\n\n@mpl3d_image_comparison(['wireframe3dzerocstride.png'], style='mpl20')\ndef test_wireframe3dzerocstride():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.plot_wireframe(X, Y, Z, rcount=13, ccount=0)\n\n\n@mpl3d_image_comparison(['wireframe3dzerorstride.png'], style='mpl20')\ndef test_wireframe3dzerorstride():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n ax.plot_wireframe(X, Y, Z, rstride=0, cstride=10)\n\n\ndef test_wireframe3dzerostrideraises():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n with pytest.raises(ValueError):\n 
ax.plot_wireframe(X, Y, Z, rstride=0, cstride=0)\n\n\ndef test_mixedsamplesraises():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n X, Y, Z = axes3d.get_test_data(0.05)\n with pytest.raises(ValueError):\n ax.plot_wireframe(X, Y, Z, rstride=10, ccount=50)\n with pytest.raises(ValueError):\n ax.plot_surface(X, Y, Z, cstride=50, rcount=10)\n\n\n# remove tolerance when regenerating the test image\n@mpl3d_image_comparison(['quiver3d.png'], style='mpl20', tol=0.003)\ndef test_quiver3d():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n pivots = ['tip', 'middle', 'tail']\n colors = ['tab:blue', 'tab:orange', 'tab:green']\n for i, (pivot, color) in enumerate(zip(pivots, colors)):\n x, y, z = np.meshgrid([-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5])\n u = -x\n v = -y\n w = -z\n # Offset each set in z direction\n z += 2 * i\n ax.quiver(x, y, z, u, v, w, length=1, pivot=pivot, color=color)\n ax.scatter(x, y, z, color=color)\n\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_zlim(-1, 5)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_quiver3d_empty(fig_test, fig_ref):\n fig_ref.add_subplot(projection='3d')\n x = y = z = u = v = w = []\n ax = fig_test.add_subplot(projection='3d')\n ax.quiver(x, y, z, u, v, w, length=0.1, pivot='tip', normalize=True)\n\n\n@mpl3d_image_comparison(['quiver3d_masked.png'], style='mpl20')\ndef test_quiver3d_masked():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n # Using mgrid here instead of ogrid because masked_where doesn't\n # seem to like broadcasting very much...\n x, y, z = np.mgrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]\n\n u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)\n v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)\n w = (2/3)**0.5 * np.cos(np.pi * x) * np.cos(np.pi * y) * np.sin(np.pi * z)\n u = np.ma.masked_where((-0.4 < x) & (x < 0.1), u, copy=False)\n v = np.ma.masked_where((0.1 < 
y) & (y < 0.7), v, copy=False)\n\n ax.quiver(x, y, z, u, v, w, length=0.1, pivot='tip', normalize=True)\n\n\n@mpl3d_image_comparison(['quiver3d_colorcoded.png'], style='mpl20')\ndef test_quiver3d_colorcoded():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n x = y = dx = dz = np.zeros(10)\n z = dy = np.arange(10.)\n\n color = plt.cm.Reds(dy/dy.max())\n ax.quiver(x, y, z, dx, dy, dz, colors=color)\n ax.set_ylim(0, 10)\n\n\ndef test_patch_modification():\n fig = plt.figure()\n ax = fig.add_subplot(projection="3d")\n circle = Circle((0, 0))\n ax.add_patch(circle)\n art3d.patch_2d_to_3d(circle)\n circle.set_facecolor((1.0, 0.0, 0.0, 1))\n\n assert mcolors.same_color(circle.get_facecolor(), (1, 0, 0, 1))\n fig.canvas.draw()\n assert mcolors.same_color(circle.get_facecolor(), (1, 0, 0, 1))\n\n\n@check_figures_equal(extensions=['png'])\ndef test_patch_collection_modification(fig_test, fig_ref):\n # Test that modifying Patch3DCollection properties after creation works.\n patch1 = Circle((0, 0), 0.05)\n patch2 = Circle((0.1, 0.1), 0.03)\n facecolors = np.array([[0., 0.5, 0., 1.], [0.5, 0., 0., 0.5]])\n c = art3d.Patch3DCollection([patch1, patch2], linewidths=3)\n\n ax_test = fig_test.add_subplot(projection='3d')\n ax_test.add_collection3d(c)\n c.set_edgecolor('C2')\n c.set_facecolor(facecolors)\n c.set_alpha(0.7)\n assert c.get_depthshade()\n c.set_depthshade(False)\n assert not c.get_depthshade()\n\n patch1 = Circle((0, 0), 0.05)\n patch2 = Circle((0.1, 0.1), 0.03)\n facecolors = np.array([[0., 0.5, 0., 1.], [0.5, 0., 0., 0.5]])\n c = art3d.Patch3DCollection([patch1, patch2], linewidths=3,\n edgecolor='C2', facecolor=facecolors,\n alpha=0.7, depthshade=False)\n\n ax_ref = fig_ref.add_subplot(projection='3d')\n ax_ref.add_collection3d(c)\n\n\ndef test_poly3dcollection_verts_validation():\n poly = [[0, 0, 1], [0, 1, 1], [0, 1, 0], [0, 0, 0]]\n with pytest.raises(ValueError, match=r'list of \(N, 3\) array-like'):\n art3d.Poly3DCollection(poly) # should be 
Poly3DCollection([poly])\n\n poly = np.array(poly, dtype=float)\n with pytest.raises(ValueError, match=r'list of \(N, 3\) array-like'):\n art3d.Poly3DCollection(poly) # should be Poly3DCollection([poly])\n\n\n@mpl3d_image_comparison(['poly3dcollection_closed.png'], style='mpl20')\ndef test_poly3dcollection_closed():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n poly1 = np.array([[0, 0, 1], [0, 1, 1], [0, 0, 0]], float)\n poly2 = np.array([[0, 1, 1], [1, 1, 1], [1, 1, 0]], float)\n c1 = art3d.Poly3DCollection([poly1], linewidths=3, edgecolor='k',\n facecolor=(0.5, 0.5, 1, 0.5), closed=True)\n c2 = art3d.Poly3DCollection([poly2], linewidths=3, edgecolor='k',\n facecolor=(1, 0.5, 0.5, 0.5), closed=False)\n ax.add_collection3d(c1, autolim=False)\n ax.add_collection3d(c2, autolim=False)\n\n\ndef test_poly_collection_2d_to_3d_empty():\n poly = PolyCollection([])\n art3d.poly_collection_2d_to_3d(poly)\n assert isinstance(poly, art3d.Poly3DCollection)\n assert poly.get_paths() == []\n\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n ax.add_artist(poly)\n minz = poly.do_3d_projection()\n assert np.isnan(minz)\n\n # Ensure drawing actually works.\n fig.canvas.draw()\n\n\n@mpl3d_image_comparison(['poly3dcollection_alpha.png'], style='mpl20')\ndef test_poly3dcollection_alpha():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n poly1 = np.array([[0, 0, 1], [0, 1, 1], [0, 0, 0]], float)\n poly2 = np.array([[0, 1, 1], [1, 1, 1], [1, 1, 0]], float)\n c1 = art3d.Poly3DCollection([poly1], linewidths=3, edgecolor='k',\n facecolor=(0.5, 0.5, 1), closed=True)\n c1.set_alpha(0.5)\n c2 = art3d.Poly3DCollection([poly2], linewidths=3, closed=False)\n # Post-creation modification should work.\n c2.set_facecolor((1, 0.5, 0.5))\n c2.set_edgecolor('k')\n c2.set_alpha(0.5)\n ax.add_collection3d(c1, autolim=False)\n ax.add_collection3d(c2, autolim=False)\n\n\n@mpl3d_image_comparison(['add_collection3d_zs_array.png'], style='mpl20')\ndef 
test_add_collection3d_zs_array():\n theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)\n z = np.linspace(-2, 2, 100)\n r = z**2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n points = np.column_stack([x, y, z]).reshape(-1, 1, 3)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n norm = plt.Normalize(0, 2*np.pi)\n # 2D LineCollection from x & y values\n lc = LineCollection(segments[:, :, :2], cmap='twilight', norm=norm)\n lc.set_array(np.mod(theta, 2*np.pi))\n # Add 2D collection at z values to ax\n line = ax.add_collection3d(lc, zs=segments[:, :, 2])\n\n assert line is not None\n\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_xlim(-5, 5)\n ax.set_ylim(-4, 6)\n ax.set_zlim(-2, 2)\n\n\n@mpl3d_image_comparison(['add_collection3d_zs_scalar.png'], style='mpl20')\ndef test_add_collection3d_zs_scalar():\n theta = np.linspace(0, 2 * np.pi, 100)\n z = 1\n r = z**2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n points = np.column_stack([x, y]).reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n norm = plt.Normalize(0, 2*np.pi)\n lc = LineCollection(segments, cmap='twilight', norm=norm)\n lc.set_array(theta)\n line = ax.add_collection3d(lc, zs=z)\n\n assert line is not None\n\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_xlim(-5, 5)\n ax.set_ylim(-4, 6)\n ax.set_zlim(0, 2)\n\n\ndef test_line3dCollection_autoscaling():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n lines = [[(0, 0, 0), (1, 4, 2)],\n [(1, 1, 3), (2, 0, 2)],\n [(1, 0, 4), (1, 4, 5)]]\n\n lc = art3d.Line3DCollection(lines)\n ax.add_collection3d(lc)\n assert np.allclose(ax.get_xlim3d(), (-0.041666666666666664, 2.0416666666666665))\n assert np.allclose(ax.get_ylim3d(), (-0.08333333333333333, 4.083333333333333))\n assert 
np.allclose(ax.get_zlim3d(), (-0.10416666666666666, 5.104166666666667))\n\n\ndef test_poly3dCollection_autoscaling():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n poly = np.array([[0, 0, 0], [1, 1, 3], [1, 0, 4]])\n col = art3d.Poly3DCollection([poly])\n ax.add_collection3d(col)\n assert np.allclose(ax.get_xlim3d(), (-0.020833333333333332, 1.0208333333333333))\n assert np.allclose(ax.get_ylim3d(), (-0.020833333333333332, 1.0208333333333333))\n assert np.allclose(ax.get_zlim3d(), (-0.0833333333333333, 4.083333333333333))\n\n\n@mpl3d_image_comparison(['axes3d_labelpad.png'],\n remove_text=False, style='mpl20')\ndef test_axes3d_labelpad():\n fig = plt.figure()\n ax = fig.add_axes(Axes3D(fig))\n # labelpad respects rcParams\n assert ax.xaxis.labelpad == mpl.rcParams['axes.labelpad']\n # labelpad can be set in set_label\n ax.set_xlabel('X LABEL', labelpad=10)\n assert ax.xaxis.labelpad == 10\n ax.set_ylabel('Y LABEL')\n ax.set_zlabel('Z LABEL', labelpad=20)\n assert ax.zaxis.labelpad == 20\n assert ax.get_zlabel() == 'Z LABEL'\n # or manually\n ax.yaxis.labelpad = 20\n ax.zaxis.labelpad = -40\n\n # Tick labels also respect tick.pad (also from rcParams)\n for i, tick in enumerate(ax.yaxis.get_major_ticks()):\n tick.set_pad(tick.get_pad() + 5 - i * 5)\n\n\n@mpl3d_image_comparison(['axes3d_cla.png'], remove_text=False, style='mpl20')\ndef test_axes3d_cla():\n # fixed in pull request 4553\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.set_axis_off()\n ax.cla() # make sure the axis displayed is 3D (not 2D)\n\n\n@mpl3d_image_comparison(['axes3d_rotated.png'],\n remove_text=False, style='mpl20')\ndef test_axes3d_rotated():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.view_init(90, 45, 0) # look down, rotated. 
Should be square\n\n\ndef test_plotsurface_1d_raises():\n x = np.linspace(0.5, 10, num=100)\n y = np.linspace(0.5, 10, num=100)\n X, Y = np.meshgrid(x, y)\n z = np.random.randn(100)\n\n fig = plt.figure(figsize=(14, 6))\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n with pytest.raises(ValueError):\n ax.plot_surface(X, Y, z)\n\n\ndef _test_proj_make_M():\n # eye point\n E = np.array([1000, -1000, 2000])\n R = np.array([100, 100, 100])\n V = np.array([0, 0, 1])\n roll = 0\n u, v, w = proj3d._view_axes(E, R, V, roll)\n viewM = proj3d._view_transformation_uvw(u, v, w, E)\n perspM = proj3d._persp_transformation(100, -100, 1)\n M = np.dot(perspM, viewM)\n return M\n\n\ndef test_proj_transform():\n M = _test_proj_make_M()\n invM = np.linalg.inv(M)\n\n xs = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0]) * 300.0\n ys = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 0]) * 300.0\n zs = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) * 300.0\n\n txs, tys, tzs = proj3d.proj_transform(xs, ys, zs, M)\n ixs, iys, izs = proj3d.inv_transform(txs, tys, tzs, invM)\n\n np.testing.assert_almost_equal(ixs, xs)\n np.testing.assert_almost_equal(iys, ys)\n np.testing.assert_almost_equal(izs, zs)\n\n\ndef _test_proj_draw_axes(M, s=1, *args, **kwargs):\n xs = [0, s, 0, 0]\n ys = [0, 0, s, 0]\n zs = [0, 0, 0, s]\n txs, tys, tzs = proj3d.proj_transform(xs, ys, zs, M)\n o, ax, ay, az = zip(txs, tys)\n lines = [(o, ax), (o, ay), (o, az)]\n\n fig, ax = plt.subplots(*args, **kwargs)\n linec = LineCollection(lines)\n ax.add_collection(linec)\n for x, y, t in zip(txs, tys, ['o', 'x', 'y', 'z']):\n ax.text(x, y, t)\n\n return fig, ax\n\n\n@mpl3d_image_comparison(['proj3d_axes_cube.png'], style='mpl20')\ndef test_proj_axes_cube():\n M = _test_proj_make_M()\n\n ts = '0 1 2 3 0 4 5 6 7 4'.split()\n xs = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0]) * 300.0\n ys = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 0]) * 300.0\n zs = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) * 300.0\n\n txs, tys, tzs = proj3d.proj_transform(xs, ys, zs, M)\n\n 
fig, ax = _test_proj_draw_axes(M, s=400)\n\n ax.scatter(txs, tys, c=tzs)\n ax.plot(txs, tys, c='r')\n for x, y, t in zip(txs, tys, ts):\n ax.text(x, y, t)\n\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_xlim(-0.2, 0.2)\n ax.set_ylim(-0.2, 0.2)\n\n\n@mpl3d_image_comparison(['proj3d_axes_cube_ortho.png'], style='mpl20')\ndef test_proj_axes_cube_ortho():\n E = np.array([200, 100, 100])\n R = np.array([0, 0, 0])\n V = np.array([0, 0, 1])\n roll = 0\n u, v, w = proj3d._view_axes(E, R, V, roll)\n viewM = proj3d._view_transformation_uvw(u, v, w, E)\n orthoM = proj3d._ortho_transformation(-1, 1)\n M = np.dot(orthoM, viewM)\n\n ts = '0 1 2 3 0 4 5 6 7 4'.split()\n xs = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0]) * 100\n ys = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 0]) * 100\n zs = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]) * 100\n\n txs, tys, tzs = proj3d.proj_transform(xs, ys, zs, M)\n\n fig, ax = _test_proj_draw_axes(M, s=150)\n\n ax.scatter(txs, tys, s=300-tzs)\n ax.plot(txs, tys, c='r')\n for x, y, t in zip(txs, tys, ts):\n ax.text(x, y, t)\n\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n ax.set_xlim(-200, 200)\n ax.set_ylim(-200, 200)\n\n\ndef test_world():\n xmin, xmax = 100, 120\n ymin, ymax = -100, 100\n zmin, zmax = 0.1, 0.2\n M = proj3d.world_transformation(xmin, xmax, ymin, ymax, zmin, zmax)\n np.testing.assert_allclose(M,\n [[5e-2, 0, 0, -5],\n [0, 5e-3, 0, 5e-1],\n [0, 0, 1e1, -1],\n [0, 0, 0, 1]])\n\n\ndef test_autoscale():\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n assert ax.get_zscale() == 'linear'\n ax._view_margin = 0\n ax.margins(x=0, y=.1, z=.2)\n ax.plot([0, 1], [0, 1], [0, 1])\n assert ax.get_w_lims() == (0, 1, -.1, 1.1, -.2, 1.2)\n ax.autoscale(False)\n ax.set_autoscalez_on(True)\n ax.plot([0, 2], [0, 2], [0, 2])\n assert ax.get_w_lims() == (0, 1, -.1, 1.1, -.4, 2.4)\n ax.autoscale(axis='x')\n ax.plot([0, 2], [0, 2], [0, 2])\n assert ax.get_w_lims() == (0, 2, -.1, 1.1, 
-.4, 2.4)\n\n\n@pytest.mark.parametrize('axis', ('x', 'y', 'z'))\n@pytest.mark.parametrize('auto', (True, False, None))\ndef test_unautoscale(axis, auto):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n x = np.arange(100)\n y = np.linspace(-0.1, 0.1, 100)\n ax.scatter(x, y)\n\n get_autoscale_on = getattr(ax, f'get_autoscale{axis}_on')\n set_lim = getattr(ax, f'set_{axis}lim')\n get_lim = getattr(ax, f'get_{axis}lim')\n\n post_auto = get_autoscale_on() if auto is None else auto\n\n set_lim((-0.5, 0.5), auto=auto)\n assert post_auto == get_autoscale_on()\n fig.canvas.draw()\n np.testing.assert_array_equal(get_lim(), (-0.5, 0.5))\n\n\n@check_figures_equal(extensions=["png"])\ndef test_culling(fig_test, fig_ref):\n xmins = (-100, -50)\n for fig, xmin in zip((fig_test, fig_ref), xmins):\n ax = fig.add_subplot(projection='3d')\n n = abs(xmin) + 1\n xs = np.linspace(0, xmin, n)\n ys = np.ones(n)\n zs = np.zeros(n)\n ax.plot(xs, ys, zs, 'k')\n\n ax.set(xlim=(-5, 5), ylim=(-5, 5), zlim=(-5, 5))\n ax.view_init(5, 180, 0)\n\n\ndef test_axes3d_focal_length_checks():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n with pytest.raises(ValueError):\n ax.set_proj_type('persp', focal_length=0)\n with pytest.raises(ValueError):\n ax.set_proj_type('ortho', focal_length=1)\n\n\n@mpl3d_image_comparison(['axes3d_focal_length.png'],\n remove_text=False, style='mpl20')\ndef test_axes3d_focal_length():\n fig, axs = plt.subplots(1, 2, subplot_kw={'projection': '3d'})\n axs[0].set_proj_type('persp', focal_length=np.inf)\n axs[1].set_proj_type('persp', focal_length=0.15)\n\n\n@mpl3d_image_comparison(['axes3d_ortho.png'], remove_text=False, style='mpl20')\ndef test_axes3d_ortho():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.set_proj_type('ortho')\n\n\n@mpl3d_image_comparison(['axes3d_isometric.png'], style='mpl20')\ndef test_axes3d_isometric():\n from itertools import combinations, product\n fig, ax = plt.subplots(subplot_kw=dict(\n 
projection='3d',\n proj_type='ortho',\n box_aspect=(4, 4, 4)\n ))\n r = (-1, 1) # stackoverflow.com/a/11156353\n for s, e in combinations(np.array(list(product(r, r, r))), 2):\n if abs(s - e).sum() == r[1] - r[0]:\n ax.plot3D(*zip(s, e), c='k')\n ax.view_init(elev=np.degrees(np.arctan(1. / np.sqrt(2))), azim=-45, roll=0)\n ax.grid(True)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_axlim_clip(fig_test, fig_ref):\n # With axlim clipping\n ax = fig_test.add_subplot(projection="3d")\n x = np.linspace(0, 1, 11)\n y = np.linspace(0, 1, 11)\n X, Y = np.meshgrid(x, y)\n Z = X + Y\n ax.plot_surface(X, Y, Z, facecolor='C1', edgecolors=None,\n rcount=50, ccount=50, axlim_clip=True)\n # This ax.plot is to cover the extra surface edge which is not clipped out\n ax.plot([0.5, 0.5], [0, 1], [0.5, 1.5],\n color='k', linewidth=3, zorder=5, axlim_clip=True)\n ax.scatter(X.ravel(), Y.ravel(), Z.ravel() + 1, axlim_clip=True)\n ax.quiver(X.ravel(), Y.ravel(), Z.ravel() + 2,\n 0*X.ravel(), 0*Y.ravel(), 0*Z.ravel() + 1,\n arrow_length_ratio=0, axlim_clip=True)\n ax.plot(X[0], Y[0], Z[0] + 3, color='C2', axlim_clip=True)\n ax.text(1.1, 0.5, 4, 'test', axlim_clip=True) # won't be visible\n ax.set(xlim=(0, 0.5), ylim=(0, 1), zlim=(0, 5))\n\n # With manual clipping\n ax = fig_ref.add_subplot(projection="3d")\n idx = (X <= 0.5)\n X = X[idx].reshape(11, 6)\n Y = Y[idx].reshape(11, 6)\n Z = Z[idx].reshape(11, 6)\n ax.plot_surface(X, Y, Z, facecolor='C1', edgecolors=None,\n rcount=50, ccount=50, axlim_clip=False)\n ax.plot([0.5, 0.5], [0, 1], [0.5, 1.5],\n color='k', linewidth=3, zorder=5, axlim_clip=False)\n ax.scatter(X.ravel(), Y.ravel(), Z.ravel() + 1, axlim_clip=False)\n ax.quiver(X.ravel(), Y.ravel(), Z.ravel() + 2,\n 0*X.ravel(), 0*Y.ravel(), 0*Z.ravel() + 1,\n arrow_length_ratio=0, axlim_clip=False)\n ax.plot(X[0], Y[0], Z[0] + 3, color='C2', axlim_clip=False)\n ax.set(xlim=(0, 0.5), ylim=(0, 1), zlim=(0, 5))\n\n\n@pytest.mark.parametrize('value', [np.inf, 
np.nan])\n@pytest.mark.parametrize(('setter', 'side'), [\n ('set_xlim3d', 'left'),\n ('set_xlim3d', 'right'),\n ('set_ylim3d', 'bottom'),\n ('set_ylim3d', 'top'),\n ('set_zlim3d', 'bottom'),\n ('set_zlim3d', 'top'),\n])\ndef test_invalid_axes_limits(setter, side, value):\n limit = {side: value}\n fig = plt.figure()\n obj = fig.add_subplot(projection='3d')\n with pytest.raises(ValueError):\n getattr(obj, setter)(**limit)\n\n\nclass TestVoxels:\n @mpl3d_image_comparison(['voxels-simple.png'], style='mpl20')\n def test_simple(self):\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n x, y, z = np.indices((5, 4, 3))\n voxels = (x == y) | (y == z)\n ax.voxels(voxels)\n\n @mpl3d_image_comparison(['voxels-edge-style.png'], style='mpl20')\n def test_edge_style(self):\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n x, y, z = np.indices((5, 5, 4))\n voxels = ((x - 2)**2 + (y - 2)**2 + (z-1.5)**2) < 2.2**2\n v = ax.voxels(voxels, linewidths=3, edgecolor='C1')\n\n # change the edge color of one voxel\n v[max(v.keys())].set_edgecolor('C2')\n\n @mpl3d_image_comparison(['voxels-named-colors.png'], style='mpl20')\n def test_named_colors(self):\n """Test with colors set to a 3D object array of strings."""\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n x, y, z = np.indices((10, 10, 10))\n voxels = (x == y) | (y == z)\n voxels = voxels & ~(x * y * z < 1)\n colors = np.full((10, 10, 10), 'C0', dtype=np.object_)\n colors[(x < 5) & (y < 5)] = '0.25'\n colors[(x + z) < 10] = 'cyan'\n ax.voxels(voxels, facecolors=colors)\n\n @mpl3d_image_comparison(['voxels-rgb-data.png'], style='mpl20')\n def test_rgb_data(self):\n """Test with colors set to a 4d float array of rgb data."""\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n x, y, z = np.indices((10, 10, 10))\n voxels = (x == y) | (y == z)\n colors = np.zeros((10, 10, 10, 3))\n colors[..., 0] = x / 9\n colors[..., 1] = y / 9\n colors[..., 2] = z / 9\n ax.voxels(voxels, 
facecolors=colors)\n\n @mpl3d_image_comparison(['voxels-alpha.png'], style='mpl20')\n def test_alpha(self):\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n x, y, z = np.indices((10, 10, 10))\n v1 = x == y\n v2 = np.abs(x - y) < 2\n voxels = v1 | v2\n colors = np.zeros((10, 10, 10, 4))\n colors[v2] = [1, 0, 0, 0.5]\n colors[v1] = [0, 1, 0, 0.5]\n v = ax.voxels(voxels, facecolors=colors)\n\n assert type(v) is dict\n for coord, poly in v.items():\n assert voxels[coord], "faces returned for absent voxel"\n assert isinstance(poly, art3d.Poly3DCollection)\n\n @mpl3d_image_comparison(['voxels-xyz.png'],\n tol=0.01, remove_text=False, style='mpl20')\n def test_xyz(self):\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n def midpoints(x):\n sl = ()\n for i in range(x.ndim):\n x = (x[sl + np.index_exp[:-1]] +\n x[sl + np.index_exp[1:]]) / 2.0\n sl += np.index_exp[:]\n return x\n\n # prepare some coordinates, and attach rgb values to each\n r, g, b = np.indices((17, 17, 17)) / 16.0\n rc = midpoints(r)\n gc = midpoints(g)\n bc = midpoints(b)\n\n # define a sphere about [0.5, 0.5, 0.5]\n sphere = (rc - 0.5)**2 + (gc - 0.5)**2 + (bc - 0.5)**2 < 0.5**2\n\n # combine the color components\n colors = np.zeros(sphere.shape + (3,))\n colors[..., 0] = rc\n colors[..., 1] = gc\n colors[..., 2] = bc\n\n # and plot everything\n ax.voxels(r, g, b, sphere,\n facecolors=colors,\n edgecolors=np.clip(2*colors - 0.5, 0, 1), # brighter\n linewidth=0.5)\n\n def test_calling_conventions(self):\n x, y, z = np.indices((3, 4, 5))\n filled = np.ones((2, 3, 4))\n\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n\n # all the valid calling conventions\n for kw in (dict(), dict(edgecolor='k')):\n ax.voxels(filled, **kw)\n ax.voxels(filled=filled, **kw)\n ax.voxels(x, y, z, filled, **kw)\n ax.voxels(x, y, z, filled=filled, **kw)\n\n # duplicate argument\n with pytest.raises(TypeError, match='voxels'):\n ax.voxels(x, y, z, filled, filled=filled)\n # missing arguments\n 
with pytest.raises(TypeError, match='voxels'):\n ax.voxels(x, y)\n # x, y, z are positional only - this passes them on as attributes of\n # Poly3DCollection\n with pytest.raises(AttributeError, match="keyword argument 'x'") as exec_info:\n ax.voxels(filled=filled, x=x, y=y, z=z)\n assert exec_info.value.name == 'x'\n\n\ndef test_line3d_set_get_data_3d():\n x, y, z = [0, 1], [2, 3], [4, 5]\n x2, y2, z2 = [6, 7], [8, 9], [10, 11]\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n lines = ax.plot(x, y, z)\n line = lines[0]\n np.testing.assert_array_equal((x, y, z), line.get_data_3d())\n line.set_data_3d(x2, y2, z2)\n np.testing.assert_array_equal((x2, y2, z2), line.get_data_3d())\n line.set_xdata(x)\n line.set_ydata(y)\n line.set_3d_properties(zs=z, zdir='z')\n np.testing.assert_array_equal((x, y, z), line.get_data_3d())\n line.set_3d_properties(zs=0, zdir='z')\n np.testing.assert_array_equal((x, y, np.zeros_like(z)), line.get_data_3d())\n\n\n@check_figures_equal(extensions=["png"])\ndef test_inverted(fig_test, fig_ref):\n # Plot then invert.\n ax = fig_test.add_subplot(projection="3d")\n ax.plot([1, 1, 10, 10], [1, 10, 10, 10], [1, 1, 1, 10])\n ax.invert_yaxis()\n # Invert then plot.\n ax = fig_ref.add_subplot(projection="3d")\n ax.invert_yaxis()\n ax.plot([1, 1, 10, 10], [1, 10, 10, 10], [1, 1, 1, 10])\n\n\ndef test_inverted_cla():\n # GitHub PR #5450. Setting autoscale should reset\n # axes to be non-inverted.\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n # 1. 
test that a new axis is not inverted per default\n assert not ax.xaxis_inverted()\n assert not ax.yaxis_inverted()\n assert not ax.zaxis_inverted()\n ax.set_xlim(1, 0)\n ax.set_ylim(1, 0)\n ax.set_zlim(1, 0)\n assert ax.xaxis_inverted()\n assert ax.yaxis_inverted()\n assert ax.zaxis_inverted()\n ax.cla()\n assert not ax.xaxis_inverted()\n assert not ax.yaxis_inverted()\n assert not ax.zaxis_inverted()\n\n\ndef test_ax3d_tickcolour():\n fig = plt.figure()\n ax = Axes3D(fig)\n\n ax.tick_params(axis='x', colors='red')\n ax.tick_params(axis='y', colors='red')\n ax.tick_params(axis='z', colors='red')\n fig.canvas.draw()\n\n for tick in ax.xaxis.get_major_ticks():\n assert tick.tick1line._color == 'red'\n for tick in ax.yaxis.get_major_ticks():\n assert tick.tick1line._color == 'red'\n for tick in ax.zaxis.get_major_ticks():\n assert tick.tick1line._color == 'red'\n\n\n@check_figures_equal(extensions=["png"])\ndef test_ticklabel_format(fig_test, fig_ref):\n axs = fig_test.subplots(4, 5, subplot_kw={"projection": "3d"})\n for ax in axs.flat:\n ax.set_xlim(1e7, 1e7 + 10)\n for row, name in zip(axs, ["x", "y", "z", "both"]):\n row[0].ticklabel_format(\n axis=name, style="plain")\n row[1].ticklabel_format(\n axis=name, scilimits=(-2, 2))\n row[2].ticklabel_format(\n axis=name, useOffset=not mpl.rcParams["axes.formatter.useoffset"])\n row[3].ticklabel_format(\n axis=name, useLocale=not mpl.rcParams["axes.formatter.use_locale"])\n row[4].ticklabel_format(\n axis=name,\n useMathText=not mpl.rcParams["axes.formatter.use_mathtext"])\n\n def get_formatters(ax, names):\n return [getattr(ax, name).get_major_formatter() for name in names]\n\n axs = fig_ref.subplots(4, 5, subplot_kw={"projection": "3d"})\n for ax in axs.flat:\n ax.set_xlim(1e7, 1e7 + 10)\n for row, names in zip(\n axs, [["xaxis"], ["yaxis"], ["zaxis"], ["xaxis", "yaxis", "zaxis"]]\n ):\n for fmt in get_formatters(row[0], names):\n fmt.set_scientific(False)\n for fmt in get_formatters(row[1], names):\n 
fmt.set_powerlimits((-2, 2))\n for fmt in get_formatters(row[2], names):\n fmt.set_useOffset(not mpl.rcParams["axes.formatter.useoffset"])\n for fmt in get_formatters(row[3], names):\n fmt.set_useLocale(not mpl.rcParams["axes.formatter.use_locale"])\n for fmt in get_formatters(row[4], names):\n fmt.set_useMathText(\n not mpl.rcParams["axes.formatter.use_mathtext"])\n\n\n@check_figures_equal(extensions=["png"])\ndef test_quiver3D_smoke(fig_test, fig_ref):\n pivot = "middle"\n # Make the grid\n x, y, z = np.meshgrid(\n np.arange(-0.8, 1, 0.2),\n np.arange(-0.8, 1, 0.2),\n np.arange(-0.8, 1, 0.8)\n )\n u = v = w = np.ones_like(x)\n\n for fig, length in zip((fig_ref, fig_test), (1, 1.0)):\n ax = fig.add_subplot(projection="3d")\n ax.quiver(x, y, z, u, v, w, length=length, pivot=pivot)\n\n\n@image_comparison(["minor_ticks.png"], style="mpl20")\ndef test_minor_ticks():\n ax = plt.figure().add_subplot(projection="3d")\n ax.set_xticks([0.25], minor=True)\n ax.set_xticklabels(["quarter"], minor=True)\n ax.set_yticks([0.33], minor=True)\n ax.set_yticklabels(["third"], minor=True)\n ax.set_zticks([0.50], minor=True)\n ax.set_zticklabels(["half"], minor=True)\n\n\n# remove tolerance when regenerating the test image\n@mpl3d_image_comparison(['errorbar3d_errorevery.png'], style='mpl20', tol=0.003)\ndef test_errorbar3d_errorevery():\n """Tests errorevery functionality for 3D errorbars."""\n t = np.arange(0, 2*np.pi+.1, 0.01)\n x, y, z = np.sin(t), np.cos(3*t), np.sin(5*t)\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n estep = 15\n i = np.arange(t.size)\n zuplims = (i % estep == 0) & (i // estep % 3 == 0)\n zlolims = (i % estep == 0) & (i // estep % 3 == 2)\n\n ax.errorbar(x, y, z, 0.2, zuplims=zuplims, zlolims=zlolims,\n errorevery=estep)\n\n\n@mpl3d_image_comparison(['errorbar3d.png'], style='mpl20',\n tol=0 if platform.machine() == 'x86_64' else 0.02)\ndef test_errorbar3d():\n """Tests limits, color styling, and legend for 3D errorbars."""\n fig = 
plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n d = [1, 2, 3, 4, 5]\n e = [.5, .5, .5, .5, .5]\n ax.errorbar(x=d, y=d, z=d, xerr=e, yerr=e, zerr=e, capsize=3,\n zuplims=[False, True, False, True, True],\n zlolims=[True, False, False, True, False],\n yuplims=True,\n ecolor='purple', label='Error lines')\n ax.legend()\n\n\n@image_comparison(['stem3d.png'], style='mpl20', tol=0.009)\ndef test_stem3d():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig, axs = plt.subplots(2, 3, figsize=(8, 6),\n constrained_layout=True,\n subplot_kw={'projection': '3d'})\n\n theta = np.linspace(0, 2*np.pi)\n x = np.cos(theta - np.pi/2)\n y = np.sin(theta - np.pi/2)\n z = theta\n\n for ax, zdir in zip(axs[0], ['x', 'y', 'z']):\n ax.stem(x, y, z, orientation=zdir)\n ax.set_title(f'orientation={zdir}')\n\n x = np.linspace(-np.pi/2, np.pi/2, 20)\n y = np.ones_like(x)\n z = np.cos(x)\n\n for ax, zdir in zip(axs[1], ['x', 'y', 'z']):\n markerline, stemlines, baseline = ax.stem(\n x, y, z,\n linefmt='C4-.', markerfmt='C1D', basefmt='C2',\n orientation=zdir)\n ax.set_title(f'orientation={zdir}')\n markerline.set(markerfacecolor='none', markeredgewidth=2)\n baseline.set_linewidth(3)\n\n\n@image_comparison(["equal_box_aspect.png"], style="mpl20")\ndef test_equal_box_aspect():\n from itertools import product, combinations\n\n fig = plt.figure()\n ax = fig.add_subplot(projection="3d")\n\n # Make data\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n x = np.outer(np.cos(u), np.sin(v))\n y = np.outer(np.sin(u), np.sin(v))\n z = np.outer(np.ones_like(u), np.cos(v))\n\n # Plot the surface\n ax.plot_surface(x, y, z)\n\n # draw cube\n r = [-1, 1]\n for s, e in combinations(np.array(list(product(r, r, r))), 2):\n if np.sum(np.abs(s - e)) == r[1] - r[0]:\n ax.plot3D(*zip(s, e), color="b")\n\n # Make axes limits\n xyzlim = np.column_stack(\n [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]\n )\n XYZlim = [min(xyzlim[0]), max(xyzlim[1])]\n 
ax.set_xlim3d(XYZlim)\n ax.set_ylim3d(XYZlim)\n ax.set_zlim3d(XYZlim)\n ax.axis('off')\n ax.set_box_aspect((1, 1, 1))\n\n with pytest.raises(ValueError, match="Argument zoom ="):\n ax.set_box_aspect((1, 1, 1), zoom=-1)\n\n\ndef test_colorbar_pos():\n num_plots = 2\n fig, axs = plt.subplots(1, num_plots, figsize=(4, 5),\n constrained_layout=True,\n subplot_kw={'projection': '3d'})\n for ax in axs:\n p_tri = ax.plot_trisurf(np.random.randn(5), np.random.randn(5),\n np.random.randn(5))\n\n cbar = plt.colorbar(p_tri, ax=axs, orientation='horizontal')\n\n fig.canvas.draw()\n # check that actually on the bottom\n assert cbar.ax.get_position().extents[1] < 0.2\n\n\ndef test_inverted_zaxis():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.set_zlim(0, 1)\n assert not ax.zaxis_inverted()\n assert ax.get_zlim() == (0, 1)\n assert ax.get_zbound() == (0, 1)\n\n # Change bound\n ax.set_zbound((0, 2))\n assert not ax.zaxis_inverted()\n assert ax.get_zlim() == (0, 2)\n assert ax.get_zbound() == (0, 2)\n\n # Change invert\n ax.invert_zaxis()\n assert ax.zaxis_inverted()\n assert ax.get_zlim() == (2, 0)\n assert ax.get_zbound() == (0, 2)\n\n # Set upper bound\n ax.set_zbound(upper=1)\n assert ax.zaxis_inverted()\n assert ax.get_zlim() == (1, 0)\n assert ax.get_zbound() == (0, 1)\n\n # Set lower bound\n ax.set_zbound(lower=2)\n assert ax.zaxis_inverted()\n assert ax.get_zlim() == (2, 1)\n assert ax.get_zbound() == (1, 2)\n\n\ndef test_set_zlim():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n assert np.allclose(ax.get_zlim(), (-1/48, 49/48))\n ax.set_zlim(zmax=2)\n assert np.allclose(ax.get_zlim(), (-1/48, 2))\n ax.set_zlim(zmin=1)\n assert ax.get_zlim() == (1, 2)\n\n with pytest.raises(\n TypeError, match="Cannot pass both 'lower' and 'min'"):\n ax.set_zlim(bottom=0, zmin=1)\n with pytest.raises(\n TypeError, match="Cannot pass both 'upper' and 'max'"):\n ax.set_zlim(top=0, zmax=1)\n\n\n@check_figures_equal(extensions=["png"])\ndef 
test_shared_view(fig_test, fig_ref):\n elev, azim, roll = 5, 20, 30\n ax1 = fig_test.add_subplot(131, projection="3d")\n ax2 = fig_test.add_subplot(132, projection="3d", shareview=ax1)\n ax3 = fig_test.add_subplot(133, projection="3d")\n ax3.shareview(ax1)\n ax2.view_init(elev=elev, azim=azim, roll=roll, share=True)\n\n for subplot_num in (131, 132, 133):\n ax = fig_ref.add_subplot(subplot_num, projection="3d")\n ax.view_init(elev=elev, azim=azim, roll=roll)\n\n\ndef test_shared_axes_retick():\n fig = plt.figure()\n ax1 = fig.add_subplot(211, projection="3d")\n ax2 = fig.add_subplot(212, projection="3d", sharez=ax1)\n ax1.plot([0, 1], [0, 1], [0, 2])\n ax2.plot([0, 1], [0, 1], [0, 2])\n ax1.set_zticks([-0.5, 0, 2, 2.5])\n # check that setting ticks on a shared axis is synchronized\n assert ax1.get_zlim() == (-0.5, 2.5)\n assert ax2.get_zlim() == (-0.5, 2.5)\n\n\ndef test_quaternion():\n # 1:\n q1 = Quaternion(1, [0, 0, 0])\n assert q1.scalar == 1\n assert (q1.vector == [0, 0, 0]).all\n # __neg__:\n assert (-q1).scalar == -1\n assert ((-q1).vector == [0, 0, 0]).all\n # i, j, k:\n qi = Quaternion(0, [1, 0, 0])\n assert qi.scalar == 0\n assert (qi.vector == [1, 0, 0]).all\n qj = Quaternion(0, [0, 1, 0])\n assert qj.scalar == 0\n assert (qj.vector == [0, 1, 0]).all\n qk = Quaternion(0, [0, 0, 1])\n assert qk.scalar == 0\n assert (qk.vector == [0, 0, 1]).all\n # i^2 = j^2 = k^2 = -1:\n assert qi*qi == -q1\n assert qj*qj == -q1\n assert qk*qk == -q1\n # identity:\n assert q1*qi == qi\n assert q1*qj == qj\n assert q1*qk == qk\n # i*j=k, j*k=i, k*i=j:\n assert qi*qj == qk\n assert qj*qk == qi\n assert qk*qi == qj\n assert qj*qi == -qk\n assert qk*qj == -qi\n assert qi*qk == -qj\n # __mul__:\n assert (Quaternion(2, [3, 4, 5]) * Quaternion(6, [7, 8, 9])\n == Quaternion(-86, [28, 48, 44]))\n # conjugate():\n for q in [q1, qi, qj, qk]:\n assert q.conjugate().scalar == q.scalar\n assert (q.conjugate().vector == -q.vector).all\n assert q.conjugate().conjugate() == q\n assert 
((q*q.conjugate()).vector == 0).all\n # norm:\n q0 = Quaternion(0, [0, 0, 0])\n assert q0.norm == 0\n assert q1.norm == 1\n assert qi.norm == 1\n assert qj.norm == 1\n assert qk.norm == 1\n for q in [q0, q1, qi, qj, qk]:\n assert q.norm == (q*q.conjugate()).scalar\n # normalize():\n for q in [\n Quaternion(2, [0, 0, 0]),\n Quaternion(0, [3, 0, 0]),\n Quaternion(0, [0, 4, 0]),\n Quaternion(0, [0, 0, 5]),\n Quaternion(6, [7, 8, 9])\n ]:\n assert q.normalize().norm == 1\n # reciprocal():\n for q in [q1, qi, qj, qk]:\n assert q*q.reciprocal() == q1\n assert q.reciprocal()*q == q1\n # rotate():\n assert (qi.rotate([1, 2, 3]) == np.array([1, -2, -3])).all\n # rotate_from_to():\n for r1, r2, q in [\n ([1, 0, 0], [0, 1, 0], Quaternion(np.sqrt(1/2), [0, 0, np.sqrt(1/2)])),\n ([1, 0, 0], [0, 0, 1], Quaternion(np.sqrt(1/2), [0, -np.sqrt(1/2), 0])),\n ([1, 0, 0], [1, 0, 0], Quaternion(1, [0, 0, 0]))\n ]:\n assert Quaternion.rotate_from_to(r1, r2) == q\n # rotate_from_to(), special case:\n for r1 in [[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]]:\n r1 = np.array(r1)\n with pytest.warns(UserWarning):\n q = Quaternion.rotate_from_to(r1, -r1)\n assert np.isclose(q.norm, 1)\n assert np.dot(q.vector, r1) == 0\n # from_cardan_angles(), as_cardan_angles():\n for elev, azim, roll in [(0, 0, 0),\n (90, 0, 0), (0, 90, 0), (0, 0, 90),\n (0, 30, 30), (30, 0, 30), (30, 30, 0),\n (47, 11, -24)]:\n for mag in [1, 2]:\n q = Quaternion.from_cardan_angles(\n np.deg2rad(elev), np.deg2rad(azim), np.deg2rad(roll))\n assert np.isclose(q.norm, 1)\n q = Quaternion(mag * q.scalar, mag * q.vector)\n np.testing.assert_allclose(np.rad2deg(Quaternion.as_cardan_angles(q)),\n (elev, azim, roll), atol=1e-6)\n\n\n@pytest.mark.parametrize('style',\n ('azel', 'trackball', 'sphere', 'arcball'))\ndef test_rotate(style):\n """Test rotating using the left mouse button."""\n if style == 'azel':\n s = 0.5\n else:\n s = mpl.rcParams['axes3d.trackballsize'] / 2\n s *= 0.5\n mpl.rcParams['axes3d.trackballborder'] = 0\n 
with mpl.rc_context({'axes3d.mouserotationstyle': style}):\n for roll, dx, dy in [\n [0, 1, 0],\n [30, 1, 0],\n [0, 0, 1],\n [30, 0, 1],\n [0, 0.5, np.sqrt(3)/2],\n [30, 0.5, np.sqrt(3)/2],\n [0, 2, 0]]:\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection='3d')\n ax.view_init(0, 0, roll)\n ax.figure.canvas.draw()\n\n # drag mouse to change orientation\n ax._button_press(\n mock_event(ax, button=MouseButton.LEFT, xdata=0, ydata=0))\n ax._on_move(\n mock_event(ax, button=MouseButton.LEFT,\n xdata=s*dx*ax._pseudo_w, ydata=s*dy*ax._pseudo_h))\n ax.figure.canvas.draw()\n\n c = np.sqrt(3)/2\n expectations = {\n ('azel', 0, 1, 0): (0, -45, 0),\n ('azel', 0, 0, 1): (-45, 0, 0),\n ('azel', 0, 0.5, c): (-38.971143, -22.5, 0),\n ('azel', 0, 2, 0): (0, -90, 0),\n ('azel', 30, 1, 0): (22.5, -38.971143, 30),\n ('azel', 30, 0, 1): (-38.971143, -22.5, 30),\n ('azel', 30, 0.5, c): (-22.5, -38.971143, 30),\n\n ('trackball', 0, 1, 0): (0, -28.64789, 0),\n ('trackball', 0, 0, 1): (-28.64789, 0, 0),\n ('trackball', 0, 0.5, c): (-24.531578, -15.277726, 3.340403),\n ('trackball', 0, 2, 0): (0, -180/np.pi, 0),\n ('trackball', 30, 1, 0): (13.869588, -25.319385, 26.87008),\n ('trackball', 30, 0, 1): (-24.531578, -15.277726, 33.340403),\n ('trackball', 30, 0.5, c): (-13.869588, -25.319385, 33.129920),\n\n ('sphere', 0, 1, 0): (0, -30, 0),\n ('sphere', 0, 0, 1): (-30, 0, 0),\n ('sphere', 0, 0.5, c): (-25.658906, -16.102114, 3.690068),\n ('sphere', 0, 2, 0): (0, -90, 0),\n ('sphere', 30, 1, 0): (14.477512, -26.565051, 26.565051),\n ('sphere', 30, 0, 1): (-25.658906, -16.102114, 33.690068),\n ('sphere', 30, 0.5, c): (-14.477512, -26.565051, 33.434949),\n\n ('arcball', 0, 1, 0): (0, -60, 0),\n ('arcball', 0, 0, 1): (-60, 0, 0),\n ('arcball', 0, 0.5, c): (-48.590378, -40.893395, 19.106605),\n ('arcball', 0, 2, 0): (0, 180, 0),\n ('arcball', 30, 1, 0): (25.658906, -56.309932, 16.102114),\n ('arcball', 30, 0, 1): (-48.590378, -40.893395, 49.106605),\n ('arcball', 30, 0.5, c): 
(-25.658906, -56.309932, 43.897886)}\n new_elev, new_azim, new_roll = expectations[(style, roll, dx, dy)]\n np.testing.assert_allclose((ax.elev, ax.azim, ax.roll),\n (new_elev, new_azim, new_roll), atol=1e-6)\n\n\ndef test_pan():\n """Test mouse panning using the middle mouse button."""\n\n def convert_lim(dmin, dmax):\n """Convert min/max limits to center and range."""\n center = (dmin + dmax) / 2\n range_ = dmax - dmin\n return center, range_\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(0, 0, 0)\n fig.canvas.draw()\n\n x_center0, x_range0 = convert_lim(*ax.get_xlim3d())\n y_center0, y_range0 = convert_lim(*ax.get_ylim3d())\n z_center0, z_range0 = convert_lim(*ax.get_zlim3d())\n\n # move mouse diagonally to pan along all axis.\n ax._button_press(\n mock_event(ax, button=MouseButton.MIDDLE, xdata=0, ydata=0))\n ax._on_move(\n mock_event(ax, button=MouseButton.MIDDLE, xdata=1, ydata=1))\n\n x_center, x_range = convert_lim(*ax.get_xlim3d())\n y_center, y_range = convert_lim(*ax.get_ylim3d())\n z_center, z_range = convert_lim(*ax.get_zlim3d())\n\n # Ranges have not changed\n assert x_range == pytest.approx(x_range0)\n assert y_range == pytest.approx(y_range0)\n assert z_range == pytest.approx(z_range0)\n\n # But center positions have\n assert x_center != pytest.approx(x_center0)\n assert y_center != pytest.approx(y_center0)\n assert z_center != pytest.approx(z_center0)\n\n\n@pytest.mark.parametrize("tool,button,key,expected",\n [("zoom", MouseButton.LEFT, None, # zoom in\n ((0.00, 0.06), (0.01, 0.07), (0.02, 0.08))),\n ("zoom", MouseButton.LEFT, 'x', # zoom in\n ((-0.01, 0.10), (-0.03, 0.08), (-0.06, 0.06))),\n ("zoom", MouseButton.LEFT, 'y', # zoom in\n ((-0.07, 0.05), (-0.04, 0.08), (0.00, 0.12))),\n ("zoom", MouseButton.RIGHT, None, # zoom out\n ((-0.09, 0.15), (-0.08, 0.17), (-0.07, 0.18))),\n ("pan", MouseButton.LEFT, None,\n ((-0.70, -0.58), (-1.04, -0.91), (-1.27, -1.15))),\n ("pan", MouseButton.LEFT, 'x',\n ((-0.97, -0.84), 
(-0.58, -0.46), (-0.06, 0.06))),\n ("pan", MouseButton.LEFT, 'y',\n ((0.20, 0.32), (-0.51, -0.39), (-1.27, -1.15)))])\ndef test_toolbar_zoom_pan(tool, button, key, expected):\n # NOTE: The expected zoom values are rough ballparks of moving in the view\n # to make sure we are getting the right direction of motion.\n # The specific values can and should change if the zoom movement\n # scaling factor gets updated.\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.scatter(0, 0, 0)\n fig.canvas.draw()\n xlim0, ylim0, zlim0 = ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()\n\n # Mouse from (0, 0) to (1, 1)\n d0 = (0, 0)\n d1 = (1, 1)\n # Convert to screen coordinates ("s"). Events are defined only with pixel\n # precision, so round the pixel values, and below, check against the\n # corresponding xdata/ydata, which are close but not equal to d0/d1.\n s0 = ax.transData.transform(d0).astype(int)\n s1 = ax.transData.transform(d1).astype(int)\n\n # Set up the mouse movements\n start_event = MouseEvent(\n "button_press_event", fig.canvas, *s0, button, key=key)\n stop_event = MouseEvent(\n "button_release_event", fig.canvas, *s1, button, key=key)\n\n tb = NavigationToolbar2(fig.canvas)\n if tool == "zoom":\n tb.zoom()\n tb.press_zoom(start_event)\n tb.drag_zoom(stop_event)\n tb.release_zoom(stop_event)\n else:\n tb.pan()\n tb.press_pan(start_event)\n tb.drag_pan(stop_event)\n tb.release_pan(stop_event)\n\n # Should be close, but won't be exact due to screen integer resolution\n xlim, ylim, zlim = expected\n assert ax.get_xlim3d() == pytest.approx(xlim, abs=0.01)\n assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)\n assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)\n\n # Ensure that back, forward, and home buttons work\n tb.back()\n assert ax.get_xlim3d() == pytest.approx(xlim0)\n assert ax.get_ylim3d() == pytest.approx(ylim0)\n assert ax.get_zlim3d() == pytest.approx(zlim0)\n\n tb.forward()\n assert ax.get_xlim3d() == pytest.approx(xlim, 
abs=0.01)\n assert ax.get_ylim3d() == pytest.approx(ylim, abs=0.01)\n assert ax.get_zlim3d() == pytest.approx(zlim, abs=0.01)\n\n tb.home()\n assert ax.get_xlim3d() == pytest.approx(xlim0)\n assert ax.get_ylim3d() == pytest.approx(ylim0)\n assert ax.get_zlim3d() == pytest.approx(zlim0)\n\n\n@mpl.style.context('default')\n@check_figures_equal(extensions=["png"])\ndef test_scalarmap_update(fig_test, fig_ref):\n\n x, y, z = np.array(list(itertools.product(*[np.arange(0, 5, 1),\n np.arange(0, 5, 1),\n np.arange(0, 5, 1)]))).T\n c = x + y\n\n # test\n ax_test = fig_test.add_subplot(111, projection='3d')\n sc_test = ax_test.scatter(x, y, z, c=c, s=40, cmap='viridis')\n # force a draw\n fig_test.canvas.draw()\n # mark it as "stale"\n sc_test.changed()\n\n # ref\n ax_ref = fig_ref.add_subplot(111, projection='3d')\n sc_ref = ax_ref.scatter(x, y, z, c=c, s=40, cmap='viridis')\n\n\ndef test_subfigure_simple():\n # smoketest that subfigures can work...\n fig = plt.figure()\n sf = fig.subfigures(1, 2)\n ax = sf[0].add_subplot(1, 1, 1, projection='3d')\n ax = sf[1].add_subplot(1, 1, 1, projection='3d', label='other')\n\n\n# Update style when regenerating the test image\n@image_comparison(baseline_images=['computed_zorder'], remove_text=True,\n extensions=['png'], style=('mpl20'))\ndef test_computed_zorder():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax1 = fig.add_subplot(221, projection='3d')\n ax2 = fig.add_subplot(222, projection='3d')\n ax2.computed_zorder = False\n\n # create a horizontal plane\n corners = ((0, 0, 0), (0, 5, 0), (5, 5, 0), (5, 0, 0))\n for ax in (ax1, ax2):\n tri = art3d.Poly3DCollection([corners],\n facecolors='white',\n edgecolors='black',\n zorder=1)\n ax.add_collection3d(tri)\n\n # plot a vector\n ax.plot((2, 2), (2, 2), (0, 4), c='red', zorder=2)\n\n # plot some points\n ax.scatter((3, 3), (1, 3), (1, 3), c='red', zorder=10)\n\n ax.set_xlim((0, 5.0))\n ax.set_ylim((0, 5.0))\n ax.set_zlim((0, 
2.5))\n\n ax3 = fig.add_subplot(223, projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax4.computed_zorder = False\n\n dim = 10\n X, Y = np.meshgrid((-dim, dim), (-dim, dim))\n Z = np.zeros((2, 2))\n\n angle = 0.5\n X2, Y2 = np.meshgrid((-dim, dim), (0, dim))\n Z2 = Y2 * angle\n X3, Y3 = np.meshgrid((-dim, dim), (-dim, 0))\n Z3 = Y3 * angle\n\n r = 7\n M = 1000\n th = np.linspace(0, 2 * np.pi, M)\n x, y, z = r * np.cos(th), r * np.sin(th), angle * r * np.sin(th)\n for ax in (ax3, ax4):\n ax.plot_surface(X2, Y3, Z3,\n color='blue',\n alpha=0.5,\n linewidth=0,\n zorder=-1)\n ax.plot(x[y < 0], y[y < 0], z[y < 0],\n lw=5,\n linestyle='--',\n color='green',\n zorder=0)\n\n ax.plot_surface(X, Y, Z,\n color='red',\n alpha=0.5,\n linewidth=0,\n zorder=1)\n\n ax.plot(r * np.sin(th), r * np.cos(th), np.zeros(M),\n lw=5,\n linestyle='--',\n color='black',\n zorder=2)\n\n ax.plot_surface(X2, Y2, Z2,\n color='blue',\n alpha=0.5,\n linewidth=0,\n zorder=3)\n\n ax.plot(x[y > 0], y[y > 0], z[y > 0], lw=5,\n linestyle='--',\n color='green',\n zorder=4)\n ax.view_init(elev=20, azim=-20, roll=0)\n ax.axis('off')\n\n\ndef test_format_coord():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n x = np.arange(10)\n ax.plot(x, np.sin(x))\n xv = 0.1\n yv = 0.1\n fig.canvas.draw()\n assert ax.format_coord(xv, yv) == 'x=10.5227, y pane=1.0417, z=0.1444'\n\n # Modify parameters\n ax.view_init(roll=30, vertical_axis="y")\n fig.canvas.draw()\n assert ax.format_coord(xv, yv) == 'x pane=9.1875, y=0.9761, z=0.1291'\n\n # Reset parameters\n ax.view_init()\n fig.canvas.draw()\n assert ax.format_coord(xv, yv) == 'x=10.5227, y pane=1.0417, z=0.1444'\n\n # Check orthographic projection\n ax.set_proj_type('ortho')\n fig.canvas.draw()\n assert ax.format_coord(xv, yv) == 'x=10.8869, y pane=1.0417, z=0.1528'\n\n # Check non-default perspective projection\n ax.set_proj_type('persp', focal_length=0.1)\n fig.canvas.draw()\n assert ax.format_coord(xv, yv) == 'x=9.0620, y pane=1.0417, 
z=0.1110'\n\n\ndef test_get_axis_position():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n x = np.arange(10)\n ax.plot(x, np.sin(x))\n fig.canvas.draw()\n assert ax.get_axis_position() == (False, True, False)\n\n\ndef test_margins():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.margins(0.2)\n assert ax.margins() == (0.2, 0.2, 0.2)\n ax.margins(0.1, 0.2, 0.3)\n assert ax.margins() == (0.1, 0.2, 0.3)\n ax.margins(x=0)\n assert ax.margins() == (0, 0.2, 0.3)\n ax.margins(y=0.1)\n assert ax.margins() == (0, 0.1, 0.3)\n ax.margins(z=0)\n assert ax.margins() == (0, 0.1, 0)\n\n\ndef test_margin_getters():\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.margins(0.1, 0.2, 0.3)\n assert ax.get_xmargin() == 0.1\n assert ax.get_ymargin() == 0.2\n assert ax.get_zmargin() == 0.3\n\n\n@pytest.mark.parametrize('err, args, kwargs, match', (\n (ValueError, (-1,), {}, r'margin must be greater than -0\.5'),\n (ValueError, (1, -1, 1), {}, r'margin must be greater than -0\.5'),\n (ValueError, (1, 1, -1), {}, r'margin must be greater than -0\.5'),\n (ValueError, tuple(), {'x': -1}, r'margin must be greater than -0\.5'),\n (ValueError, tuple(), {'y': -1}, r'margin must be greater than -0\.5'),\n (ValueError, tuple(), {'z': -1}, r'margin must be greater than -0\.5'),\n (TypeError, (1, ), {'x': 1},\n 'Cannot pass both positional and keyword'),\n (TypeError, (1, ), {'x': 1, 'y': 1, 'z': 1},\n 'Cannot pass both positional and keyword'),\n (TypeError, (1, ), {'x': 1, 'y': 1},\n 'Cannot pass both positional and keyword'),\n (TypeError, (1, 1), {}, 'Must pass a single positional argument for'),\n))\ndef test_margins_errors(err, args, kwargs, match):\n with pytest.raises(err, match=match):\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.margins(*args, **kwargs)\n\n\n@check_figures_equal(extensions=["png"])\ndef test_text_3d(fig_test, fig_ref):\n ax = fig_ref.add_subplot(projection="3d")\n txt = Text(0.5, 0.5, r'Foo bar 
$\int$')\n art3d.text_2d_to_3d(txt, z=1)\n ax.add_artist(txt)\n assert txt.get_position_3d() == (0.5, 0.5, 1)\n\n ax = fig_test.add_subplot(projection="3d")\n t3d = art3d.Text3D(0.5, 0.5, 1, r'Foo bar $\int$')\n ax.add_artist(t3d)\n assert t3d.get_position_3d() == (0.5, 0.5, 1)\n\n\ndef test_draw_single_lines_from_Nx1():\n # Smoke test for GH#23459\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n ax.plot([[0], [1]], [[0], [1]], [[0], [1]])\n\n\n@check_figures_equal(extensions=["png"])\ndef test_pathpatch_3d(fig_test, fig_ref):\n ax = fig_ref.add_subplot(projection="3d")\n path = Path.unit_rectangle()\n patch = PathPatch(path)\n art3d.pathpatch_2d_to_3d(patch, z=(0, 0.5, 0.7, 1, 0), zdir='y')\n ax.add_artist(patch)\n\n ax = fig_test.add_subplot(projection="3d")\n pp3d = art3d.PathPatch3D(path, zs=(0, 0.5, 0.7, 1, 0), zdir='y')\n ax.add_artist(pp3d)\n\n\n@image_comparison(baseline_images=['scatter_spiral.png'],\n remove_text=True,\n style='mpl20')\ndef test_scatter_spiral():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n th = np.linspace(0, 2 * np.pi * 6, 256)\n sc = ax.scatter(np.sin(th), np.cos(th), th, s=(1 + th * 5), c=th ** 2)\n\n # force at least 1 draw!\n fig.canvas.draw()\n\n\ndef test_Poly3DCollection_get_path():\n # Smoke test to see that get_path does not raise\n # See GH#27361\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n p = Circle((0, 0), 1.0)\n ax.add_patch(p)\n art3d.pathpatch_2d_to_3d(p)\n p.get_path()\n\n\ndef test_Poly3DCollection_get_facecolor():\n # Smoke test to see that get_facecolor does not raise\n # See GH#4067\n y, x = np.ogrid[1:10:100j, 1:10:100j]\n z2 = np.cos(x) ** 3 - np.sin(y) ** 2\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n r = ax.plot_surface(x, y, z2, cmap='hot')\n r.get_facecolor()\n\n\ndef test_Poly3DCollection_get_edgecolor():\n # Smoke test to see that get_edgecolor does not raise\n # 
See GH#4067\n y, x = np.ogrid[1:10:100j, 1:10:100j]\n z2 = np.cos(x) ** 3 - np.sin(y) ** 2\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n r = ax.plot_surface(x, y, z2, cmap='hot')\n r.get_edgecolor()\n\n\n@pytest.mark.parametrize(\n "vertical_axis, proj_expected, axis_lines_expected, tickdirs_expected",\n [\n (\n "z",\n [\n [0.0, 1.142857, 0.0, -0.571429],\n [0.0, 0.0, 0.857143, -0.428571],\n [0.0, 0.0, 0.0, -10.0],\n [-1.142857, 0.0, 0.0, 10.571429],\n ],\n [\n ([0.05617978, 0.06329114], [-0.04213483, -0.04746835]),\n ([-0.06329114, 0.06329114], [-0.04746835, -0.04746835]),\n ([-0.06329114, -0.06329114], [-0.04746835, 0.04746835]),\n ],\n [1, 0, 0],\n ),\n (\n "y",\n [\n [1.142857, 0.0, 0.0, -0.571429],\n [0.0, 0.857143, 0.0, -0.428571],\n [0.0, 0.0, 0.0, -10.0],\n [0.0, 0.0, -1.142857, 10.571429],\n ],\n [\n ([-0.06329114, 0.06329114], [0.04746835, 0.04746835]),\n ([0.06329114, 0.06329114], [-0.04746835, 0.04746835]),\n ([-0.05617978, -0.06329114], [0.04213483, 0.04746835]),\n ],\n [2, 2, 0],\n ),\n (\n "x",\n [\n [0.0, 0.0, 1.142857, -0.571429],\n [0.857143, 0.0, 0.0, -0.428571],\n [0.0, 0.0, 0.0, -10.0],\n [0.0, -1.142857, 0.0, 10.571429],\n ],\n [\n ([-0.06329114, -0.06329114], [0.04746835, -0.04746835]),\n ([0.06329114, 0.05617978], [0.04746835, 0.04213483]),\n ([0.06329114, -0.06329114], [0.04746835, 0.04746835]),\n ],\n [1, 2, 1],\n ),\n ],\n)\ndef test_view_init_vertical_axis(\n vertical_axis, proj_expected, axis_lines_expected, tickdirs_expected\n):\n """\n Test the actual projection, axis lines and ticks matches expected values.\n\n Parameters\n ----------\n vertical_axis : str\n Axis to align vertically.\n proj_expected : ndarray\n Expected values from ax.get_proj().\n axis_lines_expected : tuple of arrays\n Edgepoints of the axis line. 
Expected values retrieved according\n to ``ax.get_[xyz]axis().line.get_data()``.\n tickdirs_expected : list of int\n indexes indicating which axis to create a tick line along.\n """\n rtol = 2e-06\n ax = plt.subplot(1, 1, 1, projection="3d")\n ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)\n ax.get_figure().canvas.draw()\n\n # Assert the projection matrix:\n proj_actual = ax.get_proj()\n np.testing.assert_allclose(proj_expected, proj_actual, rtol=rtol)\n\n for i, axis in enumerate([ax.get_xaxis(), ax.get_yaxis(), ax.get_zaxis()]):\n # Assert black lines are correctly aligned:\n axis_line_expected = axis_lines_expected[i]\n axis_line_actual = axis.line.get_data()\n np.testing.assert_allclose(axis_line_expected, axis_line_actual,\n rtol=rtol)\n\n # Assert ticks are correctly aligned:\n tickdir_expected = tickdirs_expected[i]\n tickdir_actual = axis._get_tickdir('default')\n np.testing.assert_array_equal(tickdir_expected, tickdir_actual)\n\n\n@pytest.mark.parametrize("vertical_axis", ["x", "y", "z"])\ndef test_on_move_vertical_axis(vertical_axis: str) -> None:\n """\n Test vertical axis is respected when rotating the plot interactively.\n """\n ax = plt.subplot(1, 1, 1, projection="3d")\n ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)\n ax.get_figure().canvas.draw()\n\n proj_before = ax.get_proj()\n event_click = mock_event(ax, button=MouseButton.LEFT, xdata=0, ydata=1)\n ax._button_press(event_click)\n\n event_move = mock_event(ax, button=MouseButton.LEFT, xdata=0.5, ydata=0.8)\n ax._on_move(event_move)\n\n assert ax._axis_names.index(vertical_axis) == ax._vertical_axis\n\n # Make sure plot has actually moved:\n proj_after = ax.get_proj()\n np.testing.assert_raises(\n AssertionError, np.testing.assert_allclose, proj_before, proj_after\n )\n\n\n@pytest.mark.parametrize(\n "vertical_axis, aspect_expected",\n [\n ("x", [1.190476, 0.892857, 1.190476]),\n ("y", [0.892857, 1.190476, 1.190476]),\n ("z", [1.190476, 1.190476, 
0.892857]),\n ],\n)\ndef test_set_box_aspect_vertical_axis(vertical_axis, aspect_expected):\n ax = plt.subplot(1, 1, 1, projection="3d")\n ax.view_init(elev=0, azim=0, roll=0, vertical_axis=vertical_axis)\n ax.get_figure().canvas.draw()\n\n ax.set_box_aspect(None)\n\n np.testing.assert_allclose(aspect_expected, ax._box_aspect, rtol=1e-6)\n\n\n@image_comparison(baseline_images=['arc_pathpatch.png'],\n remove_text=True,\n style='mpl20')\ndef test_arc_pathpatch():\n ax = plt.subplot(1, 1, 1, projection="3d")\n a = mpatch.Arc((0.5, 0.5), width=0.5, height=0.9,\n angle=20, theta1=10, theta2=130)\n ax.add_patch(a)\n art3d.pathpatch_2d_to_3d(a, z=0, zdir='z')\n\n\n@image_comparison(baseline_images=['panecolor_rcparams.png'],\n remove_text=True,\n style='mpl20')\ndef test_panecolor_rcparams():\n with plt.rc_context({'axes3d.xaxis.panecolor': 'r',\n 'axes3d.yaxis.panecolor': 'g',\n 'axes3d.zaxis.panecolor': 'b'}):\n fig = plt.figure(figsize=(1, 1))\n fig.add_subplot(projection='3d')\n\n\n@check_figures_equal(extensions=["png"])\ndef test_mutating_input_arrays_y_and_z(fig_test, fig_ref):\n """\n Test to see if the `z` axis does not get mutated\n after a call to `Axes3D.plot`\n\n test cases came from GH#8990\n """\n ax1 = fig_test.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax1.plot(x, y, z, 'o-')\n\n # mutate y,z to get a nontrivial line\n y[:] = [1, 2, 3]\n z[:] = [1, 2, 3]\n\n # draw the same plot without mutating x and y\n ax2 = fig_ref.add_subplot(111, projection='3d')\n x = [1, 2, 3]\n y = [0.0, 0.0, 0.0]\n z = [0.0, 0.0, 0.0]\n ax2.plot(x, y, z, 'o-')\n\n\ndef test_scatter_masked_color():\n """\n Test color parameter usage with non-finite coordinate arrays.\n\n GH#26236\n """\n\n x = [np.nan, 1, 2, 1]\n y = [0, np.inf, 2, 1]\n z = [0, 1, -np.inf, 1]\n colors = [\n [0.0, 0.0, 0.0, 1],\n [0.0, 0.0, 0.0, 1],\n [0.0, 0.0, 0.0, 1],\n [0.0, 0.0, 0.0, 1]\n ]\n\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n 
path3d = ax.scatter(x, y, z, color=colors)\n\n # Assert sizes' equality\n assert len(path3d.get_offsets()) ==\\n len(super(type(path3d), path3d).get_facecolors())\n\n\n@mpl3d_image_comparison(['surface3d_zsort_inf.png'], style='mpl20')\ndef test_surface3d_zsort_inf():\n plt.rcParams['axes3d.automargin'] = True # Remove when image is regenerated\n fig = plt.figure()\n ax = fig.add_subplot(projection='3d')\n\n x, y = np.mgrid[-2:2:0.1, -2:2:0.1]\n z = np.sin(x)**2 + np.cos(y)**2\n z[x.shape[0] // 2:, x.shape[1] // 2:] = np.inf\n\n ax.plot_surface(x, y, z, cmap='jet')\n ax.view_init(elev=45, azim=145)\n\n\ndef test_Poly3DCollection_init_value_error():\n # smoke test to ensure the input check works\n # GH#26420\n with pytest.raises(ValueError,\n match='You must provide facecolors, edgecolors, '\n 'or both for shade to work.'):\n poly = np.array([[0, 0, 1], [0, 1, 1], [0, 0, 0]], float)\n c = art3d.Poly3DCollection([poly], shade=True)\n\n\ndef test_ndarray_color_kwargs_value_error():\n # smoke test\n # ensures ndarray can be passed to color in kwargs for 3d projection plot\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(1, 0, 0, color=np.array([0, 0, 0, 1]))\n fig.canvas.draw()\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\test_axes3d.py | test_axes3d.py | Python | 92,348 | 0.75 | 0.083333 | 0.065604 | python-kit | 184 | 2025-05-28T08:46:09.693315 | Apache-2.0 | true | 6d3430d4990d4d028a4a3ca55f78f828 |
import platform\n\nimport numpy as np\n\nimport matplotlib as mpl\nfrom matplotlib.colors import same_color\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import art3d\n\n\n@image_comparison(['legend_plot.png'], remove_text=True, style='mpl20')\ndef test_legend_plot():\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n x = np.arange(10)\n ax.plot(x, 5 - x, 'o', zdir='y', label='z=1')\n ax.plot(x, x - 5, 'o', zdir='y', label='z=-1')\n ax.legend()\n\n\n@image_comparison(['legend_bar.png'], remove_text=True, style='mpl20')\ndef test_legend_bar():\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n x = np.arange(10)\n b1 = ax.bar(x, x, zdir='y', align='edge', color='m')\n b2 = ax.bar(x, x[::-1], zdir='x', align='edge', color='g')\n ax.legend([b1[0], b2[0]], ['up', 'down'])\n\n\n@image_comparison(['fancy.png'], remove_text=True, style='mpl20',\n tol=0 if platform.machine() == 'x86_64' else 0.011)\ndef test_fancy():\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n ax.plot(np.arange(10), np.full(10, 5), np.full(10, 5), 'o--', label='line')\n ax.scatter(np.arange(10), np.arange(10, 0, -1), label='scatter')\n ax.errorbar(np.full(10, 5), np.arange(10), np.full(10, 10),\n xerr=0.5, zerr=0.5, label='errorbar')\n ax.legend(loc='lower left', ncols=2, title='My legend', numpoints=1)\n\n\ndef test_linecollection_scaled_dashes():\n lines1 = [[(0, .5), (.5, 1)], [(.3, .6), (.2, .2)]]\n lines2 = [[[0.7, .2], [.8, .4]], [[.5, .7], [.6, .1]]]\n lines3 = [[[0.6, .2], [.8, .4]], [[.5, .7], [.1, .1]]]\n lc1 = art3d.Line3DCollection(lines1, linestyles="--", lw=3)\n lc2 = art3d.Line3DCollection(lines2, linestyles="-.")\n lc3 = art3d.Line3DCollection(lines3, linestyles=":", lw=.5)\n\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n ax.add_collection(lc1)\n ax.add_collection(lc2)\n ax.add_collection(lc3)\n\n leg = ax.legend([lc1, lc2, lc3], ['line1', 'line2', 'line 3'])\n h1, h2, 
h3 = leg.legend_handles\n\n for oh, lh in zip((lc1, lc2, lc3), (h1, h2, h3)):\n assert oh.get_linestyles()[0] == lh._dash_pattern\n\n\ndef test_handlerline3d():\n # Test marker consistency for monolithic Line3D legend handler.\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n ax.scatter([0, 1], [0, 1], marker="v")\n handles = [art3d.Line3D([0], [0], [0], marker="v")]\n leg = ax.legend(handles, ["Aardvark"], numpoints=1)\n assert handles[0].get_marker() == leg.legend_handles[0].get_marker()\n\n\ndef test_contour_legend_elements():\n x, y = np.mgrid[1:10, 1:10]\n h = x * y\n colors = ['blue', '#00FF00', 'red']\n\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n cs = ax.contour(x, y, h, levels=[10, 30, 50], colors=colors, extend='both')\n\n artists, labels = cs.legend_elements()\n assert labels == ['$x = 10.0$', '$x = 30.0$', '$x = 50.0$']\n assert all(isinstance(a, mpl.lines.Line2D) for a in artists)\n assert all(same_color(a.get_color(), c)\n for a, c in zip(artists, colors))\n\n\ndef test_contourf_legend_elements():\n x, y = np.mgrid[1:10, 1:10]\n h = x * y\n\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'))\n cs = ax.contourf(x, y, h, levels=[10, 30, 50],\n colors=['#FFFF00', '#FF00FF', '#00FFFF'],\n extend='both')\n cs.cmap.set_over('red')\n cs.cmap.set_under('blue')\n cs.changed()\n artists, labels = cs.legend_elements()\n assert labels == ['$x \\leq -1e+250s$',\n '$10.0 < x \\leq 30.0$',\n '$30.0 < x \\leq 50.0$',\n '$x > 1e+250s$']\n expected_colors = ('blue', '#FFFF00', '#FF00FF', 'red')\n assert all(isinstance(a, mpl.patches.Rectangle) for a in artists)\n assert all(same_color(a.get_facecolor(), c)\n for a, c in zip(artists, expected_colors))\n\n\ndef test_legend_Poly3dCollection():\n\n verts = np.asarray([[0, 0, 0], [0, 1, 1], [1, 0, 1]])\n mesh = art3d.Poly3DCollection([verts], label="surface")\n\n fig, ax = plt.subplots(subplot_kw={"projection": "3d"})\n mesh.set_edgecolor('k')\n handle = ax.add_collection3d(mesh)\n leg = 
ax.legend()\n assert (leg.legend_handles[0].get_facecolor()\n == handle.get_facecolor()).all()\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\test_legend3d.py | test_legend3d.py | Python | 4,343 | 0.95 | 0.128205 | 0.010989 | node-utils | 422 | 2025-02-23T22:23:23.170994 | BSD-3-Clause | true | bb66322e7cec44494fc2a5ed9bdb2d30 |
from pathlib import Path\n\n\n# Check that the test directories exist\nif not (Path(__file__).parent / "baseline_images").exists():\n raise OSError(\n 'The baseline image directory does not exist. '\n 'This is most likely because the test data is not installed. '\n 'You may need to install matplotlib from source to get the '\n 'test data.')\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\__init__.py | __init__.py | Python | 365 | 0.95 | 0.1 | 0.125 | awesome-app | 106 | 2025-05-25T16:46:25.954307 | Apache-2.0 | true | 58fdb0c615f036e99765a61f6b25a3a4 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 338 | 0.7 | 0 | 0 | react-lib | 508 | 2025-05-16T08:49:20.997635 | BSD-3-Clause | true | 38075aac74c12e9017dfa75cbe58a166 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\__pycache__\test_art3d.cpython-313.pyc | test_art3d.cpython-313.pyc | Other | 4,800 | 0.8 | 0 | 0.052632 | vue-tools | 151 | 2025-03-04T20:03:54.124395 | BSD-3-Clause | true | 3c4797df02e59663aae19b0fb33de19d |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\__pycache__\test_legend3d.cpython-313.pyc | test_legend3d.cpython-313.pyc | Other | 9,152 | 0.8 | 0 | 0 | vue-tools | 931 | 2025-06-12T21:12:10.366807 | MIT | true | e9a0715569c5beceee0518948faff0d4 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\tests\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 619 | 0.7 | 0 | 0 | python-kit | 303 | 2024-09-23T18:28:18.815213 | Apache-2.0 | true | 87863735ef923d958d7cebbfd8541d20 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\__pycache__\art3d.cpython-313.pyc | art3d.cpython-313.pyc | Other | 64,832 | 0.75 | 0.044759 | 0.011421 | awesome-app | 537 | 2024-12-12T19:55:44.019745 | BSD-3-Clause | false | 9547c72ff0ac90bf5458281c50ffc7d2 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\__pycache__\axis3d.cpython-313.pyc | axis3d.cpython-313.pyc | Other | 34,434 | 0.95 | 0.017804 | 0.00627 | node-utils | 387 | 2024-10-13T23:22:16.390884 | GPL-3.0 | false | 492cbc7fbbb202bb180cf1c84f571cb3 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\__pycache__\proj3d.cpython-313.pyc | proj3d.cpython-313.pyc | Other | 10,306 | 0.8 | 0.022222 | 0.023438 | awesome-app | 614 | 2025-05-18T07:02:41.436482 | GPL-3.0 | false | cf44c954664ea73c650b823cc9a1bb15 |
\n\n | .venv\Lib\site-packages\mpl_toolkits\mplot3d\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 262 | 0.7 | 0 | 0 | python-kit | 233 | 2023-11-23T17:50:28.333693 | MIT | false | 5af7ad84694ad0d39bad6a46d206cbfa |
PEP-561 marker. | .venv\Lib\site-packages\multidict\py.typed | py.typed | Other | 15 | 0.5 | 0.1 | 0 | react-lib | 361 | 2024-06-03T02:15:00.095461 | GPL-3.0 | false | c70d6df73a734925e35e5cf39ebd2ff3 |
import abc\nfrom collections.abc import Iterable, Mapping, MutableMapping\nfrom typing import TYPE_CHECKING, Protocol, TypeVar, Union, overload\n\nif TYPE_CHECKING:\n from ._multidict_py import istr\nelse:\n istr = str\n\n_V = TypeVar("_V")\n_V_co = TypeVar("_V_co", covariant=True)\n_T = TypeVar("_T")\n\n\nclass SupportsKeys(Protocol[_V_co]):\n def keys(self) -> Iterable[str]: ...\n def __getitem__(self, key: str, /) -> _V_co: ...\n\n\nclass SupportsIKeys(Protocol[_V_co]):\n def keys(self) -> Iterable[istr]: ...\n def __getitem__(self, key: istr, /) -> _V_co: ...\n\n\nMDArg = Union[SupportsKeys[_V], SupportsIKeys[_V], Iterable[tuple[str, _V]], None]\n\n\nclass MultiMapping(Mapping[str, _V_co]):\n @overload\n def getall(self, key: str) -> list[_V_co]: ...\n @overload\n def getall(self, key: str, default: _T) -> Union[list[_V_co], _T]: ...\n @abc.abstractmethod\n def getall(self, key: str, default: _T = ...) -> Union[list[_V_co], _T]:\n """Return all values for key."""\n\n @overload\n def getone(self, key: str) -> _V_co: ...\n @overload\n def getone(self, key: str, default: _T) -> Union[_V_co, _T]: ...\n @abc.abstractmethod\n def getone(self, key: str, default: _T = ...) -> Union[_V_co, _T]:\n """Return first value for key."""\n\n\nclass MutableMultiMapping(MultiMapping[_V], MutableMapping[str, _V]):\n @abc.abstractmethod\n def add(self, key: str, value: _V) -> None:\n """Add value to list."""\n\n @abc.abstractmethod\n def extend(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:\n """Add everything from arg and kwargs to the mapping."""\n\n @abc.abstractmethod\n def merge(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:\n """Merge into the mapping, adding non-existing keys."""\n\n @overload\n def popone(self, key: str) -> _V: ...\n @overload\n def popone(self, key: str, default: _T) -> Union[_V, _T]: ...\n @abc.abstractmethod\n def popone(self, key: str, default: _T = ...) 
-> Union[_V, _T]:\n """Remove specified key and return the corresponding value."""\n\n @overload\n def popall(self, key: str) -> list[_V]: ...\n @overload\n def popall(self, key: str, default: _T) -> Union[list[_V], _T]: ...\n @abc.abstractmethod\n def popall(self, key: str, default: _T = ...) -> Union[list[_V], _T]:\n """Remove all occurrences of key and return the list of corresponding values."""\n | .venv\Lib\site-packages\multidict\_abc.py | _abc.py | Python | 2,402 | 0.85 | 0.356164 | 0 | awesome-app | 186 | 2024-11-08T09:10:38.368915 | GPL-3.0 | false | 771255d15793a643bd2b43c86ba971cc |
import os\nimport platform\n\nNO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS"))\n\nPYPY = platform.python_implementation() == "PyPy"\n\nUSE_EXTENSIONS = not NO_EXTENSIONS and not PYPY\n\nif USE_EXTENSIONS:\n try:\n from . import _multidict # type: ignore[attr-defined] # noqa: F401\n except ImportError: # pragma: no cover\n # FIXME: Refactor for coverage. See #837.\n USE_EXTENSIONS = False\n | .venv\Lib\site-packages\multidict\_compat.py | _compat.py | Python | 422 | 0.95 | 0.2 | 0.090909 | node-utils | 842 | 2025-01-12T10:43:44.928484 | MIT | false | 4f33cf77f6b952d0389c7eca8599b102 |
MZ | .venv\Lib\site-packages\multidict\_multidict.cp313-win_amd64.pyd | _multidict.cp313-win_amd64.pyd | Other | 80,384 | 0.75 | 0.049536 | 0.015674 | vue-tools | 347 | 2023-08-01T05:44:03.215400 | GPL-3.0 | false | 8611b21a64982abf3611157bc26b4c3a |
import enum\nimport functools\nimport reprlib\nimport sys\nfrom array import array\nfrom collections.abc import (\n ItemsView,\n Iterable,\n Iterator,\n KeysView,\n Mapping,\n ValuesView,\n)\nfrom dataclasses import dataclass\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n Generic,\n NoReturn,\n Optional,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nfrom ._abc import MDArg, MultiMapping, MutableMultiMapping, SupportsKeys\n\nif sys.version_info >= (3, 11):\n from typing import Self\nelse:\n from typing_extensions import Self\n\n\nclass istr(str):\n """Case insensitive str."""\n\n __is_istr__ = True\n __istr_identity__: Optional[str] = None\n\n\n_V = TypeVar("_V")\n_T = TypeVar("_T")\n\n_SENTINEL = enum.Enum("_SENTINEL", "sentinel")\nsentinel = _SENTINEL.sentinel\n\n_version = array("Q", [0])\n\n\nclass _Iter(Generic[_T]):\n __slots__ = ("_size", "_iter")\n\n def __init__(self, size: int, iterator: Iterator[_T]):\n self._size = size\n self._iter = iterator\n\n def __iter__(self) -> Self:\n return self\n\n def __next__(self) -> _T:\n return next(self._iter)\n\n def __length_hint__(self) -> int:\n return self._size\n\n\nclass _ViewBase(Generic[_V]):\n def __init__(\n self,\n md: "MultiDict[_V]",\n ):\n self._md = md\n\n def __len__(self) -> int:\n return len(self._md)\n\n\nclass _ItemsView(_ViewBase[_V], ItemsView[str, _V]):\n def __contains__(self, item: object) -> bool:\n if not isinstance(item, (tuple, list)) or len(item) != 2:\n return False\n key, value = item\n try:\n identity = self._md._identity(key)\n except TypeError:\n return False\n hash_ = hash(identity)\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity and value == e.value:\n return True\n return False\n\n def __iter__(self) -> _Iter[tuple[str, _V]]:\n return _Iter(len(self), self._iter(self._md._version))\n\n def _iter(self, version: int) -> Iterator[tuple[str, _V]]:\n for e in self._md._keys.iter_entries():\n if version != self._md._version:\n raise 
RuntimeError("Dictionary changed during iteration")\n yield self._md._key(e.key), e.value\n\n @reprlib.recursive_repr()\n def __repr__(self) -> str:\n lst = []\n for e in self._md._keys.iter_entries():\n lst.append(f"'{e.key}': {e.value!r}")\n body = ", ".join(lst)\n return f"<{self.__class__.__name__}({body})>"\n\n def _parse_item(\n self, arg: Union[tuple[str, _V], _T]\n ) -> Optional[tuple[int, str, str, _V]]:\n if not isinstance(arg, tuple):\n return None\n if len(arg) != 2:\n return None\n try:\n identity = self._md._identity(arg[0])\n return (hash(identity), identity, arg[0], arg[1])\n except TypeError:\n return None\n\n def _tmp_set(self, it: Iterable[_T]) -> set[tuple[str, _V]]:\n tmp = set()\n for arg in it:\n item = self._parse_item(arg)\n if item is None:\n continue\n else:\n tmp.add((item[1], item[3]))\n return tmp\n\n def __and__(self, other: Iterable[Any]) -> set[tuple[str, _V]]:\n ret = set()\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for arg in it:\n item = self._parse_item(arg)\n if item is None:\n continue\n hash_, identity, key, value = item\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n e.hash = -1\n if e.identity == identity and e.value == value:\n ret.add((e.key, e.value))\n self._md._keys.restore_hash(hash_)\n return ret\n\n def __rand__(self, other: Iterable[_T]) -> set[_T]:\n ret = set()\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for arg in it:\n item = self._parse_item(arg)\n if item is None:\n continue\n hash_, identity, key, value = item\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity and e.value == value:\n ret.add(arg)\n break\n return ret\n\n def __or__(self, other: Iterable[_T]) -> set[Union[tuple[str, _V], _T]]:\n ret: set[Union[tuple[str, _V], _T]] = set(self)\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for arg in it:\n item: Optional[tuple[int, str, str, _V]] = self._parse_item(arg)\n if item is 
None:\n ret.add(arg)\n continue\n hash_, identity, key, value = item\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity and e.value == value: # pragma: no branch\n break\n else:\n ret.add(arg)\n return ret\n\n def __ror__(self, other: Iterable[_T]) -> set[Union[tuple[str, _V], _T]]:\n try:\n ret: set[Union[tuple[str, _V], _T]] = set(other)\n except TypeError:\n return NotImplemented\n tmp = self._tmp_set(ret)\n\n for e in self._md._keys.iter_entries():\n if (e.identity, e.value) not in tmp:\n ret.add((e.key, e.value))\n return ret\n\n def __sub__(self, other: Iterable[_T]) -> set[Union[tuple[str, _V], _T]]:\n ret: set[Union[tuple[str, _V], _T]] = set()\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n tmp = self._tmp_set(it)\n\n for e in self._md._keys.iter_entries():\n if (e.identity, e.value) not in tmp:\n ret.add((e.key, e.value))\n\n return ret\n\n def __rsub__(self, other: Iterable[_T]) -> set[_T]:\n ret: set[_T] = set()\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for arg in it:\n item = self._parse_item(arg)\n if item is None:\n ret.add(arg)\n continue\n\n hash_, identity, key, value = item\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity and e.value == value: # pragma: no branch\n break\n else:\n ret.add(arg)\n return ret\n\n def __xor__(self, other: Iterable[_T]) -> set[Union[tuple[str, _V], _T]]:\n try:\n rgt = set(other)\n except TypeError:\n return NotImplemented\n ret: set[Union[tuple[str, _V], _T]] = self - rgt\n ret |= rgt - self\n return ret\n\n __rxor__ = __xor__\n\n def isdisjoint(self, other: Iterable[tuple[str, _V]]) -> bool:\n for arg in other:\n item = self._parse_item(arg)\n if item is None:\n continue\n\n hash_, identity, key, value = item\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity and e.value == value: # pragma: no branch\n return False\n return True\n\n\nclass _ValuesView(_ViewBase[_V], 
ValuesView[_V]):\n def __contains__(self, value: object) -> bool:\n for e in self._md._keys.iter_entries():\n if e.value == value:\n return True\n return False\n\n def __iter__(self) -> _Iter[_V]:\n return _Iter(len(self), self._iter(self._md._version))\n\n def _iter(self, version: int) -> Iterator[_V]:\n for e in self._md._keys.iter_entries():\n if version != self._md._version:\n raise RuntimeError("Dictionary changed during iteration")\n yield e.value\n\n @reprlib.recursive_repr()\n def __repr__(self) -> str:\n lst = []\n for e in self._md._keys.iter_entries():\n lst.append(repr(e.value))\n body = ", ".join(lst)\n return f"<{self.__class__.__name__}({body})>"\n\n\nclass _KeysView(_ViewBase[_V], KeysView[str]):\n def __contains__(self, key: object) -> bool:\n if not isinstance(key, str):\n return False\n identity = self._md._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n return True\n return False\n\n def __iter__(self) -> _Iter[str]:\n return _Iter(len(self), self._iter(self._md._version))\n\n def _iter(self, version: int) -> Iterator[str]:\n for e in self._md._keys.iter_entries():\n if version != self._md._version:\n raise RuntimeError("Dictionary changed during iteration")\n yield self._md._key(e.key)\n\n def __repr__(self) -> str:\n lst = []\n for e in self._md._keys.iter_entries():\n lst.append(f"'{e.key}'")\n body = ", ".join(lst)\n return f"<{self.__class__.__name__}({body})>"\n\n def __and__(self, other: Iterable[object]) -> set[str]:\n ret = set()\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for key in it:\n if not isinstance(key, str):\n continue\n identity = self._md._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n ret.add(e.key)\n break\n return ret\n\n def __rand__(self, other: Iterable[_T]) -> set[_T]:\n ret = set()\n try:\n it = 
iter(other)\n except TypeError:\n return NotImplemented\n for key in it:\n if not isinstance(key, str):\n continue\n if key in self._md:\n ret.add(key)\n return cast(set[_T], ret)\n\n def __or__(self, other: Iterable[_T]) -> set[Union[str, _T]]:\n ret: set[Union[str, _T]] = set(self)\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for key in it:\n if not isinstance(key, str):\n ret.add(key)\n continue\n if key not in self._md:\n ret.add(key)\n return ret\n\n def __ror__(self, other: Iterable[_T]) -> set[Union[str, _T]]:\n try:\n ret: set[Union[str, _T]] = set(other)\n except TypeError:\n return NotImplemented\n\n tmp = set()\n for key in ret:\n if not isinstance(key, str):\n continue\n identity = self._md._identity(key)\n tmp.add(identity)\n\n for e in self._md._keys.iter_entries():\n if e.identity not in tmp:\n ret.add(e.key)\n return ret\n\n def __sub__(self, other: Iterable[object]) -> set[str]:\n ret = set(self)\n try:\n it = iter(other)\n except TypeError:\n return NotImplemented\n for key in it:\n if not isinstance(key, str):\n continue\n identity = self._md._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._md._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n ret.discard(e.key)\n break\n return ret\n\n def __rsub__(self, other: Iterable[_T]) -> set[_T]:\n try:\n ret: set[_T] = set(other)\n except TypeError:\n return NotImplemented\n for key in other:\n if not isinstance(key, str):\n continue\n if key in self._md:\n ret.discard(key) # type: ignore[arg-type]\n return ret\n\n def __xor__(self, other: Iterable[_T]) -> set[Union[str, _T]]:\n try:\n rgt = set(other)\n except TypeError:\n return NotImplemented\n ret: set[Union[str, _T]] = self - rgt # type: ignore[assignment]\n ret |= rgt - self\n return ret\n\n __rxor__ = __xor__\n\n def isdisjoint(self, other: Iterable[object]) -> bool:\n for key in other:\n if not isinstance(key, str):\n continue\n if key in self._md:\n return False\n return 
True\n\n\nclass _CSMixin:\n _ci: ClassVar[bool] = False\n\n def _key(self, key: str) -> str:\n return key\n\n def _identity(self, key: str) -> str:\n if isinstance(key, str):\n return key\n else:\n raise TypeError("MultiDict keys should be either str or subclasses of str")\n\n\nclass _CIMixin:\n _ci: ClassVar[bool] = True\n\n def _key(self, key: str) -> str:\n if type(key) is istr:\n return key\n else:\n return istr(key)\n\n def _identity(self, key: str) -> str:\n if isinstance(key, istr):\n ret = key.__istr_identity__\n if ret is None:\n ret = key.lower()\n key.__istr_identity__ = ret\n return ret\n if isinstance(key, str):\n return key.lower()\n else:\n raise TypeError("MultiDict keys should be either str or subclasses of str")\n\n\ndef estimate_log2_keysize(n: int) -> int:\n # 7 == HT_MINSIZE - 1\n return (((n * 3 + 1) // 2) | 7).bit_length()\n\n\n@dataclass\nclass _Entry(Generic[_V]):\n hash: int\n identity: str\n key: str\n value: _V\n\n\n@dataclass\nclass _HtKeys(Generic[_V]): # type: ignore[misc]\n LOG_MINSIZE: ClassVar[int] = 3\n MINSIZE: ClassVar[int] = 8\n PREALLOCATED_INDICES: ClassVar[dict[int, array]] = { # type: ignore[type-arg]\n log2_size: array(\n "b" if log2_size < 8 else "h", (-1 for i in range(1 << log2_size))\n )\n for log2_size in range(3, 10)\n }\n\n log2_size: int\n usable: int\n\n indices: array # type: ignore[type-arg] # in py3.9 array is not generic\n entries: list[Optional[_Entry[_V]]]\n\n @functools.cached_property\n def nslots(self) -> int:\n return 1 << self.log2_size\n\n @functools.cached_property\n def mask(self) -> int:\n return self.nslots - 1\n\n if sys.implementation.name != "pypy":\n\n def __sizeof__(self) -> int:\n return (\n object.__sizeof__(self)\n + sys.getsizeof(self.indices)\n + sys.getsizeof(self.entries)\n )\n\n @classmethod\n def new(cls, log2_size: int, entries: list[Optional[_Entry[_V]]]) -> Self:\n size = 1 << log2_size\n usable = (size << 1) // 3\n if log2_size < 10:\n indices = 
cls.PREALLOCATED_INDICES[log2_size].__copy__()\n elif log2_size < 16:\n indices = array("h", (-1 for i in range(size)))\n elif log2_size < 32:\n indices = array("l", (-1 for i in range(size)))\n else: # pragma: no cover # don't test huge multidicts\n indices = array("q", (-1 for i in range(size)))\n ret = cls(\n log2_size=log2_size,\n usable=usable,\n indices=indices,\n entries=entries,\n )\n return ret\n\n def clone(self) -> "_HtKeys[_V]":\n entries = [\n _Entry(e.hash, e.identity, e.key, e.value) if e is not None else None\n for e in self.entries\n ]\n\n return _HtKeys(\n log2_size=self.log2_size,\n usable=self.usable,\n indices=self.indices.__copy__(),\n entries=entries,\n )\n\n def build_indices(self, update: bool) -> None:\n mask = self.mask\n indices = self.indices\n for idx, e in enumerate(self.entries):\n assert e is not None\n hash_ = e.hash\n if update:\n if hash_ == -1:\n hash_ = hash(e.identity)\n else:\n assert hash_ != -1\n i = hash_ & mask\n perturb = hash_ & sys.maxsize\n while indices[i] != -1:\n perturb >>= 5\n i = mask & (i * 5 + perturb + 1)\n indices[i] = idx\n\n def find_empty_slot(self, hash_: int) -> int:\n mask = self.mask\n indices = self.indices\n i = hash_ & mask\n perturb = hash_ & sys.maxsize\n ix = indices[i]\n while ix != -1:\n perturb >>= 5\n i = (i * 5 + perturb + 1) & mask\n ix = indices[i]\n return i\n\n def iter_hash(self, hash_: int) -> Iterator[tuple[int, int, _Entry[_V]]]:\n mask = self.mask\n indices = self.indices\n entries = self.entries\n i = hash_ & mask\n perturb = hash_ & sys.maxsize\n ix = indices[i]\n while ix != -1:\n if ix != -2:\n e = entries[ix]\n if e.hash == hash_:\n yield i, ix, e\n perturb >>= 5\n i = (i * 5 + perturb + 1) & mask\n ix = indices[i]\n\n def del_idx(self, hash_: int, idx: int) -> None:\n mask = self.mask\n indices = self.indices\n i = hash_ & mask\n perturb = hash_ & sys.maxsize\n ix = indices[i]\n while ix != idx:\n perturb >>= 5\n i = (i * 5 + perturb + 1) & mask\n ix = indices[i]\n indices[i] 
= -2\n\n def iter_entries(self) -> Iterator[_Entry[_V]]:\n return filter(None, self.entries)\n\n def restore_hash(self, hash_: int) -> None:\n mask = self.mask\n indices = self.indices\n entries = self.entries\n i = hash_ & mask\n perturb = hash_ & sys.maxsize\n ix = indices[i]\n while ix != -1:\n if ix != -2:\n entry = entries[ix]\n if entry.hash == -1:\n entry.hash = hash_\n perturb >>= 5\n i = (i * 5 + perturb + 1) & mask\n ix = indices[i]\n\n\nclass MultiDict(_CSMixin, MutableMultiMapping[_V]):\n """Dictionary with the support for duplicate keys."""\n\n __slots__ = ("_keys", "_used", "_version")\n\n def __init__(self, arg: MDArg[_V] = None, /, **kwargs: _V):\n self._used = 0\n v = _version\n v[0] += 1\n self._version = v[0]\n if not kwargs:\n md = None\n if isinstance(arg, MultiDictProxy):\n md = arg._md\n elif isinstance(arg, MultiDict):\n md = arg\n if md is not None and md._ci is self._ci:\n self._from_md(md)\n return\n\n it = self._parse_args(arg, kwargs)\n log2_size = estimate_log2_keysize(cast(int, next(it)))\n if log2_size > 17: # pragma: no cover\n # Don't overallocate really huge keys space in init\n log2_size = 17\n self._keys: _HtKeys[_V] = _HtKeys.new(log2_size, [])\n self._extend_items(cast(Iterator[_Entry[_V]], it))\n\n def _from_md(self, md: "MultiDict[_V]") -> None:\n # Copy everything as-is without compacting the new multidict,\n # otherwise it requires reindexing\n self._keys = md._keys.clone()\n self._used = md._used\n\n @overload\n def getall(self, key: str) -> list[_V]: ...\n @overload\n def getall(self, key: str, default: _T) -> Union[list[_V], _T]: ...\n def getall(\n self, key: str, default: Union[_T, _SENTINEL] = sentinel\n ) -> Union[list[_V], _T]:\n """Return a list of all values matching the key."""\n identity = self._identity(key)\n hash_ = hash(identity)\n res = []\n restore = []\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n res.append(e.value)\n e.hash = -1\n 
restore.append(idx)\n\n if res:\n entries = self._keys.entries\n for idx in restore:\n entries[idx].hash = hash_ # type: ignore[union-attr]\n return res\n if not res and default is not sentinel:\n return default\n raise KeyError("Key not found: %r" % key)\n\n @overload\n def getone(self, key: str) -> _V: ...\n @overload\n def getone(self, key: str, default: _T) -> Union[_V, _T]: ...\n def getone(\n self, key: str, default: Union[_T, _SENTINEL] = sentinel\n ) -> Union[_V, _T]:\n """Get first value matching the key.\n\n Raises KeyError if the key is not found and no default is provided.\n """\n identity = self._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n return e.value\n if default is not sentinel:\n return default\n raise KeyError("Key not found: %r" % key)\n\n # Mapping interface #\n\n def __getitem__(self, key: str) -> _V:\n return self.getone(key)\n\n @overload\n def get(self, key: str, /) -> Union[_V, None]: ...\n @overload\n def get(self, key: str, /, default: _T) -> Union[_V, _T]: ...\n def get(self, key: str, default: Union[_T, None] = None) -> Union[_V, _T, None]:\n """Get first value matching the key.\n\n If the key is not found, returns the default (or None if no default is provided)\n """\n return self.getone(key, default)\n\n def __iter__(self) -> Iterator[str]:\n return iter(self.keys())\n\n def __len__(self) -> int:\n return self._used\n\n def keys(self) -> KeysView[str]:\n """Return a new view of the dictionary's keys."""\n return _KeysView(self)\n\n def items(self) -> ItemsView[str, _V]:\n """Return a new view of the dictionary's items *(key, value) pairs)."""\n return _ItemsView(self)\n\n def values(self) -> _ValuesView[_V]:\n """Return a new view of the dictionary's values."""\n return _ValuesView(self)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Mapping):\n return NotImplemented\n if isinstance(other, MultiDictProxy):\n 
return self == other._md\n if isinstance(other, MultiDict):\n lft = self._keys\n rht = other._keys\n if self._used != other._used:\n return False\n for e1, e2 in zip(lft.iter_entries(), rht.iter_entries()):\n if e1.identity != e2.identity or e1.value != e2.value:\n return False\n return True\n if self._used != len(other):\n return False\n for k, v in self.items():\n nv = other.get(k, sentinel)\n if v != nv:\n return False\n return True\n\n def __contains__(self, key: object) -> bool:\n if not isinstance(key, str):\n return False\n identity = self._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n return True\n return False\n\n @reprlib.recursive_repr()\n def __repr__(self) -> str:\n body = ", ".join(f"'{e.key}': {e.value!r}" for e in self._keys.iter_entries())\n return f"<{self.__class__.__name__}({body})>"\n\n if sys.implementation.name != "pypy":\n\n def __sizeof__(self) -> int:\n return object.__sizeof__(self) + sys.getsizeof(self._keys)\n\n def __reduce__(self) -> tuple[type[Self], tuple[list[tuple[str, _V]]]]:\n return (self.__class__, (list(self.items()),))\n\n def add(self, key: str, value: _V) -> None:\n identity = self._identity(key)\n hash_ = hash(identity)\n self._add_with_hash(_Entry(hash_, identity, key, value))\n self._incr_version()\n\n def copy(self) -> Self:\n """Return a copy of itself."""\n cls = self.__class__\n return cls(self)\n\n __copy__ = copy\n\n def extend(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:\n """Extend current MultiDict with more values.\n\n This method must be used instead of update.\n """\n it = self._parse_args(arg, kwargs)\n newsize = self._used + cast(int, next(it))\n self._resize(estimate_log2_keysize(newsize), False)\n self._extend_items(cast(Iterator[_Entry[_V]], it))\n\n def _parse_args(\n self,\n arg: MDArg[_V],\n kwargs: Mapping[str, _V],\n ) -> Iterator[Union[int, _Entry[_V]]]:\n identity_func = self._identity\n if 
arg:\n if isinstance(arg, MultiDictProxy):\n arg = arg._md\n if isinstance(arg, MultiDict):\n yield len(arg) + len(kwargs)\n if self._ci is not arg._ci:\n for e in arg._keys.iter_entries():\n identity = identity_func(e.key)\n yield _Entry(hash(identity), identity, e.key, e.value)\n else:\n for e in arg._keys.iter_entries():\n yield _Entry(e.hash, e.identity, e.key, e.value)\n if kwargs:\n for key, value in kwargs.items():\n identity = identity_func(key)\n yield _Entry(hash(identity), identity, key, value)\n else:\n if hasattr(arg, "keys"):\n arg = cast(SupportsKeys[_V], arg)\n arg = [(k, arg[k]) for k in arg.keys()]\n if kwargs:\n arg = list(arg)\n arg.extend(list(kwargs.items()))\n try:\n yield len(arg) + len(kwargs) # type: ignore[arg-type]\n except TypeError:\n yield 0\n for pos, item in enumerate(arg):\n if not len(item) == 2:\n raise ValueError(\n f"multidict update sequence element #{pos}"\n f"has length {len(item)}; 2 is required"\n )\n identity = identity_func(item[0])\n yield _Entry(hash(identity), identity, item[0], item[1])\n else:\n yield len(kwargs)\n for key, value in kwargs.items():\n identity = identity_func(key)\n yield _Entry(hash(identity), identity, key, value)\n\n def _extend_items(self, items: Iterable[_Entry[_V]]) -> None:\n for e in items:\n self._add_with_hash(e)\n self._incr_version()\n\n def clear(self) -> None:\n """Remove all items from MultiDict."""\n self._used = 0\n self._keys = _HtKeys.new(_HtKeys.LOG_MINSIZE, [])\n self._incr_version()\n\n # Mapping interface #\n\n def __setitem__(self, key: str, value: _V) -> None:\n identity = self._identity(key)\n hash_ = hash(identity)\n found = False\n\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n if not found:\n e.key = key\n e.value = value\n e.hash = -1\n found = True\n self._incr_version()\n elif e.hash != -1: # pragma: no branch\n self._del_at(slot, idx)\n\n if not found:\n self._add_with_hash(_Entry(hash_, identity, key, value))\n 
else:\n self._keys.restore_hash(hash_)\n\n def __delitem__(self, key: str) -> None:\n found = False\n identity = self._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n self._del_at(slot, idx)\n found = True\n if not found:\n raise KeyError(key)\n else:\n self._incr_version()\n\n @overload\n def setdefault(\n self: "MultiDict[Union[_T, None]]", key: str, default: None = None\n ) -> Union[_T, None]: ...\n @overload\n def setdefault(self, key: str, default: _V) -> _V: ...\n def setdefault(self, key: str, default: Union[_V, None] = None) -> Union[_V, None]: # type: ignore[misc]\n """Return value for key, set value to default if key is not present."""\n identity = self._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n return e.value\n self.add(key, default) # type: ignore[arg-type]\n return default\n\n @overload\n def popone(self, key: str) -> _V: ...\n @overload\n def popone(self, key: str, default: _T) -> Union[_V, _T]: ...\n def popone(\n self, key: str, default: Union[_T, _SENTINEL] = sentinel\n ) -> Union[_V, _T]:\n """Remove specified key and return the corresponding value.\n\n If key is not found, d is returned if given, otherwise\n KeyError is raised.\n\n """\n identity = self._identity(key)\n hash_ = hash(identity)\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n value = e.value\n self._del_at(slot, idx)\n self._incr_version()\n return value\n if default is sentinel:\n raise KeyError(key)\n else:\n return default\n\n # Type checking will inherit signature for pop() if we don't confuse it here.\n if not TYPE_CHECKING:\n pop = popone\n\n @overload\n def popall(self, key: str) -> list[_V]: ...\n @overload\n def popall(self, key: str, default: _T) -> Union[list[_V], _T]: ...\n def popall(\n self, key: str, default: Union[_T, 
_SENTINEL] = sentinel\n ) -> Union[list[_V], _T]:\n """Remove all occurrences of key and return the list of corresponding\n values.\n\n If key is not found, default is returned if given, otherwise\n KeyError is raised.\n\n """\n found = False\n identity = self._identity(key)\n hash_ = hash(identity)\n ret = []\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n found = True\n ret.append(e.value)\n self._del_at(slot, idx)\n self._incr_version()\n\n if not found:\n if default is sentinel:\n raise KeyError(key)\n else:\n return default\n else:\n return ret\n\n def popitem(self) -> tuple[str, _V]:\n """Remove and return an arbitrary (key, value) pair."""\n if self._used <= 0:\n raise KeyError("empty multidict")\n\n pos = len(self._keys.entries) - 1\n entry = self._keys.entries.pop()\n\n while entry is None:\n pos -= 1\n entry = self._keys.entries.pop()\n\n ret = self._key(entry.key), entry.value\n self._keys.del_idx(entry.hash, pos)\n self._used -= 1\n self._incr_version()\n return ret\n\n def update(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:\n """Update the dictionary, overwriting existing keys."""\n it = self._parse_args(arg, kwargs)\n newsize = self._used + cast(int, next(it))\n log2_size = estimate_log2_keysize(newsize)\n if log2_size > 17: # pragma: no cover\n # Don't overallocate really huge keys space in update,\n # duplicate keys could reduce the resulting anount of entries\n log2_size = 17\n if log2_size > self._keys.log2_size:\n self._resize(log2_size, False)\n try:\n self._update_items(cast(Iterator[_Entry[_V]], it))\n finally:\n self._post_update()\n\n def _update_items(self, items: Iterator[_Entry[_V]]) -> None:\n for entry in items:\n found = False\n hash_ = entry.hash\n identity = entry.identity\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n if not found:\n found = True\n e.key = entry.key\n e.value = entry.value\n e.hash = -1\n else:\n 
self._del_at_for_upd(e)\n if not found:\n self._add_with_hash_for_upd(entry)\n\n def _post_update(self) -> None:\n keys = self._keys\n indices = keys.indices\n entries = keys.entries\n for slot in range(keys.nslots):\n idx = indices[slot]\n if idx >= 0:\n e2 = entries[idx]\n assert e2 is not None\n if e2.key is None:\n entries[idx] = None\n indices[slot] = -2\n self._used -= 1\n if e2.hash == -1:\n e2.hash = hash(e2.identity)\n\n self._incr_version()\n\n def merge(self, arg: MDArg[_V] = None, /, **kwargs: _V) -> None:\n """Merge into the dictionary, adding non-existing keys."""\n it = self._parse_args(arg, kwargs)\n newsize = self._used + cast(int, next(it))\n log2_size = estimate_log2_keysize(newsize)\n if log2_size > 17: # pragma: no cover\n # Don't overallocate really huge keys space in update,\n # duplicate keys could reduce the resulting anount of entries\n log2_size = 17\n if log2_size > self._keys.log2_size:\n self._resize(log2_size, False)\n try:\n self._merge_items(cast(Iterator[_Entry[_V]], it))\n finally:\n self._post_update()\n\n def _merge_items(self, items: Iterator[_Entry[_V]]) -> None:\n for entry in items:\n hash_ = entry.hash\n identity = entry.identity\n for slot, idx, e in self._keys.iter_hash(hash_):\n if e.identity == identity: # pragma: no branch\n break\n else:\n self._add_with_hash_for_upd(entry)\n\n def _incr_version(self) -> None:\n v = _version\n v[0] += 1\n self._version = v[0]\n\n def _resize(self, log2_newsize: int, update: bool) -> None:\n oldkeys = self._keys\n newentries = self._used\n\n if len(oldkeys.entries) == newentries:\n entries = oldkeys.entries\n else:\n entries = [e for e in oldkeys.entries if e is not None]\n newkeys: _HtKeys[_V] = _HtKeys.new(log2_newsize, entries)\n newkeys.usable -= newentries\n newkeys.build_indices(update)\n self._keys = newkeys\n\n def _add_with_hash(self, entry: _Entry[_V]) -> None:\n if self._keys.usable <= 0:\n self._resize((self._used * 3 | _HtKeys.MINSIZE - 1).bit_length(), False)\n keys = 
self._keys\n slot = keys.find_empty_slot(entry.hash)\n keys.indices[slot] = len(keys.entries)\n keys.entries.append(entry)\n self._incr_version()\n self._used += 1\n keys.usable -= 1\n\n def _add_with_hash_for_upd(self, entry: _Entry[_V]) -> None:\n if self._keys.usable <= 0:\n self._resize((self._used * 3 | _HtKeys.MINSIZE - 1).bit_length(), True)\n keys = self._keys\n slot = keys.find_empty_slot(entry.hash)\n keys.indices[slot] = len(keys.entries)\n entry.hash = -1\n keys.entries.append(entry)\n self._incr_version()\n self._used += 1\n keys.usable -= 1\n\n def _del_at(self, slot: int, idx: int) -> None:\n self._keys.entries[idx] = None\n self._keys.indices[slot] = -2\n self._used -= 1\n\n def _del_at_for_upd(self, entry: _Entry[_V]) -> None:\n entry.key = None # type: ignore[assignment]\n entry.value = None # type: ignore[assignment]\n\n\nclass CIMultiDict(_CIMixin, MultiDict[_V]):\n """Dictionary with the support for duplicate case-insensitive keys."""\n\n\nclass MultiDictProxy(_CSMixin, MultiMapping[_V]):\n """Read-only proxy for MultiDict instance."""\n\n __slots__ = ("_md",)\n\n _md: MultiDict[_V]\n\n def __init__(self, arg: Union[MultiDict[_V], "MultiDictProxy[_V]"]):\n if not isinstance(arg, (MultiDict, MultiDictProxy)):\n raise TypeError(\n f"ctor requires MultiDict or MultiDictProxy instance, not {type(arg)}"\n )\n if isinstance(arg, MultiDictProxy):\n self._md = arg._md\n else:\n self._md = arg\n\n def __reduce__(self) -> NoReturn:\n raise TypeError(f"can't pickle {self.__class__.__name__} objects")\n\n @overload\n def getall(self, key: str) -> list[_V]: ...\n @overload\n def getall(self, key: str, default: _T) -> Union[list[_V], _T]: ...\n def getall(\n self, key: str, default: Union[_T, _SENTINEL] = sentinel\n ) -> Union[list[_V], _T]:\n """Return a list of all values matching the key."""\n if default is not sentinel:\n return self._md.getall(key, default)\n else:\n return self._md.getall(key)\n\n @overload\n def getone(self, key: str) -> _V: ...\n 
@overload\n def getone(self, key: str, default: _T) -> Union[_V, _T]: ...\n def getone(\n self, key: str, default: Union[_T, _SENTINEL] = sentinel\n ) -> Union[_V, _T]:\n """Get first value matching the key.\n\n Raises KeyError if the key is not found and no default is provided.\n """\n if default is not sentinel:\n return self._md.getone(key, default)\n else:\n return self._md.getone(key)\n\n # Mapping interface #\n\n def __getitem__(self, key: str) -> _V:\n return self.getone(key)\n\n @overload\n def get(self, key: str, /) -> Union[_V, None]: ...\n @overload\n def get(self, key: str, /, default: _T) -> Union[_V, _T]: ...\n def get(self, key: str, default: Union[_T, None] = None) -> Union[_V, _T, None]:\n """Get first value matching the key.\n\n If the key is not found, returns the default (or None if no default is provided)\n """\n return self._md.getone(key, default)\n\n def __iter__(self) -> Iterator[str]:\n return iter(self._md.keys())\n\n def __len__(self) -> int:\n return len(self._md)\n\n def keys(self) -> KeysView[str]:\n """Return a new view of the dictionary's keys."""\n return self._md.keys()\n\n def items(self) -> ItemsView[str, _V]:\n """Return a new view of the dictionary's items *(key, value) pairs)."""\n return self._md.items()\n\n def values(self) -> _ValuesView[_V]:\n """Return a new view of the dictionary's values."""\n return self._md.values()\n\n def __eq__(self, other: object) -> bool:\n return self._md == other\n\n def __contains__(self, key: object) -> bool:\n return key in self._md\n\n @reprlib.recursive_repr()\n def __repr__(self) -> str:\n body = ", ".join(f"'{k}': {v!r}" for k, v in self.items())\n return f"<{self.__class__.__name__}({body})>"\n\n def copy(self) -> MultiDict[_V]:\n """Return a copy of itself."""\n return MultiDict(self._md)\n\n\nclass CIMultiDictProxy(_CIMixin, MultiDictProxy[_V]):\n """Read-only proxy for CIMultiDict instance."""\n\n def __init__(self, arg: Union[MultiDict[_V], MultiDictProxy[_V]]):\n if not 
isinstance(arg, (CIMultiDict, CIMultiDictProxy)):\n raise TypeError(\n "ctor requires CIMultiDict or CIMultiDictProxy instance"\n f", not {type(arg)}"\n )\n\n super().__init__(arg)\n\n def copy(self) -> CIMultiDict[_V]:\n """Return a copy of itself."""\n return CIMultiDict(self._md)\n\n\ndef getversion(md: Union[MultiDict[object], MultiDictProxy[object]]) -> int:\n if isinstance(md, MultiDictProxy):\n md = md._md\n elif not isinstance(md, MultiDict):\n raise TypeError("Parameter should be multidict or proxy")\n return md._version\n | .venv\Lib\site-packages\multidict\_multidict_py.py | _multidict_py.py | Python | 39,955 | 0.95 | 0.28905 | 0.011342 | awesome-app | 431 | 2025-06-26T22:44:22.194966 | Apache-2.0 | false | 8200a4c0d815c6ca7b44161bccc9ef7a |
"""Multidict implementation.\n\nHTTP Headers and URL query string require specific data structure:\nmultidict. It behaves mostly like a dict but it can have\nseveral values for the same key.\n"""\n\nfrom typing import TYPE_CHECKING\n\nfrom ._abc import MultiMapping, MutableMultiMapping\nfrom ._compat import USE_EXTENSIONS\n\n__all__ = (\n "MultiMapping",\n "MutableMultiMapping",\n "MultiDictProxy",\n "CIMultiDictProxy",\n "MultiDict",\n "CIMultiDict",\n "upstr",\n "istr",\n "getversion",\n)\n\n__version__ = "6.6.3"\n\n\nif TYPE_CHECKING or not USE_EXTENSIONS:\n from ._multidict_py import (\n CIMultiDict,\n CIMultiDictProxy,\n MultiDict,\n MultiDictProxy,\n getversion,\n istr,\n )\nelse:\n from collections.abc import ItemsView, KeysView, ValuesView\n\n from ._multidict import (\n CIMultiDict,\n CIMultiDictProxy,\n MultiDict,\n MultiDictProxy,\n _ItemsView,\n _KeysView,\n _ValuesView,\n getversion,\n istr,\n )\n\n MultiMapping.register(MultiDictProxy)\n MutableMultiMapping.register(MultiDict)\n KeysView.register(_KeysView)\n ItemsView.register(_ItemsView)\n ValuesView.register(_ValuesView)\n\n\nupstr = istr\n | .venv\Lib\site-packages\multidict\__init__.py | __init__.py | Python | 1,227 | 0.85 | 0.033898 | 0 | awesome-app | 123 | 2024-03-27T20:52:35.175442 | MIT | false | 60cd41c5e45b8ed8db9591d34005ce1b |
\n\n | .venv\Lib\site-packages\multidict\__pycache__\_abc.cpython-313.pyc | _abc.cpython-313.pyc | Other | 5,596 | 0.8 | 0.037736 | 0 | react-lib | 594 | 2025-01-02T22:40:09.333089 | MIT | false | c52b7bd42aed3d9b341e08a5a3a9ff71 |
\n\n | .venv\Lib\site-packages\multidict\__pycache__\_compat.cpython-313.pyc | _compat.cpython-313.pyc | Other | 692 | 0.8 | 0 | 0 | node-utils | 555 | 2024-09-13T01:50:21.730433 | Apache-2.0 | false | 85e51253de2d2fef254512d5ec2176a7 |
\n\n | .venv\Lib\site-packages\multidict\__pycache__\_multidict_py.cpython-313.pyc | _multidict_py.cpython-313.pyc | Other | 66,164 | 0.75 | 0.021834 | 0.01171 | node-utils | 578 | 2024-03-16T10:16:36.669489 | BSD-3-Clause | false | 9d7788398a42151cd7f5580b304e450c |
\n\n | .venv\Lib\site-packages\multidict\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,402 | 0.95 | 0.030303 | 0 | vue-tools | 137 | 2024-03-09T15:48:58.289283 | MIT | false | 17018db3a5dcbd840d358b036e02a7f4 |
pip\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\INSTALLER | INSTALLER | Other | 4 | 0.5 | 0 | 0 | awesome-app | 329 | 2023-09-05T09:47:39.007095 | MIT | false | 365c9bfeb7d89244f2ce01c1de44cb85 |
Metadata-Version: 2.4\nName: multidict\nVersion: 6.6.3\nSummary: multidict implementation\nHome-page: https://github.com/aio-libs/multidict\nAuthor: Andrew Svetlov\nAuthor-email: andrew.svetlov@gmail.com\nLicense: Apache License 2.0\nProject-URL: Chat: Matrix, https://matrix.to/#/#aio-libs:matrix.org\nProject-URL: Chat: Matrix Space, https://matrix.to/#/#aio-libs-space:matrix.org\nProject-URL: CI: GitHub, https://github.com/aio-libs/multidict/actions\nProject-URL: Code of Conduct, https://github.com/aio-libs/.github/blob/master/CODE_OF_CONDUCT.md\nProject-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict\nProject-URL: Docs: Changelog, https://multidict.aio-libs.org/en/latest/changes/\nProject-URL: Docs: RTD, https://multidict.aio-libs.org\nProject-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues\nProject-URL: GitHub: repo, https://github.com/aio-libs/multidict\nClassifier: Development Status :: 5 - Production/Stable\nClassifier: Intended Audience :: Developers\nClassifier: Programming Language :: Python\nClassifier: Programming Language :: Python :: 3\nClassifier: Programming Language :: Python :: 3.9\nClassifier: Programming Language :: Python :: 3.10\nClassifier: Programming Language :: Python :: 3.11\nClassifier: Programming Language :: Python :: 3.12\nClassifier: Programming Language :: Python :: 3.13\nRequires-Python: >=3.9\nDescription-Content-Type: text/x-rst\nLicense-File: LICENSE\nRequires-Dist: typing-extensions>=4.1.0; python_version < "3.11"\nDynamic: license-file\n\n=========\nmultidict\n=========\n\n.. image:: https://github.com/aio-libs/multidict/actions/workflows/ci-cd.yml/badge.svg\n :target: https://github.com/aio-libs/multidict/actions\n :alt: GitHub status for master branch\n\n.. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg?flag=pytest\n :target: https://codecov.io/gh/aio-libs/multidict?flags[]=pytest\n :alt: Coverage metrics\n\n.. 
image:: https://img.shields.io/pypi/v/multidict.svg\n :target: https://pypi.org/project/multidict\n :alt: PyPI\n\n.. image:: https://readthedocs.org/projects/multidict/badge/?version=latest\n :target: https://multidict.aio-libs.org\n :alt: Read The Docs build status badge\n\n.. image:: https://img.shields.io/endpoint?url=https://codspeed.io/badge.json\n :target: https://codspeed.io/aio-libs/multidict\n :alt: CodSpeed\n\n.. image:: https://img.shields.io/pypi/pyversions/multidict.svg\n :target: https://pypi.org/project/multidict\n :alt: Python versions\n\n.. image:: https://img.shields.io/matrix/aio-libs:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat\n :target: https://matrix.to/#/%23aio-libs:matrix.org\n :alt: Matrix Room — #aio-libs:matrix.org\n\n.. image:: https://img.shields.io/matrix/aio-libs-space:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs-space%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat\n :target: https://matrix.to/#/%23aio-libs-space:matrix.org\n :alt: Matrix Space — #aio-libs-space:matrix.org\n\nMultidict is dict-like collection of *key-value pairs* where key\nmight occur more than once in the container.\n\nIntroduction\n------------\n\n*HTTP Headers* and *URL query string* require specific data structure:\n*multidict*. It behaves mostly like a regular ``dict`` but it may have\nseveral *values* for the same *key* and *preserves insertion ordering*.\n\nThe *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).\n\n``multidict`` has four multidict classes:\n``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``\nand ``CIMultiDictProxy``.\n\nImmutable proxies (``MultiDictProxy`` and\n``CIMultiDictProxy``) provide a dynamic view for the\nproxied multidict, the view reflects underlying collection changes. 
They\nimplement the ``collections.abc.Mapping`` interface.\n\nRegular mutable (``MultiDict`` and ``CIMultiDict``) classes\nimplement ``collections.abc.MutableMapping`` and allows them to change\ntheir own content.\n\n\n*Case insensitive* (``CIMultiDict`` and\n``CIMultiDictProxy``) assume the *keys* are case\ninsensitive, e.g.::\n\n >>> dct = CIMultiDict(key='val')\n >>> 'Key' in dct\n True\n >>> dct['Key']\n 'val'\n\n*Keys* should be ``str`` or ``istr`` instances.\n\nThe library has optional C Extensions for speed.\n\n\nLicense\n-------\n\nApache 2\n\nLibrary Installation\n--------------------\n\n.. code-block:: bash\n\n $ pip install multidict\n\nThe library is Python 3 only!\n\nPyPI contains binary wheels for Linux, Windows and MacOS. If you want to install\n``multidict`` on another operating system (or *Alpine Linux* inside a Docker) the\ntarball will be used to compile the library from source. It requires a C compiler and\nPython headers to be installed.\n\nTo skip the compilation, please use the `MULTIDICT_NO_EXTENSIONS` environment variable,\ne.g.:\n\n.. code-block:: bash\n\n $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict\n\nPlease note, the pure Python (uncompiled) version is about 20-50 times slower depending on\nthe usage scenario!!!\n\nFor extension development, set the ``MULTIDICT_DEBUG_BUILD`` environment variable to compile\nthe extensions in debug mode:\n\n.. code-block:: console\n\n $ MULTIDICT_DEBUG_BUILD=1 pip install multidict\n\nChangelog\n---------\nSee `RTD page <http://multidict.aio-libs.org/en/latest/changes>`_.\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\METADATA | METADATA | Other | 5,418 | 0.95 | 0.040541 | 0.036036 | react-lib | 891 | 2025-04-21T08:45:00.442000 | GPL-3.0 | false | dd131281a64fd991b16c651e846af22f |
multidict-6.6.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4\nmultidict-6.6.3.dist-info/METADATA,sha256=JDvaAsOSyJf0vPvPLrfCJW-WMFl06G-qGaOgVhgKPJE,5418\nmultidict-6.6.3.dist-info/RECORD,,\nmultidict-6.6.3.dist-info/WHEEL,sha256=qV0EIPljj1XC_vuSatRWjn02nZIz3N1t8jsZz7HBr2U,101\nmultidict-6.6.3.dist-info/licenses/LICENSE,sha256=k9Ealo4vDzY3PECBH_bSDhc_WMPKtYhM1mF7v9eVSSo,611\nmultidict-6.6.3.dist-info/top_level.txt,sha256=-euDElkk5_qkmfIJ7WiqCab02ZlSFZWynejKg59qZQQ,10\nmultidict/__init__.py,sha256=O0b0Yq-jV26Yc-AzqGfsZ03H50lgdpCHXxTw1hGE340,1227\nmultidict/__pycache__/__init__.cpython-313.pyc,,\nmultidict/__pycache__/_abc.cpython-313.pyc,,\nmultidict/__pycache__/_compat.cpython-313.pyc,,\nmultidict/__pycache__/_multidict_py.cpython-313.pyc,,\nmultidict/_abc.py,sha256=e_0JDJi7E6LWS0A3gUJ17SkgDLlmg8ffjfylTu_vboc,2402\nmultidict/_compat.py,sha256=TcRjCStk2iIY1_DwDNj8kNpJRQ9rtLj92Xvk1z2G_ak,422\nmultidict/_multidict.cp313-win_amd64.pyd,sha256=DXaohnFFt_0VFyd0-BktvglKkS0BbDBVxKNvuTiSsyY,80384\nmultidict/_multidict_py.py,sha256=VGQ58P7VOd6lRf3WVAinb62aD16DPdAWRt68qmiJMXE,39955\nmultidict/py.typed,sha256=e9bmbH3UFxsabQrnNFPG9qxIXztwbcM6IKDYnvZwprY,15\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\RECORD | RECORD | Other | 1,189 | 0.7 | 0 | 0 | react-lib | 217 | 2024-05-23T23:01:51.338959 | MIT | false | acf258469d0b1a02e3c0ccdbcd383c10 |
multidict\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\top_level.txt | top_level.txt | Other | 10 | 0.5 | 0 | 0 | node-utils | 413 | 2025-06-12T19:38:33.757253 | Apache-2.0 | false | c25c8390af673b8ed2d00811b61fe34b |
Wheel-Version: 1.0\nGenerator: setuptools (80.9.0)\nRoot-Is-Purelib: false\nTag: cp313-cp313-win_amd64\n\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\WHEEL | WHEEL | Other | 101 | 0.7 | 0 | 0 | node-utils | 897 | 2025-01-19T17:04:59.467758 | Apache-2.0 | false | eb6c9e665bbbd698545236600675f165 |
Copyright 2016 Andrew Svetlov and aio-libs contributors\n\n Licensed under the Apache License, Version 2.0 (the "License");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an "AS IS" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n | .venv\Lib\site-packages\multidict-6.6.3.dist-info\licenses\LICENSE | LICENSE | Other | 611 | 0.95 | 0.076923 | 0 | vue-tools | 227 | 2024-11-01T17:58:01.944226 | Apache-2.0 | false | b4fef6e4b0828c2401fb983363985b39 |
#\n# A higher level module for using sockets (or Windows named pipes)\n#\n# multiprocessing/connection.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\n__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]\n\nimport errno\nimport io\nimport os\nimport sys\nimport socket\nimport struct\nimport time\nimport tempfile\nimport itertools\n\ntry:\n import _multiprocess as _multiprocessing\nexcept ImportError:\n import _multiprocessing\n\nfrom . import util\n\nfrom . import AuthenticationError, BufferTooShort\nfrom .context import reduction\n_ForkingPickler = reduction.ForkingPickler\n\ntry:\n import _winapi\n from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE\nexcept ImportError:\n if sys.platform == 'win32':\n raise\n _winapi = None\n\n#\n#\n#\n\nBUFSIZE = 8192\n# A very generous timeout when it comes to local connections...\nCONNECTION_TIMEOUT = 20.\n\n_mmap_counter = itertools.count()\n\ndefault_family = 'AF_INET'\nfamilies = ['AF_INET']\n\nif hasattr(socket, 'AF_UNIX'):\n default_family = 'AF_UNIX'\n families += ['AF_UNIX']\n\nif sys.platform == 'win32':\n default_family = 'AF_PIPE'\n families += ['AF_PIPE']\n\n\ndef _init_timeout(timeout=CONNECTION_TIMEOUT):\n return getattr(time,'monotonic',time.time)() + timeout\n\ndef _check_timeout(t):\n return getattr(time,'monotonic',time.time)() > t\n\n#\n#\n#\n\ndef arbitrary_address(family):\n '''\n Return an arbitrary free address for the given family\n '''\n if family == 'AF_INET':\n return ('localhost', 0)\n elif family == 'AF_UNIX':\n return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())\n elif family == 'AF_PIPE':\n return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %\n (os.getpid(), next(_mmap_counter)), dir="")\n else:\n raise ValueError('unrecognized family')\n\ndef _validate_family(family):\n '''\n Checks if the family is valid for the current environment.\n '''\n if sys.platform != 'win32' and family == 'AF_PIPE':\n raise 
ValueError('Family %s is not recognized.' % family)\n\n if sys.platform == 'win32' and family == 'AF_UNIX':\n # double check\n if not hasattr(socket, family):\n raise ValueError('Family %s is not recognized.' % family)\n\ndef address_type(address):\n '''\n Return the types of the address\n\n This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'\n '''\n if type(address) == tuple:\n return 'AF_INET'\n elif type(address) is str and address.startswith('\\\\'):\n return 'AF_PIPE'\n elif type(address) is str or util.is_abstract_socket_namespace(address):\n return 'AF_UNIX'\n else:\n raise ValueError('address type of %r unrecognized' % address)\n\n#\n# Connection classes\n#\n\nclass _ConnectionBase:\n _handle = None\n\n def __init__(self, handle, readable=True, writable=True):\n handle = handle.__index__()\n if handle < 0:\n raise ValueError("invalid handle")\n if not readable and not writable:\n raise ValueError(\n "at least one of `readable` and `writable` must be True")\n self._handle = handle\n self._readable = readable\n self._writable = writable\n\n # XXX should we use util.Finalize instead of a __del__?\n\n def __del__(self):\n if self._handle is not None:\n self._close()\n\n def _check_closed(self):\n if self._handle is None:\n raise OSError("handle is closed")\n\n def _check_readable(self):\n if not self._readable:\n raise OSError("connection is write-only")\n\n def _check_writable(self):\n if not self._writable:\n raise OSError("connection is read-only")\n\n def _bad_message_length(self):\n if self._writable:\n self._readable = False\n else:\n self.close()\n raise OSError("bad message length")\n\n @property\n def closed(self):\n """True if the connection is closed"""\n return self._handle is None\n\n @property\n def readable(self):\n """True if the connection is readable"""\n return self._readable\n\n @property\n def writable(self):\n """True if the connection is writable"""\n return self._writable\n\n def fileno(self):\n """File descriptor or handle of the 
connection"""\n self._check_closed()\n return self._handle\n\n def close(self):\n """Close the connection"""\n if self._handle is not None:\n try:\n self._close()\n finally:\n self._handle = None\n\n def send_bytes(self, buf, offset=0, size=None):\n """Send the bytes data from a bytes-like object"""\n self._check_closed()\n self._check_writable()\n m = memoryview(buf)\n if m.itemsize > 1:\n m = m.cast('B')\n n = m.nbytes\n if offset < 0:\n raise ValueError("offset is negative")\n if n < offset:\n raise ValueError("buffer length < offset")\n if size is None:\n size = n - offset\n elif size < 0:\n raise ValueError("size is negative")\n elif offset + size > n:\n raise ValueError("buffer length < offset + size")\n self._send_bytes(m[offset:offset + size])\n\n def send(self, obj):\n """Send a (picklable) object"""\n self._check_closed()\n self._check_writable()\n self._send_bytes(_ForkingPickler.dumps(obj))\n\n def recv_bytes(self, maxlength=None):\n """\n Receive bytes data as a bytes object.\n """\n self._check_closed()\n self._check_readable()\n if maxlength is not None and maxlength < 0:\n raise ValueError("negative maxlength")\n buf = self._recv_bytes(maxlength)\n if buf is None:\n self._bad_message_length()\n return buf.getvalue()\n\n def recv_bytes_into(self, buf, offset=0):\n """\n Receive bytes data into a writeable bytes-like object.\n Return the number of bytes read.\n """\n self._check_closed()\n self._check_readable()\n with memoryview(buf) as m:\n # Get bytesize of arbitrary buffer\n itemsize = m.itemsize\n bytesize = itemsize * len(m)\n if offset < 0:\n raise ValueError("negative offset")\n elif offset > bytesize:\n raise ValueError("offset too large")\n result = self._recv_bytes()\n size = result.tell()\n if bytesize < offset + size:\n raise BufferTooShort(result.getvalue())\n # Message can fit in dest\n result.seek(0)\n result.readinto(m[offset // itemsize :\n (offset + size) // itemsize])\n return size\n\n def recv(self):\n """Receive a (picklable) 
object"""\n self._check_closed()\n self._check_readable()\n buf = self._recv_bytes()\n return _ForkingPickler.loads(buf.getbuffer())\n\n def poll(self, timeout=0.0):\n """Whether there is any input available to be read"""\n self._check_closed()\n self._check_readable()\n return self._poll(timeout)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\nif _winapi:\n\n class PipeConnection(_ConnectionBase):\n """\n Connection class based on a Windows named pipe.\n Overlapped I/O is used, so the handles must have been created\n with FILE_FLAG_OVERLAPPED.\n """\n _got_empty_message = False\n _send_ov = None\n\n def _close(self, _CloseHandle=_winapi.CloseHandle):\n ov = self._send_ov\n if ov is not None:\n # Interrupt WaitForMultipleObjects() in _send_bytes()\n ov.cancel()\n _CloseHandle(self._handle)\n\n def _send_bytes(self, buf):\n if self._send_ov is not None:\n # A connection should only be used by a single thread\n raise ValueError("concurrent send_bytes() calls "\n "are not supported")\n ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)\n self._send_ov = ov\n try:\n if err == _winapi.ERROR_IO_PENDING:\n waitres = _winapi.WaitForMultipleObjects(\n [ov.event], False, INFINITE)\n assert waitres == WAIT_OBJECT_0\n except:\n ov.cancel()\n raise\n finally:\n self._send_ov = None\n nwritten, err = ov.GetOverlappedResult(True)\n if err == _winapi.ERROR_OPERATION_ABORTED:\n # close() was called by another thread while\n # WaitForMultipleObjects() was waiting for the overlapped\n # operation.\n raise OSError(errno.EPIPE, "handle is closed")\n assert err == 0\n assert nwritten == len(buf)\n\n def _recv_bytes(self, maxsize=None):\n if self._got_empty_message:\n self._got_empty_message = False\n return io.BytesIO()\n else:\n bsize = 128 if maxsize is None else min(maxsize, 128)\n try:\n ov, err = _winapi.ReadFile(self._handle, bsize,\n overlapped=True)\n try:\n if err == _winapi.ERROR_IO_PENDING:\n waitres 
= _winapi.WaitForMultipleObjects(\n [ov.event], False, INFINITE)\n assert waitres == WAIT_OBJECT_0\n except:\n ov.cancel()\n raise\n finally:\n nread, err = ov.GetOverlappedResult(True)\n if err == 0:\n f = io.BytesIO()\n f.write(ov.getbuffer())\n return f\n elif err == _winapi.ERROR_MORE_DATA:\n return self._get_more_data(ov, maxsize)\n except OSError as e:\n if e.winerror == _winapi.ERROR_BROKEN_PIPE:\n raise EOFError\n else:\n raise\n raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")\n\n def _poll(self, timeout):\n if (self._got_empty_message or\n _winapi.PeekNamedPipe(self._handle)[0] != 0):\n return True\n return bool(wait([self], timeout))\n\n def _get_more_data(self, ov, maxsize):\n buf = ov.getbuffer()\n f = io.BytesIO()\n f.write(buf)\n left = _winapi.PeekNamedPipe(self._handle)[1]\n assert left > 0\n if maxsize is not None and len(buf) + left > maxsize:\n self._bad_message_length()\n ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)\n rbytes, err = ov.GetOverlappedResult(True)\n assert err == 0\n assert rbytes == left\n f.write(ov.getbuffer())\n return f\n\n\nclass Connection(_ConnectionBase):\n """\n Connection class based on an arbitrary file descriptor (Unix only), or\n a socket handle (Windows).\n """\n\n if _winapi:\n def _close(self, _close=_multiprocessing.closesocket):\n _close(self._handle)\n _write = _multiprocessing.send\n _read = _multiprocessing.recv\n else:\n def _close(self, _close=os.close):\n _close(self._handle)\n _write = os.write\n _read = os.read\n\n def _send(self, buf, write=_write):\n remaining = len(buf)\n while True:\n n = write(self._handle, buf)\n remaining -= n\n if remaining == 0:\n break\n buf = buf[n:]\n\n def _recv(self, size, read=_read):\n buf = io.BytesIO()\n handle = self._handle\n remaining = size\n while remaining > 0:\n chunk = read(handle, remaining)\n n = len(chunk)\n if n == 0:\n if remaining == size:\n raise EOFError\n else:\n raise OSError("got end of file during message")\n 
buf.write(chunk)\n remaining -= n\n return buf\n\n def _send_bytes(self, buf):\n n = len(buf)\n if n > 0x7fffffff:\n pre_header = struct.pack("!i", -1)\n header = struct.pack("!Q", n)\n self._send(pre_header)\n self._send(header)\n self._send(buf)\n else:\n # For wire compatibility with 3.7 and lower\n header = struct.pack("!i", n)\n if n > 16384:\n # The payload is large so Nagle's algorithm won't be triggered\n # and we'd better avoid the cost of concatenation.\n self._send(header)\n self._send(buf)\n else:\n # Issue #20540: concatenate before sending, to avoid delays due\n # to Nagle's algorithm on a TCP socket.\n # Also note we want to avoid sending a 0-length buffer separately,\n # to avoid "broken pipe" errors if the other end closed the pipe.\n self._send(header + buf)\n\n def _recv_bytes(self, maxsize=None):\n buf = self._recv(4)\n size, = struct.unpack("!i", buf.getvalue())\n if size == -1:\n buf = self._recv(8)\n size, = struct.unpack("!Q", buf.getvalue())\n if maxsize is not None and size > maxsize:\n return None\n return self._recv(size)\n\n def _poll(self, timeout):\n r = wait([self], timeout)\n return bool(r)\n\n\n#\n# Public functions\n#\n\nclass Listener(object):\n '''\n Returns a listener object.\n\n This is a wrapper for a bound socket which is 'listening' for\n connections, or for a Windows named pipe.\n '''\n def __init__(self, address=None, family=None, backlog=1, authkey=None):\n family = family or (address and address_type(address)) \\n or default_family\n address = address or arbitrary_address(family)\n\n _validate_family(family)\n if family == 'AF_PIPE':\n self._listener = PipeListener(address, backlog)\n else:\n self._listener = SocketListener(address, family, backlog)\n\n if authkey is not None and not isinstance(authkey, bytes):\n raise TypeError('authkey should be a byte string')\n\n self._authkey = authkey\n\n def accept(self):\n '''\n Accept a connection on the bound socket or named pipe of `self`.\n\n Returns a `Connection` object.\n 
'''\n if self._listener is None:\n raise OSError('listener is closed')\n c = self._listener.accept()\n if self._authkey:\n deliver_challenge(c, self._authkey)\n answer_challenge(c, self._authkey)\n return c\n\n def close(self):\n '''\n Close the bound socket or named pipe of `self`.\n '''\n listener = self._listener\n if listener is not None:\n self._listener = None\n listener.close()\n\n @property\n def address(self):\n return self._listener._address\n\n @property\n def last_accepted(self):\n return self._listener._last_accepted\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\ndef Client(address, family=None, authkey=None):\n '''\n Returns a connection to the address of a `Listener`\n '''\n family = family or address_type(address)\n _validate_family(family)\n if family == 'AF_PIPE':\n c = PipeClient(address)\n else:\n c = SocketClient(address)\n\n if authkey is not None and not isinstance(authkey, bytes):\n raise TypeError('authkey should be a byte string')\n\n if authkey is not None:\n answer_challenge(c, authkey)\n deliver_challenge(c, authkey)\n\n return c\n\n\nif sys.platform != 'win32':\n\n def Pipe(duplex=True):\n '''\n Returns pair of connection objects at either end of a pipe\n '''\n if duplex:\n s1, s2 = socket.socketpair()\n s1.setblocking(True)\n s2.setblocking(True)\n c1 = Connection(s1.detach())\n c2 = Connection(s2.detach())\n else:\n fd1, fd2 = os.pipe()\n c1 = Connection(fd1, writable=False)\n c2 = Connection(fd2, readable=False)\n\n return c1, c2\n\nelse:\n\n def Pipe(duplex=True):\n '''\n Returns pair of connection objects at either end of a pipe\n '''\n address = arbitrary_address('AF_PIPE')\n if duplex:\n openmode = _winapi.PIPE_ACCESS_DUPLEX\n access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE\n obsize, ibsize = BUFSIZE, BUFSIZE\n else:\n openmode = _winapi.PIPE_ACCESS_INBOUND\n access = _winapi.GENERIC_WRITE\n obsize, ibsize = 0, BUFSIZE\n\n h1 = _winapi.CreateNamedPipe(\n 
address, openmode | _winapi.FILE_FLAG_OVERLAPPED |\n _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,\n _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |\n _winapi.PIPE_WAIT,\n 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,\n # default security descriptor: the handle cannot be inherited\n _winapi.NULL\n )\n h2 = _winapi.CreateFile(\n address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,\n _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL\n )\n _winapi.SetNamedPipeHandleState(\n h2, _winapi.PIPE_READMODE_MESSAGE, None, None\n )\n\n overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)\n _, err = overlapped.GetOverlappedResult(True)\n assert err == 0\n\n c1 = PipeConnection(h1, writable=duplex)\n c2 = PipeConnection(h2, readable=duplex)\n\n return c1, c2\n\n#\n# Definitions for connections based on sockets\n#\n\nclass SocketListener(object):\n '''\n Representation of a socket which is bound to an address and listening\n '''\n def __init__(self, address, family, backlog=1):\n self._socket = socket.socket(getattr(socket, family))\n try:\n # SO_REUSEADDR has different semantics on Windows (issue #2550).\n if os.name == 'posix':\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_REUSEADDR, 1)\n self._socket.setblocking(True)\n self._socket.bind(address)\n self._socket.listen(backlog)\n self._address = self._socket.getsockname()\n except OSError:\n self._socket.close()\n raise\n self._family = family\n self._last_accepted = None\n\n if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):\n # Linux abstract socket namespaces do not need to be explicitly unlinked\n self._unlink = util.Finalize(\n self, os.unlink, args=(address,), exitpriority=0\n )\n else:\n self._unlink = None\n\n def accept(self):\n s, self._last_accepted = self._socket.accept()\n s.setblocking(True)\n return Connection(s.detach())\n\n def close(self):\n try:\n self._socket.close()\n finally:\n unlink = self._unlink\n if unlink is not None:\n self._unlink = None\n 
unlink()\n\n\ndef SocketClient(address):\n '''\n Return a connection object connected to the socket given by `address`\n '''\n family = address_type(address)\n with socket.socket( getattr(socket, family) ) as s:\n s.setblocking(True)\n s.connect(address)\n return Connection(s.detach())\n\n#\n# Definitions for connections based on named pipes\n#\n\nif sys.platform == 'win32':\n\n class PipeListener(object):\n '''\n Representation of a named pipe\n '''\n def __init__(self, address, backlog=None):\n self._address = address\n self._handle_queue = [self._new_handle(first=True)]\n\n self._last_accepted = None\n util.sub_debug('listener created with address=%r', self._address)\n self.close = util.Finalize(\n self, PipeListener._finalize_pipe_listener,\n args=(self._handle_queue, self._address), exitpriority=0\n )\n\n def _new_handle(self, first=False):\n flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED\n if first:\n flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE\n return _winapi.CreateNamedPipe(\n self._address, flags,\n _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |\n _winapi.PIPE_WAIT,\n _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,\n _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL\n )\n\n def accept(self):\n self._handle_queue.append(self._new_handle())\n handle = self._handle_queue.pop(0)\n try:\n ov = _winapi.ConnectNamedPipe(handle, overlapped=True)\n except OSError as e:\n if e.winerror != _winapi.ERROR_NO_DATA:\n raise\n # ERROR_NO_DATA can occur if a client has already connected,\n # written data and then disconnected -- see Issue 14725.\n else:\n try:\n res = _winapi.WaitForMultipleObjects(\n [ov.event], False, INFINITE)\n except:\n ov.cancel()\n _winapi.CloseHandle(handle)\n raise\n finally:\n _, err = ov.GetOverlappedResult(True)\n assert err == 0\n return PipeConnection(handle)\n\n @staticmethod\n def _finalize_pipe_listener(queue, address):\n util.sub_debug('closing listener with address=%r', address)\n for handle in queue:\n 
_winapi.CloseHandle(handle)\n\n def PipeClient(address):\n '''\n Return a connection object connected to the pipe given by `address`\n '''\n t = _init_timeout()\n while 1:\n try:\n _winapi.WaitNamedPipe(address, 1000)\n h = _winapi.CreateFile(\n address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,\n 0, _winapi.NULL, _winapi.OPEN_EXISTING,\n _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL\n )\n except OSError as e:\n if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,\n _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):\n raise\n else:\n break\n else:\n raise\n\n _winapi.SetNamedPipeHandleState(\n h, _winapi.PIPE_READMODE_MESSAGE, None, None\n )\n return PipeConnection(h)\n\n#\n# Authentication stuff\n#\n\nMESSAGE_LENGTH = 40 # MUST be > 20\nMESSAGE_MAXLEN = 256 # default is None\n\n_CHALLENGE = b'#CHALLENGE#'\n_WELCOME = b'#WELCOME#'\n_FAILURE = b'#FAILURE#'\n\n# multiprocessing.connection Authentication Handshake Protocol Description\n# (as documented for reference after reading the existing code)\n# =============================================================================\n#\n# On Windows: native pipes with "overlapped IO" are used to send the bytes,\n# instead of the length prefix SIZE scheme described below. (ie: the OS deals\n# with message sizes for us)\n#\n# Protocol error behaviors:\n#\n# On POSIX, any failure to receive the length prefix into SIZE, for SIZE greater\n# than the requested maxsize to receive, or receiving fewer than SIZE bytes\n# results in the connection being closed and auth to fail.\n#\n# On Windows, receiving too few bytes is never a low level _recv_bytes read\n# error, receiving too many will trigger an error only if receive maxsize\n# value was larger than 128 OR the if the data arrived in smaller pieces.\n#\n# Serving side Client side\n# ------------------------------ ---------------------------------------\n# 0. Open a connection on the pipe.\n# 1. Accept connection.\n# 2. 
Random 20+ bytes -> MESSAGE\n# Modern servers always send\n# more than 20 bytes and include\n# a {digest} prefix on it with\n# their preferred HMAC digest.\n# Legacy ones send ==20 bytes.\n# 3. send 4 byte length (net order)\n# prefix followed by:\n# b'#CHALLENGE#' + MESSAGE\n# 4. Receive 4 bytes, parse as network byte\n# order integer. If it is -1, receive an\n# additional 8 bytes, parse that as network\n# byte order. The result is the length of\n# the data that follows -> SIZE.\n# 5. Receive min(SIZE, 256) bytes -> M1\n# 6. Assert that M1 starts with:\n# b'#CHALLENGE#'\n# 7. Strip that prefix from M1 into -> M2\n# 7.1. Parse M2: if it is exactly 20 bytes in\n# length this indicates a legacy server\n# supporting only HMAC-MD5. Otherwise the\n# 7.2. preferred digest is looked up from an\n# expected "{digest}" prefix on M2. No prefix\n# or unsupported digest? <- AuthenticationError\n# 7.3. Put divined algorithm name in -> D_NAME\n# 8. Compute HMAC-D_NAME of AUTHKEY, M2 -> C_DIGEST\n# 9. Send 4 byte length prefix (net order)\n# followed by C_DIGEST bytes.\n# 10. Receive 4 or 4+8 byte length\n# prefix (#4 dance) -> SIZE.\n# 11. Receive min(SIZE, 256) -> C_D.\n# 11.1. Parse C_D: legacy servers\n# accept it as is, "md5" -> D_NAME\n# 11.2. modern servers check the length\n# of C_D, IF it is 16 bytes?\n# 11.2.1. "md5" -> D_NAME\n# and skip to step 12.\n# 11.3. longer? expect and parse a "{digest}"\n# prefix into -> D_NAME.\n# Strip the prefix and store remaining\n# bytes in -> C_D.\n# 11.4. Don't like D_NAME? <- AuthenticationError\n# 12. Compute HMAC-D_NAME of AUTHKEY,\n# MESSAGE into -> M_DIGEST.\n# 13. Compare M_DIGEST == C_D:\n# 14a: Match? Send length prefix &\n# b'#WELCOME#'\n# <- RETURN\n# 14b: Mismatch? Send len prefix &\n# b'#FAILURE#'\n# <- CLOSE & AuthenticationError\n# 15. Receive 4 or 4+8 byte length prefix (net\n# order) again as in #4 into -> SIZE.\n# 16. Receive min(SIZE, 256) bytes -> M3.\n# 17. Compare M3 == b'#WELCOME#':\n# 17a. Match? <- RETURN\n# 17b. 
Mismatch? <- CLOSE & AuthenticationError\n#\n# If this RETURNed, the connection remains open: it has been authenticated.\n#\n# Length prefixes are used consistently. Even on the legacy protocol, this\n# was good fortune and allowed us to evolve the protocol by using the length\n# of the opening challenge or length of the returned digest as a signal as\n# to which protocol the other end supports.\n\n_ALLOWED_DIGESTS = frozenset(\n {b'md5', b'sha256', b'sha384', b'sha3_256', b'sha3_384'})\n_MAX_DIGEST_LEN = max(len(_) for _ in _ALLOWED_DIGESTS)\n\n# Old hmac-md5 only server versions from Python <=3.11 sent a message of this\n# length. It happens to not match the length of any supported digest so we can\n# use a message of this length to indicate that we should work in backwards\n# compatible md5-only mode without a {digest_name} prefix on our response.\n_MD5ONLY_MESSAGE_LENGTH = 20\n_MD5_DIGEST_LEN = 16\n_LEGACY_LENGTHS = (_MD5ONLY_MESSAGE_LENGTH, _MD5_DIGEST_LEN)\n\n\ndef _get_digest_name_and_payload(message: bytes) -> (str, bytes):\n """Returns a digest name and the payload for a response hash.\n\n If a legacy protocol is detected based on the message length\n or contents the digest name returned will be empty to indicate\n legacy mode where MD5 and no digest prefix should be sent.\n """\n # modern message format: b"{digest}payload" longer than 20 bytes\n # legacy message format: 16 or 20 byte b"payload"\n if len(message) in _LEGACY_LENGTHS:\n # Either this was a legacy server challenge, or we're processing\n # a reply from a legacy client that sent an unprefixed 16-byte\n # HMAC-MD5 response. 
All messages using the modern protocol will\n # be longer than either of these lengths.\n return '', message\n if (message.startswith(b'{') and\n (curly := message.find(b'}', 1, _MAX_DIGEST_LEN+2)) > 0):\n digest = message[1:curly]\n if digest in _ALLOWED_DIGESTS:\n payload = message[curly+1:]\n return digest.decode('ascii'), payload\n raise AuthenticationError(\n 'unsupported message length, missing digest prefix, '\n f'or unsupported digest: {message=}')\n\n\ndef _create_response(authkey, message):\n """Create a MAC based on authkey and message\n\n The MAC algorithm defaults to HMAC-MD5, unless MD5 is not available or\n the message has a '{digest_name}' prefix. For legacy HMAC-MD5, the response\n is the raw MAC, otherwise the response is prefixed with '{digest_name}',\n e.g. b'{sha256}abcdefg...'\n\n Note: The MAC protects the entire message including the digest_name prefix.\n """\n import hmac\n digest_name = _get_digest_name_and_payload(message)[0]\n # The MAC protects the entire message: digest header and payload.\n if not digest_name:\n # Legacy server without a {digest} prefix on message.\n # Generate a legacy non-prefixed HMAC-MD5 reply.\n try:\n return hmac.new(authkey, message, 'md5').digest()\n except ValueError:\n # HMAC-MD5 is not available (FIPS mode?), fall back to\n # HMAC-SHA2-256 modern protocol. The legacy server probably\n # doesn't support it and will reject us anyways. 
:shrug:\n digest_name = 'sha256'\n # Modern protocol, indicate the digest used in the reply.\n response = hmac.new(authkey, message, digest_name).digest()\n return b'{%s}%s' % (digest_name.encode('ascii'), response)\n\n\ndef _verify_challenge(authkey, message, response):\n """Verify MAC challenge\n\n If our message did not include a digest_name prefix, the client is allowed\n to select a stronger digest_name from _ALLOWED_DIGESTS.\n\n In case our message is prefixed, a client cannot downgrade to a weaker\n algorithm, because the MAC is calculated over the entire message\n including the '{digest_name}' prefix.\n """\n import hmac\n response_digest, response_mac = _get_digest_name_and_payload(response)\n response_digest = response_digest or 'md5'\n try:\n expected = hmac.new(authkey, message, response_digest).digest()\n except ValueError:\n raise AuthenticationError(f'{response_digest=} unsupported')\n if len(expected) != len(response_mac):\n raise AuthenticationError(\n f'expected {response_digest!r} of length {len(expected)} '\n f'got {len(response_mac)}')\n if not hmac.compare_digest(expected, response_mac):\n raise AuthenticationError('digest received was wrong')\n\n\ndef deliver_challenge(connection, authkey: bytes, digest_name='sha256'):\n if not isinstance(authkey, bytes):\n raise ValueError(\n "Authkey must be bytes, not {0!s}".format(type(authkey)))\n assert MESSAGE_LENGTH > _MD5ONLY_MESSAGE_LENGTH, "protocol constraint"\n message = os.urandom(MESSAGE_LENGTH)\n message = b'{%s}%s' % (digest_name.encode('ascii'), message)\n # Even when sending a challenge to a legacy client that does not support\n # digest prefixes, they'll take the entire thing as a challenge and\n # respond to it with a raw HMAC-MD5.\n connection.send_bytes(_CHALLENGE + message)\n response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message\n try:\n _verify_challenge(authkey, message, response)\n except AuthenticationError:\n connection.send_bytes(_FAILURE)\n raise\n else:\n 
connection.send_bytes(_WELCOME)\n\n\ndef answer_challenge(connection, authkey: bytes):\n if not isinstance(authkey, bytes):\n raise ValueError(\n "Authkey must be bytes, not {0!s}".format(type(authkey)))\n message = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message\n if not message.startswith(_CHALLENGE):\n raise AuthenticationError(\n f'Protocol error, expected challenge: {message=}')\n message = message[len(_CHALLENGE):]\n if len(message) < _MD5ONLY_MESSAGE_LENGTH:\n raise AuthenticationError('challenge too short: {len(message)} bytes')\n digest = _create_response(authkey, message)\n connection.send_bytes(digest)\n response = connection.recv_bytes(MESSAGE_MAXLEN) # reject large message\n if response != _WELCOME:\n raise AuthenticationError('digest sent was rejected')\n\n#\n# Support for using xmlrpclib for serialization\n#\n\nclass ConnectionWrapper(object):\n def __init__(self, conn, dumps, loads):\n self._conn = conn\n self._dumps = dumps\n self._loads = loads\n for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):\n obj = getattr(conn, attr)\n setattr(self, attr, obj)\n def send(self, obj):\n s = self._dumps(obj)\n self._conn.send_bytes(s)\n def recv(self):\n s = self._conn.recv_bytes()\n return self._loads(s)\n\ndef _xml_dumps(obj):\n return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')\n\ndef _xml_loads(s):\n (obj,), method = xmlrpclib.loads(s.decode('utf-8'))\n return obj\n\nclass XmlListener(Listener):\n def accept(self):\n global xmlrpclib\n import xmlrpc.client as xmlrpclib\n obj = Listener.accept(self)\n return ConnectionWrapper(obj, _xml_dumps, _xml_loads)\n\ndef XmlClient(*args, **kwds):\n global xmlrpclib\n import xmlrpc.client as xmlrpclib\n return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)\n\n#\n# Wait\n#\n\nif sys.platform == 'win32':\n\n def _exhaustive_wait(handles, timeout):\n # Return ALL handles which are currently signalled. 
(Only\n # returning the first signalled might create starvation issues.)\n L = list(handles)\n ready = []\n while L:\n res = _winapi.WaitForMultipleObjects(L, False, timeout)\n if res == WAIT_TIMEOUT:\n break\n elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):\n res -= WAIT_OBJECT_0\n elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):\n res -= WAIT_ABANDONED_0\n else:\n raise RuntimeError('Should not get here')\n ready.append(L[res])\n L = L[res+1:]\n timeout = 0\n return ready\n\n _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}\n\n def wait(object_list, timeout=None):\n '''\n Wait till an object in object_list is ready/readable.\n\n Returns list of those objects in object_list which are ready/readable.\n '''\n if timeout is None:\n timeout = INFINITE\n elif timeout < 0:\n timeout = 0\n else:\n timeout = int(timeout * 1000 + 0.5)\n\n object_list = list(object_list)\n waithandle_to_obj = {}\n ov_list = []\n ready_objects = set()\n ready_handles = set()\n\n try:\n for o in object_list:\n try:\n fileno = getattr(o, 'fileno')\n except AttributeError:\n waithandle_to_obj[o.__index__()] = o\n else:\n # start an overlapped read of length zero\n try:\n ov, err = _winapi.ReadFile(fileno(), 0, True)\n except OSError as e:\n ov, err = None, e.winerror\n if err not in _ready_errors:\n raise\n if err == _winapi.ERROR_IO_PENDING:\n ov_list.append(ov)\n waithandle_to_obj[ov.event] = o\n else:\n # If o.fileno() is an overlapped pipe handle and\n # err == 0 then there is a zero length message\n # in the pipe, but it HAS NOT been consumed...\n if ov and sys.getwindowsversion()[:2] >= (6, 2):\n # ... 
except on Windows 8 and later, where\n # the message HAS been consumed.\n try:\n _, err = ov.GetOverlappedResult(False)\n except OSError as e:\n err = e.winerror\n if not err and hasattr(o, '_got_empty_message'):\n o._got_empty_message = True\n ready_objects.add(o)\n timeout = 0\n\n ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)\n finally:\n # request that overlapped reads stop\n for ov in ov_list:\n ov.cancel()\n\n # wait for all overlapped reads to stop\n for ov in ov_list:\n try:\n _, err = ov.GetOverlappedResult(True)\n except OSError as e:\n err = e.winerror\n if err not in _ready_errors:\n raise\n if err != _winapi.ERROR_OPERATION_ABORTED:\n o = waithandle_to_obj[ov.event]\n ready_objects.add(o)\n if err == 0:\n # If o.fileno() is an overlapped pipe handle then\n # a zero length message HAS been consumed.\n if hasattr(o, '_got_empty_message'):\n o._got_empty_message = True\n\n ready_objects.update(waithandle_to_obj[h] for h in ready_handles)\n return [o for o in object_list if o in ready_objects]\n\nelse:\n\n import selectors\n\n # poll/select have the advantage of not requiring any extra file\n # descriptor, contrarily to epoll/kqueue (also, they require a single\n # syscall).\n if hasattr(selectors, 'PollSelector'):\n _WaitSelector = selectors.PollSelector\n else:\n _WaitSelector = selectors.SelectSelector\n\n def wait(object_list, timeout=None):\n '''\n Wait till an object in object_list is ready/readable.\n\n Returns list of those objects in object_list which are ready/readable.\n '''\n with _WaitSelector() as selector:\n for obj in object_list:\n selector.register(obj, selectors.EVENT_READ)\n\n if timeout is not None:\n deadline = getattr(time,'monotonic',time.time)() + timeout\n\n while True:\n ready = selector.select(timeout)\n if ready:\n return [key.fileobj for (key, events) in ready]\n else:\n if timeout is not None:\n timeout = deadline - getattr(time,'monotonic',time.time)()\n if timeout < 0:\n return ready\n\n#\n# Make 
connection and socket objects shareable if possible\n#\n\nif sys.platform == 'win32':\n def reduce_connection(conn):\n handle = conn.fileno()\n with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:\n from . import resource_sharer\n ds = resource_sharer.DupSocket(s)\n return rebuild_connection, (ds, conn.readable, conn.writable)\n def rebuild_connection(ds, readable, writable):\n sock = ds.detach()\n return Connection(sock.detach(), readable, writable)\n reduction.register(Connection, reduce_connection)\n\n def reduce_pipe_connection(conn):\n access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |\n (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))\n dh = reduction.DupHandle(conn.fileno(), access)\n return rebuild_pipe_connection, (dh, conn.readable, conn.writable)\n def rebuild_pipe_connection(dh, readable, writable):\n handle = dh.detach()\n return PipeConnection(handle, readable, writable)\n reduction.register(PipeConnection, reduce_pipe_connection)\n\nelse:\n def reduce_connection(conn):\n df = reduction.DupFd(conn.fileno())\n return rebuild_connection, (df, conn.readable, conn.writable)\n def rebuild_connection(df, readable, writable):\n fd = df.detach()\n return Connection(fd, readable, writable)\n reduction.register(Connection, reduce_connection)\n | .venv\Lib\site-packages\multiprocess\connection.py | connection.py | Python | 41,577 | 0.95 | 0.203218 | 0.17607 | python-kit | 846 | 2024-07-15T10:26:09.826234 | Apache-2.0 | false | 59b44a3c7f930da27303a472abb8ee1a |
import os\nimport sys\nimport threading\n\nfrom . import process\nfrom . import reduction\n\n__all__ = ()\n\n#\n# Exceptions\n#\n\nclass ProcessError(Exception):\n pass\n\nclass BufferTooShort(ProcessError):\n pass\n\nclass TimeoutError(ProcessError):\n pass\n\nclass AuthenticationError(ProcessError):\n pass\n\n#\n# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py\n#\n\nclass BaseContext(object):\n\n ProcessError = ProcessError\n BufferTooShort = BufferTooShort\n TimeoutError = TimeoutError\n AuthenticationError = AuthenticationError\n\n current_process = staticmethod(process.current_process)\n parent_process = staticmethod(process.parent_process)\n active_children = staticmethod(process.active_children)\n\n def cpu_count(self):\n '''Returns the number of CPUs in the system'''\n num = os.cpu_count()\n if num is None:\n raise NotImplementedError('cannot determine number of cpus')\n else:\n return num\n\n def Manager(self):\n '''Returns a manager associated with a running server process\n\n The managers methods such as `Lock()`, `Condition()` and `Queue()`\n can be used to create shared objects.\n '''\n from .managers import SyncManager\n m = SyncManager(ctx=self.get_context())\n m.start()\n return m\n\n def Pipe(self, duplex=True):\n '''Returns two connection object connected by a pipe'''\n from .connection import Pipe\n return Pipe(duplex)\n\n def Lock(self):\n '''Returns a non-recursive lock object'''\n from .synchronize import Lock\n return Lock(ctx=self.get_context())\n\n def RLock(self):\n '''Returns a recursive lock object'''\n from .synchronize import RLock\n return RLock(ctx=self.get_context())\n\n def Condition(self, lock=None):\n '''Returns a condition object'''\n from .synchronize import Condition\n return Condition(lock, ctx=self.get_context())\n\n def Semaphore(self, value=1):\n '''Returns a semaphore object'''\n from .synchronize import Semaphore\n return Semaphore(value, 
ctx=self.get_context())\n\n def BoundedSemaphore(self, value=1):\n '''Returns a bounded semaphore object'''\n from .synchronize import BoundedSemaphore\n return BoundedSemaphore(value, ctx=self.get_context())\n\n def Event(self):\n '''Returns an event object'''\n from .synchronize import Event\n return Event(ctx=self.get_context())\n\n def Barrier(self, parties, action=None, timeout=None):\n '''Returns a barrier object'''\n from .synchronize import Barrier\n return Barrier(parties, action, timeout, ctx=self.get_context())\n\n def Queue(self, maxsize=0):\n '''Returns a queue object'''\n from .queues import Queue\n return Queue(maxsize, ctx=self.get_context())\n\n def JoinableQueue(self, maxsize=0):\n '''Returns a queue object'''\n from .queues import JoinableQueue\n return JoinableQueue(maxsize, ctx=self.get_context())\n\n def SimpleQueue(self):\n '''Returns a queue object'''\n from .queues import SimpleQueue\n return SimpleQueue(ctx=self.get_context())\n\n def Pool(self, processes=None, initializer=None, initargs=(),\n maxtasksperchild=None):\n '''Returns a process pool object'''\n from .pool import Pool\n return Pool(processes, initializer, initargs, maxtasksperchild,\n context=self.get_context())\n\n def RawValue(self, typecode_or_type, *args):\n '''Returns a shared object'''\n from .sharedctypes import RawValue\n return RawValue(typecode_or_type, *args)\n\n def RawArray(self, typecode_or_type, size_or_initializer):\n '''Returns a shared array'''\n from .sharedctypes import RawArray\n return RawArray(typecode_or_type, size_or_initializer)\n\n def Value(self, typecode_or_type, *args, lock=True):\n '''Returns a synchronized shared object'''\n from .sharedctypes import Value\n return Value(typecode_or_type, *args, lock=lock,\n ctx=self.get_context())\n\n def Array(self, typecode_or_type, size_or_initializer, *, lock=True):\n '''Returns a synchronized shared array'''\n from .sharedctypes import Array\n return Array(typecode_or_type, size_or_initializer, lock=lock,\n 
ctx=self.get_context())\n\n def freeze_support(self):\n '''Check whether this is a fake forked process in a frozen executable.\n If so then run code specified by commandline and exit.\n '''\n if sys.platform == 'win32' and getattr(sys, 'frozen', False):\n from .spawn import freeze_support\n freeze_support()\n\n def get_logger(self):\n '''Return package logger -- if it does not already exist then\n it is created.\n '''\n from .util import get_logger\n return get_logger()\n\n def log_to_stderr(self, level=None):\n '''Turn on logging and add a handler which prints to stderr'''\n from .util import log_to_stderr\n return log_to_stderr(level)\n\n def allow_connection_pickling(self):\n '''Install support for sending connections and sockets\n between processes\n '''\n # This is undocumented. In previous versions of multiprocessing\n # its only effect was to make socket objects inheritable on Windows.\n from . import connection\n\n def set_executable(self, executable):\n '''Sets the path to a python.exe or pythonw.exe binary used to run\n child processes instead of sys.executable when using the 'spawn'\n start method. 
Useful for people embedding Python.\n '''\n from .spawn import set_executable\n set_executable(executable)\n\n def set_forkserver_preload(self, module_names):\n '''Set list of module names to try to load in forkserver process.\n This is really just a hint.\n '''\n from .forkserver import set_forkserver_preload\n set_forkserver_preload(module_names)\n\n def get_context(self, method=None):\n if method is None:\n return self\n try:\n ctx = _concrete_contexts[method]\n except KeyError:\n raise ValueError('cannot find context for %r' % method) from None\n ctx._check_available()\n return ctx\n\n def get_start_method(self, allow_none=False):\n return self._name\n\n def set_start_method(self, method, force=False):\n raise ValueError('cannot set start method of concrete context')\n\n @property\n def reducer(self):\n '''Controls how objects will be reduced to a form that can be\n shared with other processes.'''\n return globals().get('reduction')\n\n @reducer.setter\n def reducer(self, reduction):\n globals()['reduction'] = reduction\n\n def _check_available(self):\n pass\n\n#\n# Type of default context -- underlying context can be set at most once\n#\n\nclass Process(process.BaseProcess):\n _start_method = None\n @staticmethod\n def _Popen(process_obj):\n return _default_context.get_context().Process._Popen(process_obj)\n\n @staticmethod\n def _after_fork():\n return _default_context.get_context().Process._after_fork()\n\nclass DefaultContext(BaseContext):\n Process = Process\n\n def __init__(self, context):\n self._default_context = context\n self._actual_context = None\n\n def get_context(self, method=None):\n if method is None:\n if self._actual_context is None:\n self._actual_context = self._default_context\n return self._actual_context\n else:\n return super().get_context(method)\n\n def set_start_method(self, method, force=False):\n if self._actual_context is not None and not force:\n raise RuntimeError('context has already been set')\n if method is None and force:\n 
self._actual_context = None\n return\n self._actual_context = self.get_context(method)\n\n def get_start_method(self, allow_none=False):\n if self._actual_context is None:\n if allow_none:\n return None\n self._actual_context = self._default_context\n return self._actual_context._name\n\n def get_all_start_methods(self):\n """Returns a list of the supported start methods, default first."""\n if sys.platform == 'win32':\n return ['spawn']\n else:\n methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']\n if reduction.HAVE_SEND_HANDLE:\n methods.append('forkserver')\n return methods\n\n\n#\n# Context types for fixed start method\n#\n\nif sys.platform != 'win32':\n\n class ForkProcess(process.BaseProcess):\n _start_method = 'fork'\n @staticmethod\n def _Popen(process_obj):\n from .popen_fork import Popen\n return Popen(process_obj)\n\n class SpawnProcess(process.BaseProcess):\n _start_method = 'spawn'\n @staticmethod\n def _Popen(process_obj):\n from .popen_spawn_posix import Popen\n return Popen(process_obj)\n\n @staticmethod\n def _after_fork():\n # process is spawned, nothing to do\n pass\n\n class ForkServerProcess(process.BaseProcess):\n _start_method = 'forkserver'\n @staticmethod\n def _Popen(process_obj):\n from .popen_forkserver import Popen\n return Popen(process_obj)\n\n class ForkContext(BaseContext):\n _name = 'fork'\n Process = ForkProcess\n\n class SpawnContext(BaseContext):\n _name = 'spawn'\n Process = SpawnProcess\n\n class ForkServerContext(BaseContext):\n _name = 'forkserver'\n Process = ForkServerProcess\n def _check_available(self):\n if not reduction.HAVE_SEND_HANDLE:\n raise ValueError('forkserver start method not available')\n\n _concrete_contexts = {\n 'fork': ForkContext(),\n 'spawn': SpawnContext(),\n 'forkserver': ForkServerContext(),\n }\n if sys.platform == 'darwin':\n # bpo-33725: running arbitrary code after fork() is no longer reliable\n # on macOS since macOS 10.14 (Mojave). 
Use spawn by default instead.\n _default_context = DefaultContext(_concrete_contexts['fork']) #FIXME: spawn\n else:\n _default_context = DefaultContext(_concrete_contexts['fork'])\n\nelse:\n\n class SpawnProcess(process.BaseProcess):\n _start_method = 'spawn'\n @staticmethod\n def _Popen(process_obj):\n from .popen_spawn_win32 import Popen\n return Popen(process_obj)\n\n @staticmethod\n def _after_fork():\n # process is spawned, nothing to do\n pass\n\n class SpawnContext(BaseContext):\n _name = 'spawn'\n Process = SpawnProcess\n\n _concrete_contexts = {\n 'spawn': SpawnContext(),\n }\n _default_context = DefaultContext(_concrete_contexts['spawn'])\n\n#\n# Force the start method\n#\n\ndef _force_start_method(method):\n _default_context._actual_context = _concrete_contexts[method]\n\n#\n# Check that the current thread is spawning a child process\n#\n\n_tls = threading.local()\n\ndef get_spawning_popen():\n return getattr(_tls, 'spawning_popen', None)\n\ndef set_spawning_popen(popen):\n _tls.spawning_popen = popen\n\ndef assert_spawning(obj):\n if get_spawning_popen() is None:\n raise RuntimeError(\n '%s objects should only be shared between processes'\n ' through inheritance' % type(obj).__name__\n )\n | .venv\Lib\site-packages\multiprocess\context.py | context.py | Python | 11,686 | 0.95 | 0.230769 | 0.079208 | node-utils | 162 | 2024-02-03T12:15:19.808462 | Apache-2.0 | false | b76d690801082a7d93e02e86a1b43e83 |
import errno\nimport os\nimport selectors\nimport signal\nimport socket\nimport struct\nimport sys\nimport threading\nimport warnings\n\nfrom . import connection\nfrom . import process\nfrom .context import reduction\nfrom . import resource_tracker\nfrom . import spawn\nfrom . import util\n\n__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',\n 'set_forkserver_preload']\n\n#\n#\n#\n\nMAXFDS_TO_SEND = 256\nSIGNED_STRUCT = struct.Struct('q') # large enough for pid_t\n\n#\n# Forkserver class\n#\n\nclass ForkServer(object):\n\n def __init__(self):\n self._forkserver_address = None\n self._forkserver_alive_fd = None\n self._forkserver_pid = None\n self._inherited_fds = None\n self._lock = threading.Lock()\n self._preload_modules = ['__main__']\n\n def _stop(self):\n # Method used by unit tests to stop the server\n with self._lock:\n self._stop_unlocked()\n\n def _stop_unlocked(self):\n if self._forkserver_pid is None:\n return\n\n # close the "alive" file descriptor asks the server to stop\n os.close(self._forkserver_alive_fd)\n self._forkserver_alive_fd = None\n\n os.waitpid(self._forkserver_pid, 0)\n self._forkserver_pid = None\n\n if not util.is_abstract_socket_namespace(self._forkserver_address):\n os.unlink(self._forkserver_address)\n self._forkserver_address = None\n\n def set_forkserver_preload(self, modules_names):\n '''Set list of module names to try to load in forkserver process.'''\n if not all(type(mod) is str for mod in modules_names):\n raise TypeError('module_names must be a list of strings')\n self._preload_modules = modules_names\n\n def get_inherited_fds(self):\n '''Return list of fds inherited from parent process.\n\n This returns None if the current process was not started by fork\n server.\n '''\n return self._inherited_fds\n\n def connect_to_new_process(self, fds):\n '''Request forkserver to create a child process.\n\n Returns a pair of fds (status_r, data_w). 
The calling process can read\n the child process's pid and (eventually) its returncode from status_r.\n The calling process should write to data_w the pickled preparation and\n process data.\n '''\n self.ensure_running()\n if len(fds) + 4 >= MAXFDS_TO_SEND:\n raise ValueError('too many fds')\n with socket.socket(socket.AF_UNIX) as client:\n client.connect(self._forkserver_address)\n parent_r, child_w = os.pipe()\n child_r, parent_w = os.pipe()\n allfds = [child_r, child_w, self._forkserver_alive_fd,\n resource_tracker.getfd()]\n allfds += fds\n try:\n reduction.sendfds(client, allfds)\n return parent_r, parent_w\n except:\n os.close(parent_r)\n os.close(parent_w)\n raise\n finally:\n os.close(child_r)\n os.close(child_w)\n\n def ensure_running(self):\n '''Make sure that a fork server is running.\n\n This can be called from any process. Note that usually a child\n process will just reuse the forkserver started by its parent, so\n ensure_running() will do nothing.\n '''\n with self._lock:\n resource_tracker.ensure_running()\n if self._forkserver_pid is not None:\n # forkserver was launched before, is it still running?\n pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)\n if not pid:\n # still alive\n return\n # dead, launch it again\n os.close(self._forkserver_alive_fd)\n self._forkserver_address = None\n self._forkserver_alive_fd = None\n self._forkserver_pid = None\n\n cmd = ('from multiprocess.forkserver import main; ' +\n 'main(%d, %d, %r, **%r)')\n\n if self._preload_modules:\n desired_keys = {'main_path', 'sys_path'}\n data = spawn.get_preparation_data('ignore')\n data = {x: y for x, y in data.items() if x in desired_keys}\n else:\n data = {}\n\n with socket.socket(socket.AF_UNIX) as listener:\n address = connection.arbitrary_address('AF_UNIX')\n listener.bind(address)\n if not util.is_abstract_socket_namespace(address):\n os.chmod(address, 0o600)\n listener.listen()\n\n # all client processes own the write end of the "alive" pipe;\n # when they all 
terminate the read end becomes ready.\n alive_r, alive_w = os.pipe()\n try:\n fds_to_pass = [listener.fileno(), alive_r]\n cmd %= (listener.fileno(), alive_r, self._preload_modules,\n data)\n exe = spawn.get_executable()\n args = [exe] + util._args_from_interpreter_flags()\n args += ['-c', cmd]\n pid = util.spawnv_passfds(exe, args, fds_to_pass)\n except:\n os.close(alive_w)\n raise\n finally:\n os.close(alive_r)\n self._forkserver_address = address\n self._forkserver_alive_fd = alive_w\n self._forkserver_pid = pid\n\n#\n#\n#\n\ndef main(listener_fd, alive_r, preload, main_path=None, sys_path=None):\n '''Run forkserver.'''\n if preload:\n if '__main__' in preload and main_path is not None:\n process.current_process()._inheriting = True\n try:\n spawn.import_main_path(main_path)\n finally:\n del process.current_process()._inheriting\n for modname in preload:\n try:\n __import__(modname)\n except ImportError:\n pass\n\n util._close_stdin()\n\n sig_r, sig_w = os.pipe()\n os.set_blocking(sig_r, False)\n os.set_blocking(sig_w, False)\n\n def sigchld_handler(*_unused):\n # Dummy signal handler, doesn't do anything\n pass\n\n handlers = {\n # unblocking SIGCHLD allows the wakeup fd to notify our event loop\n signal.SIGCHLD: sigchld_handler,\n # protect the process from ^C\n signal.SIGINT: signal.SIG_IGN,\n }\n old_handlers = {sig: signal.signal(sig, val)\n for (sig, val) in handlers.items()}\n\n # calling os.write() in the Python signal handler is racy\n signal.set_wakeup_fd(sig_w)\n\n # map child pids to client fds\n pid_to_fd = {}\n\n with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \\n selectors.DefaultSelector() as selector:\n _forkserver._forkserver_address = listener.getsockname()\n\n selector.register(listener, selectors.EVENT_READ)\n selector.register(alive_r, selectors.EVENT_READ)\n selector.register(sig_r, selectors.EVENT_READ)\n\n while True:\n try:\n while True:\n rfds = [key.fileobj for (key, events) in selector.select()]\n if rfds:\n 
break\n\n if alive_r in rfds:\n # EOF because no more client processes left\n assert os.read(alive_r, 1) == b'', "Not at EOF?"\n raise SystemExit\n\n if sig_r in rfds:\n # Got SIGCHLD\n os.read(sig_r, 65536) # exhaust\n while True:\n # Scan for child processes\n try:\n pid, sts = os.waitpid(-1, os.WNOHANG)\n except ChildProcessError:\n break\n if pid == 0:\n break\n child_w = pid_to_fd.pop(pid, None)\n if child_w is not None:\n returncode = os.waitstatus_to_exitcode(sts)\n # Send exit code to client process\n try:\n write_signed(child_w, returncode)\n except BrokenPipeError:\n # client vanished\n pass\n os.close(child_w)\n else:\n # This shouldn't happen really\n warnings.warn('forkserver: waitpid returned '\n 'unexpected pid %d' % pid)\n\n if listener in rfds:\n # Incoming fork request\n with listener.accept()[0] as s:\n # Receive fds from client\n fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)\n if len(fds) > MAXFDS_TO_SEND:\n raise RuntimeError(\n "Too many ({0:n}) fds to send".format(\n len(fds)))\n child_r, child_w, *fds = fds\n s.close()\n pid = os.fork()\n if pid == 0:\n # Child\n code = 1\n try:\n listener.close()\n selector.close()\n unused_fds = [alive_r, child_w, sig_r, sig_w]\n unused_fds.extend(pid_to_fd.values())\n code = _serve_one(child_r, fds,\n unused_fds,\n old_handlers)\n except Exception:\n sys.excepthook(*sys.exc_info())\n sys.stderr.flush()\n finally:\n os._exit(code)\n else:\n # Send pid to client process\n try:\n write_signed(child_w, pid)\n except BrokenPipeError:\n # client vanished\n pass\n pid_to_fd[pid] = child_w\n os.close(child_r)\n for fd in fds:\n os.close(fd)\n\n except OSError as e:\n if e.errno != errno.ECONNABORTED:\n raise\n\n\ndef _serve_one(child_r, fds, unused_fds, handlers):\n # close unnecessary stuff and reset signal handlers\n signal.set_wakeup_fd(-1)\n for sig, val in handlers.items():\n signal.signal(sig, val)\n for fd in unused_fds:\n os.close(fd)\n\n (_forkserver._forkserver_alive_fd,\n 
resource_tracker._resource_tracker._fd,\n *_forkserver._inherited_fds) = fds\n\n # Run process object received over pipe\n parent_sentinel = os.dup(child_r)\n code = spawn._main(child_r, parent_sentinel)\n\n return code\n\n\n#\n# Read and write signed numbers\n#\n\ndef read_signed(fd):\n data = b''\n length = SIGNED_STRUCT.size\n while len(data) < length:\n s = os.read(fd, length - len(data))\n if not s:\n raise EOFError('unexpected EOF')\n data += s\n return SIGNED_STRUCT.unpack(data)[0]\n\ndef write_signed(fd, n):\n msg = SIGNED_STRUCT.pack(n)\n while msg:\n nbytes = os.write(fd, msg)\n if nbytes == 0:\n raise RuntimeError('should not get here')\n msg = msg[nbytes:]\n\n#\n#\n#\n\n_forkserver = ForkServer()\nensure_running = _forkserver.ensure_running\nget_inherited_fds = _forkserver.get_inherited_fds\nconnect_to_new_process = _forkserver.connect_to_new_process\nset_forkserver_preload = _forkserver.set_forkserver_preload\n | .venv\Lib\site-packages\multiprocess\forkserver.py | forkserver.py | Python | 12,130 | 0.95 | 0.178674 | 0.137584 | python-kit | 768 | 2024-08-02T10:40:33.551082 | MIT | false | f14a30149ba56af3b303bd7b5c4f0ff4 |
#\n# Module which supports allocation of memory from an mmap\n#\n# multiprocessing/heap.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\nimport bisect\nfrom collections import defaultdict\nimport mmap\nimport os\nimport sys\nimport tempfile\nimport threading\n\nfrom .context import reduction, assert_spawning\nfrom . import util\n\n__all__ = ['BufferWrapper']\n\n#\n# Inheritable class which wraps an mmap, and from which blocks can be allocated\n#\n\nif sys.platform == 'win32':\n\n import _winapi\n\n class Arena(object):\n """\n A shared memory area backed by anonymous memory (Windows).\n """\n\n _rand = tempfile._RandomNameSequence()\n\n def __init__(self, size):\n self.size = size\n for i in range(100):\n name = 'pym-%d-%s' % (os.getpid(), next(self._rand))\n buf = mmap.mmap(-1, size, tagname=name)\n if _winapi.GetLastError() == 0:\n break\n # We have reopened a preexisting mmap.\n buf.close()\n else:\n raise FileExistsError('Cannot find name for new mmap')\n self.name = name\n self.buffer = buf\n self._state = (self.size, self.name)\n\n def __getstate__(self):\n assert_spawning(self)\n return self._state\n\n def __setstate__(self, state):\n self.size, self.name = self._state = state\n # Reopen existing mmap\n self.buffer = mmap.mmap(-1, self.size, tagname=self.name)\n # XXX Temporarily preventing buildbot failures while determining\n # XXX the correct long-term fix. 
See issue 23060\n #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS\n\nelse:\n\n class Arena(object):\n """\n A shared memory area backed by a temporary file (POSIX).\n """\n\n if sys.platform == 'linux':\n _dir_candidates = ['/dev/shm']\n else:\n _dir_candidates = []\n\n def __init__(self, size, fd=-1):\n self.size = size\n self.fd = fd\n if fd == -1:\n # Arena is created anew (if fd != -1, it means we're coming\n # from rebuild_arena() below)\n self.fd, name = tempfile.mkstemp(\n prefix='pym-%d-'%os.getpid(),\n dir=self._choose_dir(size))\n os.unlink(name)\n util.Finalize(self, os.close, (self.fd,))\n os.ftruncate(self.fd, size)\n self.buffer = mmap.mmap(self.fd, self.size)\n\n def _choose_dir(self, size):\n # Choose a non-storage backed directory if possible,\n # to improve performance\n for d in self._dir_candidates:\n st = os.statvfs(d)\n if st.f_bavail * st.f_frsize >= size: # enough free space?\n return d\n return util.get_temp_dir()\n\n def reduce_arena(a):\n if a.fd == -1:\n raise ValueError('Arena is unpicklable because '\n 'forking was enabled when it was created')\n return rebuild_arena, (a.size, reduction.DupFd(a.fd))\n\n def rebuild_arena(size, dupfd):\n return Arena(size, dupfd.detach())\n\n reduction.register(Arena, reduce_arena)\n\n#\n# Class allowing allocation of chunks of memory from arenas\n#\n\nclass Heap(object):\n\n # Minimum malloc() alignment\n _alignment = 8\n\n _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB\n _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2\n\n def __init__(self, size=mmap.PAGESIZE):\n self._lastpid = os.getpid()\n self._lock = threading.Lock()\n # Current arena allocation size\n self._size = size\n # A sorted list of available block sizes in arenas\n self._lengths = []\n\n # Free block management:\n # - map each block size to a list of `(Arena, start, stop)` blocks\n self._len_to_seq = {}\n # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block\n # starting at that offset\n self._start_to_block 
= {}\n # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block\n # ending at that offset\n self._stop_to_block = {}\n\n # Map arenas to their `(Arena, start, stop)` blocks in use\n self._allocated_blocks = defaultdict(set)\n self._arenas = []\n\n # List of pending blocks to free - see comment in free() below\n self._pending_free_blocks = []\n\n # Statistics\n self._n_mallocs = 0\n self._n_frees = 0\n\n @staticmethod\n def _roundup(n, alignment):\n # alignment must be a power of 2\n mask = alignment - 1\n return (n + mask) & ~mask\n\n def _new_arena(self, size):\n # Create a new arena with at least the given *size*\n length = self._roundup(max(self._size, size), mmap.PAGESIZE)\n # We carve larger and larger arenas, for efficiency, until we\n # reach a large-ish size (roughly L3 cache-sized)\n if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:\n self._size *= 2\n util.info('allocating a new mmap of length %d', length)\n arena = Arena(length)\n self._arenas.append(arena)\n return (arena, 0, length)\n\n def _discard_arena(self, arena):\n # Possibly delete the given (unused) arena\n length = arena.size\n # Reusing an existing arena is faster than creating a new one, so\n # we only reclaim space if it's large enough.\n if length < self._DISCARD_FREE_SPACE_LARGER_THAN:\n return\n blocks = self._allocated_blocks.pop(arena)\n assert not blocks\n del self._start_to_block[(arena, 0)]\n del self._stop_to_block[(arena, length)]\n self._arenas.remove(arena)\n seq = self._len_to_seq[length]\n seq.remove((arena, 0, length))\n if not seq:\n del self._len_to_seq[length]\n self._lengths.remove(length)\n\n def _malloc(self, size):\n # returns a large enough block -- it might be much larger\n i = bisect.bisect_left(self._lengths, size)\n if i == len(self._lengths):\n return self._new_arena(size)\n else:\n length = self._lengths[i]\n seq = self._len_to_seq[length]\n block = seq.pop()\n if not seq:\n del self._len_to_seq[length], self._lengths[i]\n\n (arena, start, stop) = block\n 
del self._start_to_block[(arena, start)]\n del self._stop_to_block[(arena, stop)]\n return block\n\n def _add_free_block(self, block):\n # make block available and try to merge with its neighbours in the arena\n (arena, start, stop) = block\n\n try:\n prev_block = self._stop_to_block[(arena, start)]\n except KeyError:\n pass\n else:\n start, _ = self._absorb(prev_block)\n\n try:\n next_block = self._start_to_block[(arena, stop)]\n except KeyError:\n pass\n else:\n _, stop = self._absorb(next_block)\n\n block = (arena, start, stop)\n length = stop - start\n\n try:\n self._len_to_seq[length].append(block)\n except KeyError:\n self._len_to_seq[length] = [block]\n bisect.insort(self._lengths, length)\n\n self._start_to_block[(arena, start)] = block\n self._stop_to_block[(arena, stop)] = block\n\n def _absorb(self, block):\n # deregister this block so it can be merged with a neighbour\n (arena, start, stop) = block\n del self._start_to_block[(arena, start)]\n del self._stop_to_block[(arena, stop)]\n\n length = stop - start\n seq = self._len_to_seq[length]\n seq.remove(block)\n if not seq:\n del self._len_to_seq[length]\n self._lengths.remove(length)\n\n return start, stop\n\n def _remove_allocated_block(self, block):\n arena, start, stop = block\n blocks = self._allocated_blocks[arena]\n blocks.remove((start, stop))\n if not blocks:\n # Arena is entirely free, discard it from this process\n self._discard_arena(arena)\n\n def _free_pending_blocks(self):\n # Free all the blocks in the pending list - called with the lock held.\n while True:\n try:\n block = self._pending_free_blocks.pop()\n except IndexError:\n break\n self._add_free_block(block)\n self._remove_allocated_block(block)\n\n def free(self, block):\n # free a block returned by malloc()\n # Since free() can be called asynchronously by the GC, it could happen\n # that it's called while self._lock is held: in that case,\n # self._lock.acquire() would deadlock (issue #12352). 
To avoid that, a\n # trylock is used instead, and if the lock can't be acquired\n # immediately, the block is added to a list of blocks to be freed\n # synchronously sometimes later from malloc() or free(), by calling\n # _free_pending_blocks() (appending and retrieving from a list is not\n # strictly thread-safe but under CPython it's atomic thanks to the GIL).\n if os.getpid() != self._lastpid:\n raise ValueError(\n "My pid ({0:n}) is not last pid {1:n}".format(\n os.getpid(),self._lastpid))\n if not self._lock.acquire(False):\n # can't acquire the lock right now, add the block to the list of\n # pending blocks to free\n self._pending_free_blocks.append(block)\n else:\n # we hold the lock\n try:\n self._n_frees += 1\n self._free_pending_blocks()\n self._add_free_block(block)\n self._remove_allocated_block(block)\n finally:\n self._lock.release()\n\n def malloc(self, size):\n # return a block of right size (possibly rounded up)\n if size < 0:\n raise ValueError("Size {0:n} out of range".format(size))\n if sys.maxsize <= size:\n raise OverflowError("Size {0:n} too large".format(size))\n if os.getpid() != self._lastpid:\n self.__init__() # reinitialize after fork\n with self._lock:\n self._n_mallocs += 1\n # allow pending blocks to be marked available\n self._free_pending_blocks()\n size = self._roundup(max(size, 1), self._alignment)\n (arena, start, stop) = self._malloc(size)\n real_stop = start + size\n if real_stop < stop:\n # if the returned block is larger than necessary, mark\n # the remainder available\n self._add_free_block((arena, real_stop, stop))\n self._allocated_blocks[arena].add((start, real_stop))\n return (arena, start, real_stop)\n\n#\n# Class wrapping a block allocated out of a Heap -- can be inherited by child process\n#\n\nclass BufferWrapper(object):\n\n _heap = Heap()\n\n def __init__(self, size):\n if size < 0:\n raise ValueError("Size {0:n} out of range".format(size))\n if sys.maxsize <= size:\n raise OverflowError("Size {0:n} too 
large".format(size))\n block = BufferWrapper._heap.malloc(size)\n self._state = (block, size)\n util.Finalize(self, BufferWrapper._heap.free, args=(block,))\n\n def create_memoryview(self):\n (arena, start, stop), size = self._state\n return memoryview(arena.buffer)[start:start+size]\n | .venv\Lib\site-packages\multiprocess\heap.py | heap.py | Python | 11,626 | 0.95 | 0.189911 | 0.230769 | python-kit | 955 | 2024-06-22T10:10:21.429119 | BSD-3-Clause | false | 399af6bd83731f60608f3658869d4f3b |
#\n# Module providing manager classes for dealing\n# with shared objects\n#\n# multiprocessing/managers.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\n__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]\n\n#\n# Imports\n#\n\nimport sys\nimport threading\nimport signal\nimport array\nimport queue\nimport time\nimport types\nimport os\nfrom os import getpid\n\nfrom traceback import format_exc\n\nfrom . import connection\nfrom .context import reduction, get_spawning_popen, ProcessError\nfrom . import pool\nfrom . import process\nfrom . import util\nfrom . import get_context\ntry:\n from . import shared_memory\nexcept ImportError:\n HAS_SHMEM = False\nelse:\n HAS_SHMEM = True\n __all__.append('SharedMemoryManager')\n\n#\n# Register some things for pickling\n#\n\ndef reduce_array(a):\n return array.array, (a.typecode, a.tobytes())\nreduction.register(array.array, reduce_array)\n\nview_types = [type(getattr({}, name)()) for name in ('items','keys','values')]\ndef rebuild_as_list(obj):\n return list, (list(obj),)\nfor view_type in view_types:\n reduction.register(view_type, rebuild_as_list)\ndel view_type, view_types\n\n#\n# Type for identifying shared objects\n#\n\nclass Token(object):\n '''\n Type to uniquely identify a shared object\n '''\n __slots__ = ('typeid', 'address', 'id')\n\n def __init__(self, typeid, address, id):\n (self.typeid, self.address, self.id) = (typeid, address, id)\n\n def __getstate__(self):\n return (self.typeid, self.address, self.id)\n\n def __setstate__(self, state):\n (self.typeid, self.address, self.id) = state\n\n def __repr__(self):\n return '%s(typeid=%r, address=%r, id=%r)' % \\n (self.__class__.__name__, self.typeid, self.address, self.id)\n\n#\n# Function for communication with a manager's server process\n#\n\ndef dispatch(c, id, methodname, args=(), kwds={}):\n '''\n Send a message to manager using connection `c` and return response\n '''\n c.send((id, methodname, args, 
kwds))\n kind, result = c.recv()\n if kind == '#RETURN':\n return result\n raise convert_to_error(kind, result)\n\ndef convert_to_error(kind, result):\n if kind == '#ERROR':\n return result\n elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):\n if not isinstance(result, str):\n raise TypeError(\n "Result {0!r} (kind '{1}') type is {2}, not str".format(\n result, kind, type(result)))\n if kind == '#UNSERIALIZABLE':\n return RemoteError('Unserializable message: %s\n' % result)\n else:\n return RemoteError(result)\n else:\n return ValueError('Unrecognized message type {!r}'.format(kind))\n\nclass RemoteError(Exception):\n def __str__(self):\n return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)\n\n#\n# Functions for finding the method names of an object\n#\n\ndef all_methods(obj):\n '''\n Return a list of names of methods of `obj`\n '''\n temp = []\n for name in dir(obj):\n func = getattr(obj, name)\n if callable(func):\n temp.append(name)\n return temp\n\ndef public_methods(obj):\n '''\n Return a list of names of methods of `obj` which do not start with '_'\n '''\n return [name for name in all_methods(obj) if name[0] != '_']\n\n#\n# Server which is run in a process controlled by a manager\n#\n\nclass Server(object):\n '''\n Server class which runs in a process controlled by a manager object\n '''\n public = ['shutdown', 'create', 'accept_connection', 'get_methods',\n 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']\n\n def __init__(self, registry, address, authkey, serializer):\n if not isinstance(authkey, bytes):\n raise TypeError(\n "Authkey {0!r} is type {1!s}, not bytes".format(\n authkey, type(authkey)))\n self.registry = registry\n self.authkey = process.AuthenticationString(authkey)\n Listener, Client = listener_client[serializer]\n\n # do authentication later\n self.listener = Listener(address=address, backlog=16)\n self.address = self.listener.address\n\n self.id_to_obj = {'0': (None, ())}\n self.id_to_refcount = {}\n 
self.id_to_local_proxy_obj = {}\n self.mutex = threading.Lock()\n\n def serve_forever(self):\n '''\n Run the server forever\n '''\n self.stop_event = threading.Event()\n process.current_process()._manager_server = self\n try:\n accepter = threading.Thread(target=self.accepter)\n accepter.daemon = True\n accepter.start()\n try:\n while not self.stop_event.is_set():\n self.stop_event.wait(1)\n except (KeyboardInterrupt, SystemExit):\n pass\n finally:\n if sys.stdout != sys.__stdout__: # what about stderr?\n util.debug('resetting stdout, stderr')\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n sys.exit(0)\n\n def accepter(self):\n while True:\n try:\n c = self.listener.accept()\n except OSError:\n continue\n t = threading.Thread(target=self.handle_request, args=(c,))\n t.daemon = True\n t.start()\n\n def _handle_request(self, c):\n request = None\n try:\n connection.deliver_challenge(c, self.authkey)\n connection.answer_challenge(c, self.authkey)\n request = c.recv()\n ignore, funcname, args, kwds = request\n assert funcname in self.public, '%r unrecognized' % funcname\n func = getattr(self, funcname)\n except Exception:\n msg = ('#TRACEBACK', format_exc())\n else:\n try:\n result = func(c, *args, **kwds)\n except Exception:\n msg = ('#TRACEBACK', format_exc())\n else:\n msg = ('#RETURN', result)\n\n try:\n c.send(msg)\n except Exception as e:\n try:\n c.send(('#TRACEBACK', format_exc()))\n except Exception:\n pass\n util.info('Failure to send message: %r', msg)\n util.info(' ... request was %r', request)\n util.info(' ... 
exception was %r', e)\n\n def handle_request(self, conn):\n '''\n Handle a new connection\n '''\n try:\n self._handle_request(conn)\n except SystemExit:\n # Server.serve_client() calls sys.exit(0) on EOF\n pass\n finally:\n conn.close()\n\n def serve_client(self, conn):\n '''\n Handle requests from the proxies in a particular process/thread\n '''\n util.debug('starting server thread to service %r',\n threading.current_thread().name)\n\n recv = conn.recv\n send = conn.send\n id_to_obj = self.id_to_obj\n\n while not self.stop_event.is_set():\n\n try:\n methodname = obj = None\n request = recv()\n ident, methodname, args, kwds = request\n try:\n obj, exposed, gettypeid = id_to_obj[ident]\n except KeyError as ke:\n try:\n obj, exposed, gettypeid = \\n self.id_to_local_proxy_obj[ident]\n except KeyError:\n raise ke\n\n if methodname not in exposed:\n raise AttributeError(\n 'method %r of %r object is not in exposed=%r' %\n (methodname, type(obj), exposed)\n )\n\n function = getattr(obj, methodname)\n\n try:\n res = function(*args, **kwds)\n except Exception as e:\n msg = ('#ERROR', e)\n else:\n typeid = gettypeid and gettypeid.get(methodname, None)\n if typeid:\n rident, rexposed = self.create(conn, typeid, res)\n token = Token(typeid, self.address, rident)\n msg = ('#PROXY', (rexposed, token))\n else:\n msg = ('#RETURN', res)\n\n except AttributeError:\n if methodname is None:\n msg = ('#TRACEBACK', format_exc())\n else:\n try:\n fallback_func = self.fallback_mapping[methodname]\n result = fallback_func(\n self, conn, ident, obj, *args, **kwds\n )\n msg = ('#RETURN', result)\n except Exception:\n msg = ('#TRACEBACK', format_exc())\n\n except EOFError:\n util.debug('got EOF -- exiting thread serving %r',\n threading.current_thread().name)\n sys.exit(0)\n\n except Exception:\n msg = ('#TRACEBACK', format_exc())\n\n try:\n try:\n send(msg)\n except Exception:\n send(('#UNSERIALIZABLE', format_exc()))\n except Exception as e:\n util.info('exception in thread serving %r',\n 
threading.current_thread().name)\n util.info(' ... message was %r', msg)\n util.info(' ... exception was %r', e)\n conn.close()\n sys.exit(1)\n\n def fallback_getvalue(self, conn, ident, obj):\n return obj\n\n def fallback_str(self, conn, ident, obj):\n return str(obj)\n\n def fallback_repr(self, conn, ident, obj):\n return repr(obj)\n\n fallback_mapping = {\n '__str__':fallback_str,\n '__repr__':fallback_repr,\n '#GETVALUE':fallback_getvalue\n }\n\n def dummy(self, c):\n pass\n\n def debug_info(self, c):\n '''\n Return some info --- useful to spot problems with refcounting\n '''\n # Perhaps include debug info about 'c'?\n with self.mutex:\n result = []\n keys = list(self.id_to_refcount.keys())\n keys.sort()\n for ident in keys:\n if ident != '0':\n result.append(' %s: refcount=%s\n %s' %\n (ident, self.id_to_refcount[ident],\n str(self.id_to_obj[ident][0])[:75]))\n return '\n'.join(result)\n\n def number_of_objects(self, c):\n '''\n Number of shared objects\n '''\n # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'\n return len(self.id_to_refcount)\n\n def shutdown(self, c):\n '''\n Shutdown this process\n '''\n try:\n util.debug('manager received shutdown message')\n c.send(('#RETURN', None))\n except:\n import traceback\n traceback.print_exc()\n finally:\n self.stop_event.set()\n\n def create(self, c, typeid, /, *args, **kwds):\n '''\n Create a new shared object and return its id\n '''\n with self.mutex:\n callable, exposed, method_to_typeid, proxytype = \\n self.registry[typeid]\n\n if callable is None:\n if kwds or (len(args) != 1):\n raise ValueError(\n "Without callable, must have one non-keyword argument")\n obj = args[0]\n else:\n obj = callable(*args, **kwds)\n\n if exposed is None:\n exposed = public_methods(obj)\n if method_to_typeid is not None:\n if not isinstance(method_to_typeid, dict):\n raise TypeError(\n "Method_to_typeid {0!r}: type {1!s}, not dict".format(\n method_to_typeid, type(method_to_typeid)))\n exposed = 
list(exposed) + list(method_to_typeid)\n\n ident = '%x' % id(obj) # convert to string because xmlrpclib\n # only has 32 bit signed integers\n util.debug('%r callable returned object with id %r', typeid, ident)\n\n self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)\n if ident not in self.id_to_refcount:\n self.id_to_refcount[ident] = 0\n\n self.incref(c, ident)\n return ident, tuple(exposed)\n\n def get_methods(self, c, token):\n '''\n Return the methods of the shared object indicated by token\n '''\n return tuple(self.id_to_obj[token.id][1])\n\n def accept_connection(self, c, name):\n '''\n Spawn a new thread to serve this connection\n '''\n threading.current_thread().name = name\n c.send(('#RETURN', None))\n self.serve_client(c)\n\n def incref(self, c, ident):\n with self.mutex:\n try:\n self.id_to_refcount[ident] += 1\n except KeyError as ke:\n # If no external references exist but an internal (to the\n # manager) still does and a new external reference is created\n # from it, restore the manager's tracking of it from the\n # previously stashed internal ref.\n if ident in self.id_to_local_proxy_obj:\n self.id_to_refcount[ident] = 1\n self.id_to_obj[ident] = \\n self.id_to_local_proxy_obj[ident]\n util.debug('Server re-enabled tracking & INCREF %r', ident)\n else:\n raise ke\n\n def decref(self, c, ident):\n if ident not in self.id_to_refcount and \\n ident in self.id_to_local_proxy_obj:\n util.debug('Server DECREF skipping %r', ident)\n return\n\n with self.mutex:\n if self.id_to_refcount[ident] <= 0:\n raise AssertionError(\n "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(\n ident, self.id_to_obj[ident],\n self.id_to_refcount[ident]))\n self.id_to_refcount[ident] -= 1\n if self.id_to_refcount[ident] == 0:\n del self.id_to_refcount[ident]\n\n if ident not in self.id_to_refcount:\n # Two-step process in case the object turns out to contain other\n # proxy objects (e.g. 
a managed list of managed lists).
            # Otherwise, deleting self.id_to_obj[ident] would trigger the
            # deleting of the stored value (another managed object) which would
            # in turn attempt to acquire the mutex that is already held here.
            self.id_to_obj[ident] = (None, (), None)  # thread-safe
            util.debug('disposing of obj with id %r', ident)
            with self.mutex:
                del self.id_to_obj[ident]


#
# Class to represent state of a manager
#

class State(object):
    # Tiny mutable holder for a manager's lifecycle stage.  It is shared by
    # reference between the manager and its Finalize callback so that both
    # observe INITIAL -> STARTED -> SHUTDOWN transitions.
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2

#
# Mapping from serializer name to Listener and Client types
#

listener_client = { #XXX: register dill?
    'pickle' : (connection.Listener, connection.Client),
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }

#
# Definition of BaseManager
#

class BaseManager(object):
    '''
    Base class for managers
    '''
    # Per-class registry of typeid -> (callable, exposed, method_to_typeid,
    # proxytype); subclasses get a copy on their first register() call.
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None, *, shutdown_timeout=1.0):
        # authkey defaults to the current process's key so child/parent agree.
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()
        self._shutdown_timeout = shutdown_timeout

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        # Only valid before start()/connect(); report the precise reason
        # otherwise.
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' round-trip just validates the connection/authkey.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__ + '-' + ident
        self._process.start()

        # get address of server; parent closes its copy of the write end so
        # recv() would fail fast if the child died before sending.
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey, self._state,
                  self._Client, self._shutdown_timeout),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # bpo-36368: protect server process from KeyboardInterrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, /, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        # NOTE(review): self._process is only assigned in start(); calling
        # join() on a manager that was never started appears to raise
        # AttributeError -- confirm against callers.
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # Lazily start the server so "with Manager() as m:" works.
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client,
                          shutdown_timeout):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        # Escalation: polite 'shutdown' message -> join -> terminate() ->
        # join -> kill() -> join.
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            process.join(timeout=shutdown_timeout)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=shutdown_timeout)
                    if process.is_alive():
                        util.info('manager still alive after terminate')
                        process.kill()
                        process.join()

        state.value = State.SHUTDOWN
        # Drop the per-address proxy bookkeeping for this (now dead) manager.
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # Copy-on-write of the registry so registrations on a subclass do
        # not leak into BaseManager or sibling subclasses.
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()): # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # Synthesize a creator method named after the typeid, e.g.
            # manager.list(...); the server holds one extra reference during
            # creation which we release with 'decref' once the proxy owns it.
            def temp(self, /, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)

#
# Subclass of set which get cleared after a fork
#

class ProcessLocalSet(set):
    def 
__init__(self):
        # After a fork the child does not own the parent's references, so
        # the recorded ids must be wiped.
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        # Unpickles to an empty set (references are per-process).
        return type(self), ()

#
# Definition of BaseProxy
#

class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # Maps manager address -> (thread-local connection slot, set of owned
    # ids); shared by all proxies talking to the same manager.
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # Should be set to True only when a proxy object is being created
        # on the manager server; primary use case: nested proxy objects.
        # RebuildProxy detects when a proxy is being created on the manager
        # and sets this value appropriately.
        self._owned_by_manager = manager_owned

        # Authentication key resolution order: explicit arg, owning
        # manager's key, current process's key.
        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        # Open this thread's connection to the manager and announce
        # ourselves; the connection is cached on the thread-local slot.
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        try:
            conn = self._tls.connection
        except AttributeError:
            # First call from this thread: establish the connection lazily.
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # The server returned a token for a nested shared object; build
            # a proxy for it and release the server's temporary reference.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        # Anything else is an error payload; re-raise it locally.
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        # Tell the server we hold a reference; register a finalizer that
        # will decref when this proxy is garbage collected.
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = 
_Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                # Best-effort: the manager may have died; just log.
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        # Re-acquire a reference in the child; the parent's refs don't
        # carry over.
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        # Pickle as a call to RebuildProxy; only embed the authkey when
        # pickling for a child process being spawned (keys must not leak
        # into ordinary pickles).
        kwds = {}
        if get_spawning_popen() is not None:
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        # Deep-copying a proxy yields a local copy of the referent.
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"

#
# Function used for unpickling
#

def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    server = getattr(process.current_process(), '_manager_server', None)
    if server and server.address == token.address:
        # We ARE the manager that owns the referent: mark the proxy as
        # manager-owned so it skips refcounting over the wire.
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        if token.id not in server.id_to_local_proxy_obj:
            server.id_to_local_proxy_obj[token.id] = \
                server.id_to_obj[token.id]
    incref = 
(
        kwds.pop('incref', True) and
        not getattr(process.current_process(), '_inheriting', False)
        )
    return func(token, serializer, incref=incref, **kwds)

#
# Functions to create proxies and proxy types
#

def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: _cache is a deliberate mutable default used as a process-wide
    # memo keyed on (name, exposed).
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    dic = {}

    # Generate one forwarding method per exposed name; each simply relays
    # the call through BaseProxy._callmethod.
    for meth in exposed:
        exec('''def %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType


def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True, manager_owned=False):
    '''
    Return an auto-proxy for `token`
    '''
    _Client = listener_client[serializer][1]

    # If the exposed-method list was not supplied, ask the server for it.
    if exposed is None:
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref, manager_owned=manager_owned)
    proxy._isauto = True
    return proxy

#
# Types/callables which we will register with SyncManager
#

class Namespace(object):
    # Simple attribute bag; the server-side referent for 'Namespace'.
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        items = list(self.__dict__.items())
        temp = []
        # Underscore-prefixed attributes are considered private and hidden.
        for name, value in items:
            if not name.startswith('_'):
                temp.append('%s=%r' % (name, value))
        temp.sort()
        return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))

class Value(object):
    # Server-side referent for 'Value'.  NOTE: the ``lock`` argument is
    # accepted for API compatibility but ignored here.
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    def get(self):
        
return self._value
    def set(self, value):
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
    value = property(get, set)

def Array(typecode, sequence, lock=True):
    # Server-side referent factory for 'Array'.  NOTE: ``lock`` is accepted
    # for API compatibility but ignored; a plain array.array is returned.
    return array.array(typecode, sequence)

#
# Proxy types used by SyncManager
#

class IteratorProxy(BaseProxy):
    # Proxy for a remote iterator/generator; forwards the generator
    # protocol methods over the connection.
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)


class AcquirerProxy(BaseProxy):
    # Proxy for lock-like referents (Lock, RLock, Semaphore, ...).
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        # Only forward ``timeout`` when given, so referents whose acquire()
        # takes a single argument keep working.
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')


class ConditionProxy(AcquirerProxy):
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self, n=1):
        return self._callmethod('notify', (n,))
    def notify_all(self):
        return self._callmethod('notify_all')
    def wait_for(self, predicate, timeout=None):
        # Local re-implementation of threading.Condition.wait_for: the
        # predicate runs in THIS process, only wait() goes over the wire.
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = getattr(time,'monotonic',time.time)() + timeout
        else:
            endtime = None
        waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - getattr(time,'monotonic',time.time)()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result


class EventProxy(BaseProxy):
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        
return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))


class BarrierProxy(BaseProxy):
    # Read-only attributes are fetched via the referent's
    # __getattribute__, hence its presence in _exposed_.
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def abort(self):
        return self._callmethod('abort')
    def reset(self):
        return self._callmethod('reset')
    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))
    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))
    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))


class NamespaceProxy(BaseProxy):
    # Attribute access on the proxy is forwarded to the referent, except
    # for underscore-prefixed names which stay local to the proxy object.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))


class ValueProxy(BaseProxy):
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)

    __class_getitem__ = classmethod(types.GenericAlias)


BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 
'remove',
    'reverse', 'sort', '__imul__'
    ))
class ListProxy(BaseListProxy):
    # __iadd__/__imul__ must return the proxy itself (not the referent's
    # return value) so that ``lst += ...`` rebinds to the proxy.
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self


DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
# Iterating a shared dict returns a proxied iterator rather than a copy.
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
    }


ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
    ))


BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
    ))
# Async/iterator results from a shared Pool come back as proxies.
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
class PoolProxy(BasePoolProxy):
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

#
# Definition of SyncManager
#

class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocess.Manager()` function creates started instances of
    this class.
    '''

SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', 
threading.Condition, ConditionProxy)\nSyncManager.register('Barrier', threading.Barrier, BarrierProxy)\nSyncManager.register('Pool', pool.Pool, PoolProxy)\nSyncManager.register('list', list, ListProxy)\nSyncManager.register('dict', dict, DictProxy)\nSyncManager.register('Value', Value, ValueProxy)\nSyncManager.register('Array', Array, ArrayProxy)\nSyncManager.register('Namespace', Namespace, NamespaceProxy)\n\n# types returned by methods of PoolProxy\nSyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)\nSyncManager.register('AsyncResult', create_method=False)\n\n#\n# Definition of SharedMemoryManager and SharedMemoryServer\n#\n\nif HAS_SHMEM:\n class _SharedMemoryTracker:\n "Manages one or more shared memory segments."\n\n def __init__(self, name, segment_names=[]):\n self.shared_memory_context_name = name\n self.segment_names = segment_names\n\n def register_segment(self, segment_name):\n "Adds the supplied shared memory block name to tracker."\n util.debug(f"Register segment {segment_name!r} in pid {getpid()}")\n self.segment_names.append(segment_name)\n\n def destroy_segment(self, segment_name):\n """Calls unlink() on the shared memory block with the supplied name\n and removes it from the list of blocks being tracked."""\n util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")\n self.segment_names.remove(segment_name)\n segment = shared_memory.SharedMemory(segment_name)\n segment.close()\n segment.unlink()\n\n def unlink(self):\n "Calls destroy_segment() on all tracked shared memory blocks."\n for segment_name in self.segment_names[:]:\n self.destroy_segment(segment_name)\n\n def __del__(self):\n util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")\n self.unlink()\n\n def __getstate__(self):\n return (self.shared_memory_context_name, self.segment_names)\n\n def __setstate__(self, state):\n self.__init__(*state)\n\n\n class SharedMemoryServer(Server):\n\n public = Server.public + \\n ['track_segment', 
'release_segment', 'list_segments']

        def __init__(self, *args, **kwargs):
            Server.__init__(self, *args, **kwargs)
            address = self.address
            # The address of Linux abstract namespaces can be bytes
            if isinstance(address, bytes):
                address = os.fsdecode(address)
            # One tracker per server; its name embeds address and pid so
            # debug logs can attribute segments to a server instance.
            self.shared_memory_context = \
                _SharedMemoryTracker(f"shm_{address}_{getpid()}")
            util.debug(f"SharedMemoryServer started by pid {getpid()}")

        def create(self, c, typeid, /, *args, **kwargs):
            """Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object."""
            # Unless set up as a shared proxy, don't make shared_memory_context
            # a standard part of kwargs.  This makes things easier for supplying
            # simple functions.
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
                kwargs['shared_memory_context'] = self.shared_memory_context
            return Server.create(self, c, typeid, *args, **kwargs)

        def shutdown(self, c):
            "Call unlink() on all tracked shared memory, terminate the Server."
            self.shared_memory_context.unlink()
            return Server.shutdown(self, c)

        def track_segment(self, c, segment_name):
            "Adds the supplied shared memory block name to Server's tracker."
            # ``c`` is the client connection (unused here; required by the
            # Server request-dispatch signature).
            self.shared_memory_context.register_segment(segment_name)

        def release_segment(self, c, segment_name):
            """Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server."""
            self.shared_memory_context.destroy_segment(segment_name)

        def list_segments(self, c):
            """Returns a list of names of shared memory blocks that the Server
            is currently tracking."""
            return self.shared_memory_context.segment_names


    class SharedMemoryManager(BaseManager):
        """Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory. 
It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        """

        _Server = SharedMemoryServer

        def __init__(self, *args, **kwargs):
            if os.name == "posix":
                # bpo-36867: Ensure the resource_tracker is running before
                # launching the manager process, so that concurrent
                # shared_memory manipulation both in the manager and in the
                # current process does not create two resource_tracker
                # processes.
                from . import resource_tracker
                resource_tracker.ensure_running()
            BaseManager.__init__(self, *args, **kwargs)
            util.debug(f"{self.__class__.__name__} created by pid {getpid()}")

        def __del__(self):
            util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")

        def get_server(self):
            'Better than monkeypatching for now; merge into Server ultimately'
            if self._state.value != State.INITIAL:
                if self._state.value == State.STARTED:
                    raise ProcessError("Already started SharedMemoryServer")
                elif self._state.value == State.SHUTDOWN:
                    raise ProcessError("SharedMemoryManager has shut down")
                else:
                    raise ProcessError(
                        "Unknown state {!r}".format(self._state.value))
            return self._Server(self._registry, self._address,
                                self._authkey, self._serializer)

        def SharedMemory(self, size):
            """Returns a new SharedMemory instance with the specified size in
            bytes, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sms = shared_memory.SharedMemory(None, create=True, size=size)
                try:
                    dispatch(conn, None, 'track_segment', (sms.name,))
                except BaseException as e:
                    # If the server never learns about the segment, unlink it
                    # here or it would leak; BaseException so even
                    # KeyboardInterrupt cleans up.
                    sms.unlink()
                    raise e
            return sms

        def ShareableList(self, sequence):
            """Returns a new ShareableList instance populated with the values
            from the input sequence, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sl = shared_memory.ShareableList(sequence)
                try:
                    dispatch(conn, None, 'track_segment', (sl.shm.name,))
                except BaseException as e:
                    # Same leak-avoidance as in SharedMemory() above.
                    sl.shm.unlink()
                    raise e
            return sl
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool', 'ThreadPool']

#
# Imports
#

import collections
import itertools
import os
import queue
import threading
import time
import traceback
import types
import warnings

# If threading is available then ThreadPool should be provided.  Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError
from .connection import wait

#
# Constants representing the state of a pool
#

INIT = "INIT"
RUN = "RUN"
CLOSE = "CLOSE"
TERMINATE = "TERMINATE"

#
# Miscellaneous
#

# Process-wide monotonically increasing job ids.
job_counter = itertools.count()

def mapstar(args):
    # Helper run in workers for map(): args is (func, chunk-of-iterable).
    return list(map(*args))

def starmapstar(args):
    # Helper run in workers for starmap(): each chunk item is an arg tuple.
    return list(itertools.starmap(args[0], args[1]))

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    # Carries the formatted traceback text from the worker process.
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    # Pickle-safe wrapper: the traceback itself cannot be pickled, so it is
    # formatted to text here and re-attached as __cause__ on unpickling.
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    # Unpickling hook for ExceptionWithTraceback.
    exc.__cause__ = RemoteTraceback(tb)
    return exc

#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    # Main loop of a pool worker process: pull tasks from inqueue, run
    # them, push (job, i, (success, value)) results to outqueue.  Exits
    # after ``maxtasks`` tasks (maxtasksperchild) or on a None sentinel.
    if (maxtasks is not None) and not (isinstance(maxtasks, int)
                                       and maxtasks >= 1):
        raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
    put = outqueue.put
    get = inqueue.get
    # Close the pipe ends this worker does not use.
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception and func is not _helper_reraises_exception:
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The *result* failed to pickle; report a picklable stand-in
            # so the parent does not hang waiting for this job.
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))

        # Drop references promptly so large objects can be collected
        # between tasks.
        task = job = result = func = args = kwds = None
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)

def _helper_reraises_exception(ex):
    'Pickle-able helper function for use by _guarded_task_generation.'
    raise ex

#
# Class representing a process pool
#

class _PoolCache(dict):
    """
    Class that implements a cache for the Pool class that will notify
    the pool management threads every time the cache is emptied. The
    notification is done by the use of a queue that is provided when
    instantiating the cache.
    """
    def __init__(self, /, *args, notifier=None, **kwds):
        self.notifier = notifier
        super().__init__(*args, **kwds)

    def __delitem__(self, item):
        super().__delitem__(item)

        # Notify that the cache is empty. This is important because the
        # pool keeps maintaining workers until the cache gets drained. This
        # eliminates a race condition in which a task is finished after the
        # the pool's _handle_workers method has enter another iteration of the
        # loop. In this situation, the only event that can wake up the pool
        # is the cache to be emptied (no more tasks available).
        if not self:
            self.notifier.put(None)

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    _wrap_exception = True

    @staticmethod
    def Process(ctx, *args, **kwds):
        # Overridden by ThreadPool to create threads instead of processes.
        return ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        # Attributes initialized early to make sure that they exist in
        # __del__() if __init__() raises an exception
        self._pool = []
        self._state = INIT

        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.SimpleQueue()
        # The _change_notifier queue exist to wake up self._handle_workers()
        # when the cache (self._cache) is empty or when there is a change in
        # the _state variable of the thread that runs _handle_workers.
        self._change_notifier = self._ctx.SimpleQueue()
        self._cache = _PoolCache(notifier=self._change_notifier)
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")
        if maxtasksperchild is not None:
            if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0:
                raise ValueError("maxtasksperchild must be a positive int or None")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        try:
            self._repopulate_pool()
        except Exception:
            # Spawning failed part-way: reap whatever workers did start.
            for p in self._pool:
                if p.exitcode is None:
                    p.terminate()
            for p in self._pool:
                p.join()
            raise

        sentinels = self._get_sentinels()

        # Three daemon helper threads: worker maintenance, task feeding,
        # and result collection.
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self._cache, self._taskqueue, self._ctx, self.Process,
                  self._processes, self._pool, self._inqueue, self._outqueue,
                  self._initializer, self._initargs, self._maxtasksperchild,
                  self._wrap_exception, sentinels, self._change_notifier)
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._change_notifier, self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
        self._state = RUN

    # Copy globals as function locals to make sure that they are available
    # during Python shutdown when the Pool is destroyed.
    def __del__(self, _warn=warnings.warn, RUN=RUN):
        if self._state == RUN:
            _warn(f"unclosed running multiprocessing pool {self!r}",
                  ResourceWarning, source=self)
            if getattr(self, '_change_notifier', None) is not None:
                self._change_notifier.put(None)

    def __repr__(self):
        cls = self.__class__
        return (f'<{cls.__module__}.{cls.__qualname__} '
                f'state={self._state} '
                f'pool_size={len(self._pool)}>')

    def _get_sentinels(self):
        # Objects _handle_workers can wait() on to be woken promptly.
        task_queue_sentinels = [self._outqueue._reader]
        self_notifier_sentinels = [self._change_notifier._reader]
        return [*task_queue_sentinels, *self_notifier_sentinels]

    @staticmethod
    def _get_worker_sentinels(workers):
        return [worker.sentinel for worker in
                workers if hasattr(worker, "sentinel")]

    @staticmethod
    def _join_exited_workers(pool):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so del pool[i] does not shift pending indices.
        for i in reversed(range(len(pool))):
            worker = pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del pool[i]
        return cleaned

    def _repopulate_pool(self):
        return self._repopulate_pool_static(self._ctx, self.Process,
                                            self._processes,
                                            self._pool, self._inqueue,
                                            self._outqueue, self._initializer,
                                            self._initargs,
                                            self._maxtasksperchild,
                                            self._wrap_exception)

    @staticmethod
    def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
                                outqueue, initializer, initargs,
                                maxtasksperchild, wrap_exception):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(processes - len(pool)):
            w = Process(ctx, target=worker,
                        args=(inqueue, outqueue,
                              initializer,
                              initargs, maxtasksperchild,
                              wrap_exception))
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            pool.append(w)
            util.debug('added worker')

    @staticmethod
    def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
                       initializer, initargs, maxtasksperchild,
                       wrap_exception):
        """Clean up any exited workers and start replacements 
for them.\n """\n if Pool._join_exited_workers(pool):\n Pool._repopulate_pool_static(ctx, Process, processes, pool,\n inqueue, outqueue, initializer,\n initargs, maxtasksperchild,\n wrap_exception)\n\n def _setup_queues(self):\n self._inqueue = self._ctx.SimpleQueue()\n self._outqueue = self._ctx.SimpleQueue()\n self._quick_put = self._inqueue._writer.send\n self._quick_get = self._outqueue._reader.recv\n\n def _check_running(self):\n if self._state != RUN:\n raise ValueError("Pool not running")\n\n def apply(self, func, args=(), kwds={}):\n '''\n Equivalent of `func(*args, **kwds)`.\n Pool must be running.\n '''\n return self.apply_async(func, args, kwds).get()\n\n def map(self, func, iterable, chunksize=None):\n '''\n Apply `func` to each element in `iterable`, collecting the results\n in a list that is returned.\n '''\n return self._map_async(func, iterable, mapstar, chunksize).get()\n\n def starmap(self, func, iterable, chunksize=None):\n '''\n Like `map()` method but the elements of the `iterable` are expected to\n be iterables as well and will be unpacked as arguments. 
Hence\n `func` and (a, b) becomes func(a, b).\n '''\n return self._map_async(func, iterable, starmapstar, chunksize).get()\n\n def starmap_async(self, func, iterable, chunksize=None, callback=None,\n error_callback=None):\n '''\n Asynchronous version of `starmap()` method.\n '''\n return self._map_async(func, iterable, starmapstar, chunksize,\n callback, error_callback)\n\n def _guarded_task_generation(self, result_job, func, iterable):\n '''Provides a generator of tasks for imap and imap_unordered with\n appropriate handling for iterables which throw exceptions during\n iteration.'''\n try:\n i = -1\n for i, x in enumerate(iterable):\n yield (result_job, i, func, (x,), {})\n except Exception as e:\n yield (result_job, i+1, _helper_reraises_exception, (e,), {})\n\n def imap(self, func, iterable, chunksize=1):\n '''\n Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.\n '''\n self._check_running()\n if chunksize == 1:\n result = IMapIterator(self)\n self._taskqueue.put(\n (\n self._guarded_task_generation(result._job, func, iterable),\n result._set_length\n ))\n return result\n else:\n if chunksize < 1:\n raise ValueError(\n "Chunksize must be 1+, not {0:n}".format(\n chunksize))\n task_batches = Pool._get_tasks(func, iterable, chunksize)\n result = IMapIterator(self)\n self._taskqueue.put(\n (\n self._guarded_task_generation(result._job,\n mapstar,\n task_batches),\n result._set_length\n ))\n return (item for chunk in result for item in chunk)\n\n def imap_unordered(self, func, iterable, chunksize=1):\n '''\n Like `imap()` method but ordering of results is arbitrary.\n '''\n self._check_running()\n if chunksize == 1:\n result = IMapUnorderedIterator(self)\n self._taskqueue.put(\n (\n self._guarded_task_generation(result._job, func, iterable),\n result._set_length\n ))\n return result\n else:\n if chunksize < 1:\n raise ValueError(\n "Chunksize must be 1+, not {0!r}".format(chunksize))\n task_batches = Pool._get_tasks(func, iterable, chunksize)\n result 
= IMapUnorderedIterator(self)\n self._taskqueue.put(\n (\n self._guarded_task_generation(result._job,\n mapstar,\n task_batches),\n result._set_length\n ))\n return (item for chunk in result for item in chunk)\n\n def apply_async(self, func, args=(), kwds={}, callback=None,\n error_callback=None):\n '''\n Asynchronous version of `apply()` method.\n '''\n self._check_running()\n result = ApplyResult(self, callback, error_callback)\n self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))\n return result\n\n def map_async(self, func, iterable, chunksize=None, callback=None,\n error_callback=None):\n '''\n Asynchronous version of `map()` method.\n '''\n return self._map_async(func, iterable, mapstar, chunksize, callback,\n error_callback)\n\n def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,\n error_callback=None):\n '''\n Helper function to implement map, starmap and their async counterparts.\n '''\n self._check_running()\n if not hasattr(iterable, '__len__'):\n iterable = list(iterable)\n\n if chunksize is None:\n chunksize, extra = divmod(len(iterable), len(self._pool) * 4)\n if extra:\n chunksize += 1\n if len(iterable) == 0:\n chunksize = 0\n\n task_batches = Pool._get_tasks(func, iterable, chunksize)\n result = MapResult(self, chunksize, len(iterable), callback,\n error_callback=error_callback)\n self._taskqueue.put(\n (\n self._guarded_task_generation(result._job,\n mapper,\n task_batches),\n None\n )\n )\n return result\n\n @staticmethod\n def _wait_for_updates(sentinels, change_notifier, timeout=None):\n wait(sentinels, timeout=timeout)\n while not change_notifier.empty():\n change_notifier.get()\n\n @classmethod\n def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,\n pool, inqueue, outqueue, initializer, initargs,\n maxtasksperchild, wrap_exception, sentinels,\n change_notifier):\n thread = threading.current_thread()\n\n # Keep maintaining workers until the cache gets drained, unless the pool\n # is 
terminated.\n while thread._state == RUN or (cache and thread._state != TERMINATE):\n cls._maintain_pool(ctx, Process, processes, pool, inqueue,\n outqueue, initializer, initargs,\n maxtasksperchild, wrap_exception)\n\n current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]\n\n cls._wait_for_updates(current_sentinels, change_notifier)\n # send sentinel to stop workers\n taskqueue.put(None)\n util.debug('worker handler exiting')\n\n @staticmethod\n def _handle_tasks(taskqueue, put, outqueue, pool, cache):\n thread = threading.current_thread()\n\n for taskseq, set_length in iter(taskqueue.get, None):\n task = None\n try:\n # iterating taskseq cannot fail\n for task in taskseq:\n if thread._state != RUN:\n util.debug('task handler found thread._state != RUN')\n break\n try:\n put(task)\n except Exception as e:\n job, idx = task[:2]\n try:\n cache[job]._set(idx, (False, e))\n except KeyError:\n pass\n else:\n if set_length:\n util.debug('doing set_length()')\n idx = task[1] if task else -1\n set_length(idx + 1)\n continue\n break\n finally:\n task = taskseq = job = None\n else:\n util.debug('task handler got sentinel')\n\n try:\n # tell result handler to finish when cache is empty\n util.debug('task handler sending sentinel to result handler')\n outqueue.put(None)\n\n # tell workers there is no more work\n util.debug('task handler sending sentinel to workers')\n for p in pool:\n put(None)\n except OSError:\n util.debug('task handler got OSError when sending sentinels')\n\n util.debug('task handler exiting')\n\n @staticmethod\n def _handle_results(outqueue, get, cache):\n thread = threading.current_thread()\n\n while 1:\n try:\n task = get()\n except (OSError, EOFError):\n util.debug('result handler got EOFError/OSError -- exiting')\n return\n\n if thread._state != RUN:\n assert thread._state == TERMINATE, "Thread not in TERMINATE"\n util.debug('result handler found thread._state=TERMINATE')\n break\n\n if task is None:\n util.debug('result handler got 
sentinel')\n break\n\n job, i, obj = task\n try:\n cache[job]._set(i, obj)\n except KeyError:\n pass\n task = job = obj = None\n\n while cache and thread._state != TERMINATE:\n try:\n task = get()\n except (OSError, EOFError):\n util.debug('result handler got EOFError/OSError -- exiting')\n return\n\n if task is None:\n util.debug('result handler ignoring extra sentinel')\n continue\n job, i, obj = task\n try:\n cache[job]._set(i, obj)\n except KeyError:\n pass\n task = job = obj = None\n\n if hasattr(outqueue, '_reader'):\n util.debug('ensuring that outqueue is not full')\n # If we don't make room available in outqueue then\n # attempts to add the sentinel (None) to outqueue may\n # block. There is guaranteed to be no more than 2 sentinels.\n try:\n for i in range(10):\n if not outqueue._reader.poll():\n break\n get()\n except (OSError, EOFError):\n pass\n\n util.debug('result handler exiting: len(cache)=%s, thread._state=%s',\n len(cache), thread._state)\n\n @staticmethod\n def _get_tasks(func, it, size):\n it = iter(it)\n while 1:\n x = tuple(itertools.islice(it, size))\n if not x:\n return\n yield (func, x)\n\n def __reduce__(self):\n raise NotImplementedError(\n 'pool objects cannot be passed between processes or pickled'\n )\n\n def close(self):\n util.debug('closing pool')\n if self._state == RUN:\n self._state = CLOSE\n self._worker_handler._state = CLOSE\n self._change_notifier.put(None)\n\n def terminate(self):\n util.debug('terminating pool')\n self._state = TERMINATE\n self._terminate()\n\n def join(self):\n util.debug('joining pool')\n if self._state == RUN:\n raise ValueError("Pool is still running")\n elif self._state not in (CLOSE, TERMINATE):\n raise ValueError("In unknown state")\n self._worker_handler.join()\n self._task_handler.join()\n self._result_handler.join()\n for p in self._pool:\n p.join()\n\n @staticmethod\n def _help_stuff_finish(inqueue, task_handler, size):\n # task_handler may be blocked trying to put items on inqueue\n 
util.debug('removing tasks from inqueue until task handler finished')\n inqueue._rlock.acquire()\n while task_handler.is_alive() and inqueue._reader.poll():\n inqueue._reader.recv()\n time.sleep(0)\n\n @classmethod\n def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,\n worker_handler, task_handler, result_handler, cache):\n # this is guaranteed to only be called once\n util.debug('finalizing pool')\n\n # Notify that the worker_handler state has been changed so the\n # _handle_workers loop can be unblocked (and exited) in order to\n # send the finalization sentinel all the workers.\n worker_handler._state = TERMINATE\n change_notifier.put(None)\n\n task_handler._state = TERMINATE\n\n util.debug('helping task handler/workers to finish')\n cls._help_stuff_finish(inqueue, task_handler, len(pool))\n\n if (not result_handler.is_alive()) and (len(cache) != 0):\n raise AssertionError(\n "Cannot have cache with result_handler not alive")\n\n result_handler._state = TERMINATE\n change_notifier.put(None)\n outqueue.put(None) # sentinel\n\n # We must wait for the worker handler to exit before terminating\n # workers because we don't want workers to be restarted behind our back.\n util.debug('joining worker handler')\n if threading.current_thread() is not worker_handler:\n worker_handler.join()\n\n # Terminate workers which haven't already finished.\n if pool and hasattr(pool[0], 'terminate'):\n util.debug('terminating workers')\n for p in pool:\n if p.exitcode is None:\n p.terminate()\n\n util.debug('joining task handler')\n if threading.current_thread() is not task_handler:\n task_handler.join()\n\n util.debug('joining result handler')\n if threading.current_thread() is not result_handler:\n result_handler.join()\n\n if pool and hasattr(pool[0], 'terminate'):\n util.debug('joining pool workers')\n for p in pool:\n if p.is_alive():\n # worker has not yet exited\n util.debug('cleaning up worker %d' % p.pid)\n p.join()\n\n def __enter__(self):\n 
self._check_running()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.terminate()\n\n#\n# Class whose instances are returned by `Pool.apply_async()`\n#\n\nclass ApplyResult(object):\n\n def __init__(self, pool, callback, error_callback):\n self._pool = pool\n self._event = threading.Event()\n self._job = next(job_counter)\n self._cache = pool._cache\n self._callback = callback\n self._error_callback = error_callback\n self._cache[self._job] = self\n\n def ready(self):\n return self._event.is_set()\n\n def successful(self):\n if not self.ready():\n raise ValueError("{0!r} not ready".format(self))\n return self._success\n\n def wait(self, timeout=None):\n self._event.wait(timeout)\n\n def get(self, timeout=None):\n self.wait(timeout)\n if not self.ready():\n raise TimeoutError\n if self._success:\n return self._value\n else:\n raise self._value\n\n def _set(self, i, obj):\n self._success, self._value = obj\n if self._callback and self._success:\n self._callback(self._value)\n if self._error_callback and not self._success:\n self._error_callback(self._value)\n self._event.set()\n del self._cache[self._job]\n self._pool = None\n\n __class_getitem__ = classmethod(types.GenericAlias)\n\nAsyncResult = ApplyResult # create alias -- see #17805\n\n#\n# Class whose instances are returned by `Pool.map_async()`\n#\n\nclass MapResult(ApplyResult):\n\n def __init__(self, pool, chunksize, length, callback, error_callback):\n ApplyResult.__init__(self, pool, callback,\n error_callback=error_callback)\n self._success = True\n self._value = [None] * length\n self._chunksize = chunksize\n if chunksize <= 0:\n self._number_left = 0\n self._event.set()\n del self._cache[self._job]\n else:\n self._number_left = length//chunksize + bool(length % chunksize)\n\n def _set(self, i, success_result):\n self._number_left -= 1\n success, result = success_result\n if success and self._success:\n self._value[i*self._chunksize:(i+1)*self._chunksize] = result\n if 
self._number_left == 0:\n if self._callback:\n self._callback(self._value)\n del self._cache[self._job]\n self._event.set()\n self._pool = None\n else:\n if not success and self._success:\n # only store first exception\n self._success = False\n self._value = result\n if self._number_left == 0:\n # only consider the result ready once all jobs are done\n if self._error_callback:\n self._error_callback(self._value)\n del self._cache[self._job]\n self._event.set()\n self._pool = None\n\n#\n# Class whose instances are returned by `Pool.imap()`\n#\n\nclass IMapIterator(object):\n\n def __init__(self, pool):\n self._pool = pool\n self._cond = threading.Condition(threading.Lock())\n self._job = next(job_counter)\n self._cache = pool._cache\n self._items = collections.deque()\n self._index = 0\n self._length = None\n self._unsorted = {}\n self._cache[self._job] = self\n\n def __iter__(self):\n return self\n\n def next(self, timeout=None):\n with self._cond:\n try:\n item = self._items.popleft()\n except IndexError:\n if self._index == self._length:\n self._pool = None\n raise StopIteration from None\n self._cond.wait(timeout)\n try:\n item = self._items.popleft()\n except IndexError:\n if self._index == self._length:\n self._pool = None\n raise StopIteration from None\n raise TimeoutError from None\n\n success, value = item\n if success:\n return value\n raise value\n\n __next__ = next # XXX\n\n def _set(self, i, obj):\n with self._cond:\n if self._index == i:\n self._items.append(obj)\n self._index += 1\n while self._index in self._unsorted:\n obj = self._unsorted.pop(self._index)\n self._items.append(obj)\n self._index += 1\n self._cond.notify()\n else:\n self._unsorted[i] = obj\n\n if self._index == self._length:\n del self._cache[self._job]\n self._pool = None\n\n def _set_length(self, length):\n with self._cond:\n self._length = length\n if self._index == self._length:\n self._cond.notify()\n del self._cache[self._job]\n self._pool = None\n\n#\n# Class whose instances 
are returned by `Pool.imap_unordered()`\n#\n\nclass IMapUnorderedIterator(IMapIterator):\n\n def _set(self, i, obj):\n with self._cond:\n self._items.append(obj)\n self._index += 1\n self._cond.notify()\n if self._index == self._length:\n del self._cache[self._job]\n self._pool = None\n\n#\n#\n#\n\nclass ThreadPool(Pool):\n _wrap_exception = False\n\n @staticmethod\n def Process(ctx, *args, **kwds):\n from .dummy import Process\n return Process(*args, **kwds)\n\n def __init__(self, processes=None, initializer=None, initargs=()):\n Pool.__init__(self, processes, initializer, initargs)\n\n def _setup_queues(self):\n self._inqueue = queue.SimpleQueue()\n self._outqueue = queue.SimpleQueue()\n self._quick_put = self._inqueue.put\n self._quick_get = self._outqueue.get\n\n def _get_sentinels(self):\n return [self._change_notifier._reader]\n\n @staticmethod\n def _get_worker_sentinels(workers):\n return []\n\n @staticmethod\n def _help_stuff_finish(inqueue, task_handler, size):\n # drain inqueue, and put sentinels at its head to make workers finish\n try:\n while True:\n inqueue.get(block=False)\n except queue.Empty:\n pass\n for i in range(size):\n inqueue.put(None)\n\n def _wait_for_updates(self, sentinels, change_notifier, timeout):\n time.sleep(timeout)\n | .venv\Lib\site-packages\multiprocess\pool.py | pool.py | Python | 32,760 | 0.95 | 0.213166 | 0.095471 | vue-tools | 41 | 2025-03-18T03:43:33.125286 | GPL-3.0 | false | acea15a0f22e801e4c3bf5ac4df87dc6 |
import os\nimport signal\n\nfrom . import util\n\n__all__ = ['Popen']\n\n#\n# Start child process using fork\n#\n\nclass Popen(object):\n method = 'fork'\n\n def __init__(self, process_obj):\n util._flush_std_streams()\n self.returncode = None\n self.finalizer = None\n self._launch(process_obj)\n\n def duplicate_for_child(self, fd):\n return fd\n\n def poll(self, flag=os.WNOHANG):\n if self.returncode is None:\n try:\n pid, sts = os.waitpid(self.pid, flag)\n except OSError:\n # Child process not yet created. See #1731717\n # e.errno == errno.ECHILD == 10\n return None\n if pid == self.pid:\n self.returncode = os.waitstatus_to_exitcode(sts)\n return self.returncode\n\n def wait(self, timeout=None):\n if self.returncode is None:\n if timeout is not None:\n from multiprocess.connection import wait\n if not wait([self.sentinel], timeout):\n return None\n # This shouldn't block if wait() returned successfully.\n return self.poll(os.WNOHANG if timeout == 0.0 else 0)\n return self.returncode\n\n def _send_signal(self, sig):\n if self.returncode is None:\n try:\n os.kill(self.pid, sig)\n except ProcessLookupError:\n pass\n except OSError:\n if self.wait(timeout=0.1) is None:\n raise\n\n def terminate(self):\n self._send_signal(signal.SIGTERM)\n\n def kill(self):\n self._send_signal(signal.SIGKILL)\n\n def _launch(self, process_obj):\n code = 1\n parent_r, child_w = os.pipe()\n child_r, parent_w = os.pipe()\n self.pid = os.fork()\n if self.pid == 0:\n try:\n os.close(parent_r)\n os.close(parent_w)\n code = process_obj._bootstrap(parent_sentinel=child_r)\n finally:\n os._exit(code)\n else:\n os.close(child_w)\n os.close(child_r)\n self.finalizer = util.Finalize(self, util.close_fds,\n (parent_r, parent_w,))\n self.sentinel = parent_r\n\n def close(self):\n if self.finalizer is not None:\n self.finalizer()\n | .venv\Lib\site-packages\multiprocess\popen_fork.py | popen_fork.py | Python | 2,374 | 0.95 | 0.289157 | 0.085714 | awesome-app | 441 | 2025-03-24T22:42:16.095632 | 
BSD-3-Clause | false | 442b11d55359ac4805014c3fab69126b |
import io\nimport os\n\nfrom .context import reduction, set_spawning_popen\nif not reduction.HAVE_SEND_HANDLE:\n raise ImportError('No support for sending fds between processes')\nfrom . import forkserver\nfrom . import popen_fork\nfrom . import spawn\nfrom . import util\n\n\n__all__ = ['Popen']\n\n#\n# Wrapper for an fd used while launching a process\n#\n\nclass _DupFd(object):\n def __init__(self, ind):\n self.ind = ind\n def detach(self):\n return forkserver.get_inherited_fds()[self.ind]\n\n#\n# Start child process using a server process\n#\n\nclass Popen(popen_fork.Popen):\n method = 'forkserver'\n DupFd = _DupFd\n\n def __init__(self, process_obj):\n self._fds = []\n super().__init__(process_obj)\n\n def duplicate_for_child(self, fd):\n self._fds.append(fd)\n return len(self._fds) - 1\n\n def _launch(self, process_obj):\n prep_data = spawn.get_preparation_data(process_obj._name)\n buf = io.BytesIO()\n set_spawning_popen(self)\n try:\n reduction.dump(prep_data, buf)\n reduction.dump(process_obj, buf)\n finally:\n set_spawning_popen(None)\n\n self.sentinel, w = forkserver.connect_to_new_process(self._fds)\n # Keep a duplicate of the data pipe's write end as a sentinel of the\n # parent process used by the child process.\n _parent_w = os.dup(w)\n self.finalizer = util.Finalize(self, util.close_fds,\n (_parent_w, self.sentinel))\n with open(w, 'wb', closefd=True) as f:\n f.write(buf.getbuffer())\n self.pid = forkserver.read_signed(self.sentinel)\n\n def poll(self, flag=os.WNOHANG):\n if self.returncode is None:\n from multiprocess.connection import wait\n timeout = 0 if flag == os.WNOHANG else None\n if not wait([self.sentinel], timeout):\n return None\n try:\n self.returncode = forkserver.read_signed(self.sentinel)\n except (OSError, EOFError):\n # This should not happen usually, but perhaps the forkserver\n # process itself got killed\n self.returncode = 255\n\n return self.returncode\n | .venv\Lib\site-packages\multiprocess\popen_forkserver.py | 
popen_forkserver.py | Python | 2,227 | 0.95 | 0.22973 | 0.163934 | vue-tools | 428 | 2023-07-12T04:53:27.783873 | GPL-3.0 | false | eb505f52cdc72b160429aaafbbb60c82 |
import io\nimport os\n\nfrom .context import reduction, set_spawning_popen\nfrom . import popen_fork\nfrom . import spawn\nfrom . import util\n\n__all__ = ['Popen']\n\n\n#\n# Wrapper for an fd used while launching a process\n#\n\nclass _DupFd(object):\n def __init__(self, fd):\n self.fd = fd\n def detach(self):\n return self.fd\n\n#\n# Start child process using a fresh interpreter\n#\n\nclass Popen(popen_fork.Popen):\n method = 'spawn'\n DupFd = _DupFd\n\n def __init__(self, process_obj):\n self._fds = []\n super().__init__(process_obj)\n\n def duplicate_for_child(self, fd):\n self._fds.append(fd)\n return fd\n\n def _launch(self, process_obj):\n from . import resource_tracker\n tracker_fd = resource_tracker.getfd()\n self._fds.append(tracker_fd)\n prep_data = spawn.get_preparation_data(process_obj._name)\n fp = io.BytesIO()\n set_spawning_popen(self)\n try:\n reduction.dump(prep_data, fp)\n reduction.dump(process_obj, fp)\n finally:\n set_spawning_popen(None)\n\n parent_r = child_w = child_r = parent_w = None\n try:\n parent_r, child_w = os.pipe()\n child_r, parent_w = os.pipe()\n cmd = spawn.get_command_line(tracker_fd=tracker_fd,\n pipe_handle=child_r)\n self._fds.extend([child_r, child_w])\n self.pid = util.spawnv_passfds(spawn.get_executable(),\n cmd, self._fds)\n self.sentinel = parent_r\n with open(parent_w, 'wb', closefd=False) as f:\n f.write(fp.getbuffer())\n finally:\n fds_to_close = []\n for fd in (parent_r, parent_w):\n if fd is not None:\n fds_to_close.append(fd)\n self.finalizer = util.Finalize(self, util.close_fds, fds_to_close)\n\n for fd in (child_r, child_w):\n if fd is not None:\n os.close(fd)\n | .venv\Lib\site-packages\multiprocess\popen_spawn_posix.py | popen_spawn_posix.py | Python | 2,029 | 0.95 | 0.208333 | 0.1 | node-utils | 757 | 2024-12-25T10:18:26.443057 | GPL-3.0 | false | 2bd77248af91f0a2405fd8873bcc7634 |
import os\nimport msvcrt\nimport signal\nimport sys\nimport _winapi\n\nfrom .context import reduction, get_spawning_popen, set_spawning_popen\nfrom . import spawn\nfrom . import util\n\n__all__ = ['Popen']\n\n#\n#\n#\n\n# Exit code used by Popen.terminate()\nTERMINATE = 0x10000\nWINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))\nWINSERVICE = sys.executable.lower().endswith("pythonservice.exe")\n\n\ndef _path_eq(p1, p2):\n return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)\n\nWINENV = not _path_eq(sys.executable, sys._base_executable)\n\n\ndef _close_handles(*handles):\n for handle in handles:\n _winapi.CloseHandle(handle)\n\n\n#\n# We define a Popen class similar to the one from subprocess, but\n# whose constructor takes a process object as its argument.\n#\n\nclass Popen(object):\n '''\n Start a subprocess to run the code of a process object\n '''\n method = 'spawn'\n\n def __init__(self, process_obj):\n prep_data = spawn.get_preparation_data(process_obj._name)\n\n # read end of pipe will be duplicated by the child process\n # -- see spawn_main() in spawn.py.\n #\n # bpo-33929: Previously, the read end of pipe was "stolen" by the child\n # process, but it leaked a handle if the child process had been\n # terminated before it could steal the handle from the parent process.\n rhandle, whandle = _winapi.CreatePipe(None, 0)\n wfd = msvcrt.open_osfhandle(whandle, 0)\n cmd = spawn.get_command_line(parent_pid=os.getpid(),\n pipe_handle=rhandle)\n\n python_exe = spawn.get_executable()\n\n # bpo-35797: When running in a venv, we bypass the redirect\n # executor and launch our base Python.\n if WINENV and _path_eq(python_exe, sys.executable):\n cmd[0] = python_exe = sys._base_executable\n env = os.environ.copy()\n env["__PYVENV_LAUNCHER__"] = sys.executable\n else:\n env = None\n\n cmd = ' '.join('"%s"' % x for x in cmd)\n\n with open(wfd, 'wb', closefd=True) as to_child:\n # start process\n try:\n hp, ht, pid, tid = _winapi.CreateProcess(\n 
python_exe, cmd,\n None, None, False, 0, env, None, None)\n _winapi.CloseHandle(ht)\n except:\n _winapi.CloseHandle(rhandle)\n raise\n\n # set attributes of self\n self.pid = pid\n self.returncode = None\n self._handle = hp\n self.sentinel = int(hp)\n self.finalizer = util.Finalize(self, _close_handles,\n (self.sentinel, int(rhandle)))\n\n # send information to child\n set_spawning_popen(self)\n try:\n reduction.dump(prep_data, to_child)\n reduction.dump(process_obj, to_child)\n finally:\n set_spawning_popen(None)\n\n def duplicate_for_child(self, handle):\n assert self is get_spawning_popen()\n return reduction.duplicate(handle, self.sentinel)\n\n def wait(self, timeout=None):\n if self.returncode is None:\n if timeout is None:\n msecs = _winapi.INFINITE\n else:\n msecs = max(0, int(timeout * 1000 + 0.5))\n\n res = _winapi.WaitForSingleObject(int(self._handle), msecs)\n if res == _winapi.WAIT_OBJECT_0:\n code = _winapi.GetExitCodeProcess(self._handle)\n if code == TERMINATE:\n code = -signal.SIGTERM\n self.returncode = code\n\n return self.returncode\n\n def poll(self):\n return self.wait(timeout=0)\n\n def terminate(self):\n if self.returncode is None:\n try:\n _winapi.TerminateProcess(int(self._handle), TERMINATE)\n except PermissionError:\n # ERROR_ACCESS_DENIED (winerror 5) is received when the\n # process already died.\n code = _winapi.GetExitCodeProcess(int(self._handle))\n if code == _winapi.STILL_ACTIVE:\n raise\n self.returncode = code\n else:\n self.returncode = -signal.SIGTERM\n\n kill = terminate\n\n def close(self):\n self.finalizer()\n | .venv\Lib\site-packages\multiprocess\popen_spawn_win32.py | popen_spawn_win32.py | Python | 4,353 | 0.95 | 0.165468 | 0.189189 | python-kit | 690 | 2023-11-18T05:17:36.722365 | MIT | false | af518d5bd1bcfe2563f1370631da81ad |
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['BaseProcess', 'current_process', 'active_children',
           'parent_process']

#
# Imports
#

import os
import sys
import signal
import itertools
import threading
from _weakrefset import WeakSet

#
# Module state captured at import time
#

# Remember the working directory at import; getcwd() can raise OSError if
# the directory has been deleted, in which case we record None.
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None

#
# Public functions
#

def current_process():
    '''
    Return process object representing the current process
    '''
    return _current_process

def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap finished children first so the returned list only has live ones.
    _cleanup()
    return list(_children)


def parent_process():
    '''
    Return process object representing the parent process
    '''
    return _parent_process

#
# Private helpers
#

def _cleanup():
    # check for processes which have finished
    for p in list(_children):
        if (child_popen := p._popen) and child_popen.poll() is not None:
            _children.discard(p)

#
# The `Process` class
#

class BaseProcess(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''
    def _Popen(self):
        # Overridden by concrete Process classes in each start-method context.
        raise NotImplementedError

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
                 *, daemon=None):
        assert group is None, 'group argument must be None for now'
        # Identity is the parent's identity plus a per-parent counter, e.g.
        # (1, 3) for the third child of child number one.
        count = next(_process_counter)
        self._identity = _current_process._identity + (count,)
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._parent_name = _current_process.name
        self._popen = None
        self._closed = False
        self._target = target
        # Copy args/kwargs defensively so later caller mutation has no effect.
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)
        if daemon is not None:
            self.daemon = daemon
        _dangling.add(self)

    def _check_closed(self):
        # Guard used by all public methods after close() has been called.
        if self._closed:
            raise ValueError("process object is closed")

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        self._check_closed()
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._config.get('daemon'), \
               'daemonic processes are not allowed to have children'
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        # Avoid a refcycle if the target function holds an indirect
        # reference to the process object (see bpo-30775)
        del self._target, self._args, self._kwargs
        _children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.terminate()

    def kill(self):
        '''
        Terminate process; sends SIGKILL signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.kill()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        self._check_closed()
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        # res is None on timeout; only a terminated child is discarded.
        if res is not None:
            _children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        self._check_closed()
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'

        if self._popen is None:
            # Never started.
            return False

        returncode = self._popen.poll()
        if returncode is None:
            return True
        else:
            # Child has exited: drop it from the live-children set.
            _children.discard(self)
            return False

    def close(self):
        '''
        Close the Process object.

        This method releases resources held by the Process object.  It is
        an error to call this method if the child process is still running.
        '''
        if self._popen is not None:
            if self._popen.poll() is None:
                raise ValueError("Cannot close a process while it is still running. "
                                 "You should first call join() or terminate().")
            self._popen.close()
            self._popen = None
            del self._sentinel
            _children.discard(self)
        self._closed = True

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        # Wrapped in AuthenticationString so it cannot be pickled by accident.
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        self._check_closed()
        if self._popen is None:
            # Not started: self._popen is None, which is the documented result.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        self._check_closed()
        if self is _current_process:
            return os.getpid()
        else:
            # None if not started, otherwise the child's pid.
            return self._popen and self._popen.pid

    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        self._check_closed()
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started") from None

    def __repr__(self):
        exitcode = None
        if self is _current_process:
            status = 'started'
        elif self._closed:
            status = 'closed'
        elif self._parent_pid != os.getpid():
            # Object was pickled into another process; we cannot poll it here.
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            exitcode = self._popen.poll()
            if exitcode is not None:
                status = 'stopped'
            else:
                status = 'started'

        info = [type(self).__name__, 'name=%r' % self._name]
        if self._popen is not None:
            info.append('pid=%s' % self._popen.pid)
        info.append('parent=%s' % self._parent_pid)
        info.append(status)
        if exitcode is not None:
            # Show e.g. '-SIGTERM' instead of a raw negative number.
            exitcode = _exitcode_to_name.get(exitcode, exitcode)
            info.append('exitcode=%s' % exitcode)
        if self.daemon:
            info.append('daemon')
        return '<%s>' % ' '.join(info)

    ##

    def _bootstrap(self, parent_sentinel=None):
        # Entry point executed in the child process: installs this object as
        # the child's _current_process, runs self.run(), and returns the
        # exit code the child should exit with.
        from . import util, context
        global _current_process, _parent_process, _process_counter, _children

        try:
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            _process_counter = itertools.count(1)
            _children = set()
            util._close_stdin()
            old_process = _current_process
            _current_process = self
            _parent_process = _ParentProcess(
                self._parent_name, self._parent_pid, parent_sentinel)
            if threading._HAVE_THREAD_NATIVE_ID:
                threading.main_thread()._set_native_id()
            try:
                self._after_fork()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if e.code is None:
                exitcode = 0
            elif isinstance(e.code, int):
                exitcode = e.code
            else:
                # Non-integer SystemExit payloads are printed and map to 1.
                sys.stderr.write(str(e.code) + '\n')
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            traceback.print_exc()
        finally:
            threading._shutdown()
            util.info('process exiting with exitcode %d' % exitcode)
            util._flush_std_streams()

        return exitcode

    @staticmethod
    def _after_fork():
        # Reset fork-sensitive state inherited from the parent.
        from . import util
        util._finalizer_registry.clear()
        util._run_after_forkers()


#
# We subclass bytes to avoid accidental transmission of auth keys over network
#

class AuthenticationString(bytes):
    def __reduce__(self):
        from .context import get_spawning_popen
        # Only picklable while a child is being spawned; any other pickling
        # attempt (e.g. over a network connection) is rejected.
        if get_spawning_popen() is None:
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons'
                )
        return AuthenticationString, (bytes(self),)


#
# Create object representing the parent process
#

class _ParentProcess(BaseProcess):
    # Read-only stand-in for the parent process inside a child; liveness is
    # observed through the inherited sentinel handle, not via waitpid().

    def __init__(self, name, pid, sentinel):
        self._identity = ()
        self._name = name
        self._pid = pid
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._sentinel = sentinel
        self._config = {}

    def is_alive(self):
        from multiprocess.connection import wait
        # wait() with timeout=0 returns the sentinel only once it is ready,
        # i.e. once the parent has exited.
        return not wait([self._sentinel], timeout=0)

    @property
    def ident(self):
        return self._pid

    def join(self, timeout=None):
        '''
        Wait until parent process terminates
        '''
        from multiprocess.connection import wait
        wait([self._sentinel], timeout=timeout)

    pid = ident

#
# Create object representing the main process
#

class _MainProcess(BaseProcess):

    def __init__(self):
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}
        # Note that some versions of FreeBSD only allow named
        # semaphores to have names of up to 14 characters.  Therefore
        # we choose a short prefix.
        #
        # On MacOSX in a sandbox it may be necessary to use a
        # different prefix -- see #19478.
        #
        # Everything in self._config will be inherited by descendant
        # processes.

    def close(self):
        # The main process object owns no Popen resources to release.
        pass


_parent_process = None
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess

#
# Give names to some return codes
#

_exitcode_to_name = {}

# Map -SIGxxx exit codes to readable names like '-SIGTERM' for __repr__.
for name, signum in list(signal.__dict__.items()):
    if name[:3]=='SIG' and '_' not in name:
        _exitcode_to_name[-signum] = f'-{name}'
del name, signum

# For debug and leak testing
_dangling = WeakSet()
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']

import sys
import os
import threading
import collections
import time
import types
import weakref
import errno

from queue import Empty, Full

try:
    import _multiprocess as _multiprocessing
except ImportError:
    import _multiprocessing

from . import connection
from . import context
_ForkingPickler = context.reduction.ForkingPickler

from .util import debug, info, Finalize, register_after_fork, is_exiting

#
# Queue type using a pipe, buffer and thread
#

class Queue(object):
    '''A multi-producer, multi-consumer process-shared queue.

    Objects put on the queue are appended to an in-process buffer and
    pickled/written to the underlying pipe by a background feeder thread,
    so put() normally does not block on the pipe.
    '''

    def __init__(self, maxsize=0, *, ctx):
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from .synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # Writes to a message-oriented win32 pipe are atomic, so no
            # write lock is needed.
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        # Counts free slots; put() acquires, get() releases.
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False
        self._reset()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        # Only picklable while spawning a child process.
        context.assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._reset()

    def _after_fork(self):
        debug('Queue._after_fork()')
        self._reset(after_fork=True)

    def _reset(self, after_fork=False):
        # (Re-)initialize per-process state: buffer, feeder-thread handles
        # and bound pipe methods.
        if after_fork:
            self._notempty._at_fork_reinit()
        else:
            self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        '''Put obj on the queue; raises Full if no free slot in time.'''
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()

    def get(self, block=True, timeout=None):
        '''Remove and return an item; raises Empty if none arrives in time.'''
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if block and timeout is None:
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()
        else:
            if block:
                # time.monotonic is guaranteed since Python 3.3; the old
                # getattr(time, 'monotonic', time.time) fallback was a dead
                # compatibility shim and is gone.
                deadline = time.monotonic() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # Account for time already spent waiting on the lock.
                    timeout = deadline - time.monotonic()
                    if not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def qsize(self):
        '''Return the approximate size of the queue (not reliable!).'''
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        '''Return True if the queue is empty (not reliable!).'''
        return not self._poll()

    def full(self):
        '''Return True if the queue is full (not reliable!).'''
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        '''Indicate no more data will be put on this queue by this process.'''
        self._closed = True
        close = self._close
        if close:
            self._close = None
            close()

    def join_thread(self):
        '''Join the feeder thread; only callable after close().'''
        debug('Queue.join_thread()')
        assert self._closed, "Queue {0!r} not closed".format(self)
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        '''Prevent process exit from blocking on the feeder thread.'''
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            # join finalizer not created yet (feeder never started)
            pass

    def _terminate_broken(self):
        # Close a Queue on error.

        # gh-94777: Prevent queue writing to a pipe which is no longer read.
        self._reader.close()

        self.close()
        self.join_thread()

    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._reader.close, self._writer.close,
                  self._ignore_epipe, self._on_queue_feeder_error,
                  self._sem),
            name='QueueFeederThread',
            daemon=True,
        )

        try:
            debug('doing self._thread.start()')
            self._thread.start()
            debug('... done self._thread.start()')
        except:
            # gh-109047: During Python finalization, creating a thread
            # can fail with RuntimeError.
            self._thread = None
            raise

        if not self._joincancelled:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )

    @staticmethod
    def _finalize_join(twr):
        # Join the feeder thread via a weak reference (it may be gone).
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        # Ask the feeder thread to drain the buffer and exit.
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()

    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, reader_close,
              writer_close, ignore_epipe, onerror, queue_sem):
        # Body of the feeder thread: pop objects off the buffer, pickle
        # them and push the bytes down the pipe until the sentinel arrives.
        debug('starting thread to feed data to pipe')
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            reader_close()
                            writer_close()
                            return

                        # serialize the data before acquiring the lock
                        obj = _ForkingPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    # buffer drained; go back to waiting on the condition
                    pass
            except Exception as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may be become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to cleanup.
                if is_exiting():
                    info('error in queue thread: %s', e)
                    return
                else:
                    # Since the object has not been sent in the queue, we need
                    # to decrease the size of the queue. The error acts as
                    # if the object had been silently removed from the queue
                    # and this step is necessary to have a properly working
                    # queue.
                    queue_sem.release()
                    onerror(e, obj)

    @staticmethod
    def _on_queue_feeder_error(e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        """
        import traceback
        traceback.print_exc()

    __class_getitem__ = classmethod(types.GenericAlias)


# Opaque marker telling the feeder thread to shut down.
_sentinel = object()

#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#

class JoinableQueue(Queue):
    '''Queue subclass that tracks unfinished tasks for join()/task_done().'''

    def __init__(self, maxsize=0, *, ctx):
        Queue.__init__(self, maxsize, ctx=ctx)
        self._unfinished_tasks = ctx.Semaphore(0)
        self._cond = ctx.Condition()

    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        '''Put obj on the queue and bump the unfinished-tasks count.'''
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty, self._cond:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()

    def task_done(self):
        '''Mark one previously-gotten task as done.'''
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()

    def join(self):
        '''Block until all items have been gotten and marked done.'''
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()

#
# Simplified Queue type -- really just a locked pipe
#

class SimpleQueue(object):
    '''Minimal process-shared queue: a pipe protected by a read lock.'''

    def __init__(self, *, ctx):
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._poll = self._reader.poll
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = ctx.Lock()

    def close(self):
        self._reader.close()
        self._writer.close()

    def empty(self):
        return not self._poll()

    def __getstate__(self):
        context.assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        self._poll = self._reader.poll

    def get(self):
        '''Remove and return an item; blocks until one is available.'''
        with self._rlock:
            res = self._reader.recv_bytes()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def put(self, obj):
        '''Put obj on the queue; never blocks on a slot count.'''
        # serialize the data before acquiring the lock
        obj = _ForkingPickler.dumps(obj)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)

    __class_getitem__ = classmethod(types.GenericAlias)
#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

from abc import ABCMeta
import copyreg
import functools
import io
import os
# multiprocess prefers dill (pickles more object kinds) but falls back to
# the stdlib pickle when dill is not installed.
try:
    import dill as pickle
except ImportError:
    import pickle
import socket
import sys

from . import context

__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']


# True when fds/handles can be passed between processes: always on Windows,
# and on Unix when the socket module exposes SCM_RIGHTS ancillary data.
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#

class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocess.'''
    # Class-level registry of extra reduce functions, merged into each
    # instance's dispatch_table on construction.
    _extra_reducers = {}
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)
        self.dispatch_table = self._copyreg_dispatch_table.copy()
        self.dispatch_table.update(self._extra_reducers)

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None, *args, **kwds):
        # Returns a memoryview over the BytesIO buffer (zero-copy).
        buf = io.BytesIO()
        cls(buf, protocol, *args, **kwds).dump(obj)
        return buf.getbuffer()

    loads = pickle.loads

register = ForkingPickler.register

def dump(obj, file, protocol=None, *args, **kwds):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol, *args, **kwds).dump(obj)

#
# Platform specific definitions
#

if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False,
                  *, source_process=None):
        '''Duplicate a handle.  (target_process is a handle not a pid!)'''
        current_process = _winapi.GetCurrentProcess()
        if source_process is None:
            source_process = current_process
        if target_process is None:
            target_process = current_process
        return _winapi.DuplicateHandle(
            source_process, handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        # DUPLICATE_CLOSE_SOURCE closes the handle in the source process.
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle.  This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        # One-byte payload carries len(fds) mod 256 as a sanity check.
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError('received %d items of ancdata' %
                                   len(ancdata))
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # Cross-check against the mod-256 count sent by sendfds().
                if len(a) % 256 != msg[0]:
                    raise AssertionError(
                        "Len is {0:n} but msg[0] is {1!r}".format(
                            len(a), msg[0]))
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        popen_obj = context.get_spawning_popen()
        if popen_obj is not None:
            # fd is being inherited by a child currently being spawned.
            return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
        elif HAVE_SEND_HANDLE:
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        else:
            raise ValueError('SCM_RIGHTS appears not to be available')

#
# Try making some callable types picklable
#

def _reduce_method(m):
    # Bound/unbound methods reduce to a getattr on their class or instance.
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Dummy class used only to obtain the bound-method type below.
    def f(self):
        pass
register(type(_C().f), _reduce_method)


def _reduce_method_descriptor(m):
    return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)


def _reduce_partial(p):
    return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
    return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)

#
# Make sockets picklable
#

if sys.platform == 'win32':
    def _reduce_socket(s):
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:
    def _reduce_socket(s):
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)


class AbstractReducer(metaclass=ABCMeta):
    '''Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocess.'''
    ForkingPickler = ForkingPickler
    register = register
    dump = dump
    send_handle = send_handle
    recv_handle = recv_handle

    if sys.platform == 'win32':
        steal_handle = steal_handle
        duplicate = duplicate
        DupHandle = DupHandle
    else:
        sendfds = sendfds
        recvfds = recvfds
        DupFd = DupFd

    _reduce_method = _reduce_method
    _reduce_method_descriptor = _reduce_method_descriptor
    _rebuild_partial = _rebuild_partial
    _reduce_socket = _reduce_socket
    _rebuild_socket = _rebuild_socket

    def __init__(self, *args):
        # Re-register the default reducers on instantiation.
        register(type(_C().f), _reduce_method)
        register(type(list.append), _reduce_method_descriptor)
        register(type(int.__add__), _reduce_method_descriptor)
        register(functools.partial, _reduce_partial)
        register(socket.socket, _reduce_socket)
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return.  The unpickling process will connect
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#

import os
import signal
import socket
import sys
import threading

from . import process
from .context import reduction
from . import util

__all__ = ['stop']


if sys.platform == 'win32':
    __all__ += ['DupSocket']

    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            new_sock = sock.dup()
            def send(conn, pid):
                # share() produces bytes that the target pid can rebuild from.
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)

else:
    __all__ += ['DupFd']

    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate so the wrapper stays valid even if the caller closes fd.
            new_fd = os.dup(fd)
            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)


class _ResourceSharer(object):
    '''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0
        # key -> (send, close) callbacks for each registered resource
        self._cache = {}
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            if self._address is None:
                # Lazily start the listener/serving thread on first use.
                self._start()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                # Sending None tells _serve() to break out of its loop.
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
            for key, (send, close) in self._cache.items():
                close()
            self._cache.clear()

    def _afterfork(self):
        # In a forked child, the parent's listener/thread are unusable:
        # close registered resources and reset all serving state.
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        self._lock._at_fork_reinit()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def _start(self):
        from .connection import Listener
        assert self._listener is None, "Already have Listener"
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True
        t.start()
        self._thread = t

    def _serve(self):
        # Background thread: accept one connection per request, look up the
        # registered resource by key and send it to the requesting pid.
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:
                        # stop() was called.
                        break
                    key, destination_pid = msg
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())


_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
###############################################################################\n# Server process to keep track of unlinked resources (like shared memory\n# segments, semaphores etc.) and clean them.\n#\n# On Unix we run a server process which keeps track of unlinked\n# resources. The server ignores SIGINT and SIGTERM and reads from a\n# pipe. Every other process of the program has a copy of the writable\n# end of the pipe, so we get EOF when all other processes have exited.\n# Then the server process unlinks any remaining resource names.\n#\n# This is important because there may be system limits for such resources: for\n# instance, the system only supports a limited number of named semaphores, and\n# shared-memory segments live in the RAM. If a python process leaks such a\n# resource, this resource will not be removed till the next reboot. Without\n# this resource tracker process, "killall python" would probably leave unlinked\n# resources.\n\nimport os\nimport signal\nimport sys\nimport threading\nimport warnings\n\nfrom . import spawn\nfrom . import util\n\n__all__ = ['ensure_running', 'register', 'unregister']\n\n_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')\n_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)\n\n_CLEANUP_FUNCS = {\n 'noop': lambda: None,\n}\n\nif os.name == 'posix':\n try:\n import _multiprocess as _multiprocessing\n except ImportError:\n import _multiprocessing\n import _posixshmem\n\n # Use sem_unlink() to clean up named semaphores.\n #\n # sem_unlink() may be missing if the Python build process detected the\n # absence of POSIX named semaphores. 
In that case, no named semaphores were\n # ever opened, so no cleanup would be necessary.\n if hasattr(_multiprocessing, 'sem_unlink'):\n _CLEANUP_FUNCS.update({\n 'semaphore': _multiprocessing.sem_unlink,\n })\n _CLEANUP_FUNCS.update({\n 'shared_memory': _posixshmem.shm_unlink,\n })\n\n\nclass ReentrantCallError(RuntimeError):\n pass\n\n\nclass ResourceTracker(object):\n\n def __init__(self):\n self._lock = threading.RLock()\n self._fd = None\n self._pid = None\n\n def _reentrant_call_error(self):\n # gh-109629: this happens if an explicit call to the ResourceTracker\n # gets interrupted by a garbage collection, invoking a finalizer (*)\n # that itself calls back into ResourceTracker.\n # (*) for example the SemLock finalizer\n raise ReentrantCallError(\n "Reentrant call into the multiprocessing resource tracker")\n\n def _stop(self):\n with self._lock:\n # This should not happen (_stop() isn't called by a finalizer)\n # but we check for it anyway.\n if getattr(self._lock, "_recursion_count", int)() > 1:\n return self._reentrant_call_error()\n if self._fd is None:\n # not running\n return\n\n # closing the "alive" file descriptor stops main()\n os.close(self._fd)\n self._fd = None\n\n os.waitpid(self._pid, 0)\n self._pid = None\n\n def getfd(self):\n self.ensure_running()\n return self._fd\n\n def ensure_running(self):\n '''Make sure that resource tracker process is running.\n\n This can be run from any process. 
Usually a child process will use\n the resource created by its parent.'''\n with self._lock:\n if getattr(self._lock, "_recursion_count", int)() > 1:\n # The code below is certainly not reentrant-safe, so bail out\n return self._reentrant_call_error()\n if self._fd is not None:\n # resource tracker was launched before, is it still running?\n if self._check_alive():\n # => still alive\n return\n # => dead, launch it again\n os.close(self._fd)\n\n # Clean-up to avoid dangling processes.\n try:\n # _pid can be None if this process is a child from another\n # python process, which has started the resource_tracker.\n if self._pid is not None:\n os.waitpid(self._pid, 0)\n except ChildProcessError:\n # The resource_tracker has already been terminated.\n pass\n self._fd = None\n self._pid = None\n\n warnings.warn('resource_tracker: process died unexpectedly, '\n 'relaunching. Some resources might leak.')\n\n fds_to_pass = []\n try:\n fds_to_pass.append(sys.stderr.fileno())\n except Exception:\n pass\n cmd = 'from multiprocess.resource_tracker import main;main(%d)'\n r, w = os.pipe()\n try:\n fds_to_pass.append(r)\n # process will out live us, so no need to wait on pid\n exe = spawn.get_executable()\n args = [exe] + util._args_from_interpreter_flags()\n args += ['-c', cmd % r]\n # bpo-33613: Register a signal mask that will block the signals.\n # This signal mask will be inherited by the child that is going\n # to be spawned and will protect the child from a race condition\n # that can make the child die before it registers signal handlers\n # for SIGINT and SIGTERM. 
The mask is unregistered after spawning\n # the child.\n try:\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)\n pid = util.spawnv_passfds(exe, args, fds_to_pass)\n finally:\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)\n except:\n os.close(w)\n raise\n else:\n self._fd = w\n self._pid = pid\n finally:\n os.close(r)\n\n def _check_alive(self):\n '''Check that the pipe has not been closed by sending a probe.'''\n try:\n # We cannot use send here as it calls ensure_running, creating\n # a cycle.\n os.write(self._fd, b'PROBE:0:noop\n')\n except OSError:\n return False\n else:\n return True\n\n def register(self, name, rtype):\n '''Register name of resource with resource tracker.'''\n self._send('REGISTER', name, rtype)\n\n def unregister(self, name, rtype):\n '''Unregister name of resource with resource tracker.'''\n self._send('UNREGISTER', name, rtype)\n\n def _send(self, cmd, name, rtype):\n try:\n self.ensure_running()\n except ReentrantCallError:\n # The code below might or might not work, depending on whether\n # the resource tracker was already running and still alive.\n # Better warn the user.\n # (XXX is warnings.warn itself reentrant-safe? :-)\n warnings.warn(\n f"ResourceTracker called reentrantly for resource cleanup, "\n f"which is unsupported. 
"\n f"The {rtype} object {name!r} might leak.")\n msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')\n if len(msg) > 512:\n # posix guarantees that writes to a pipe of less than PIPE_BUF\n # bytes are atomic, and that PIPE_BUF >= 512\n raise ValueError('msg too long')\n nbytes = os.write(self._fd, msg)\n assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(\n nbytes, len(msg))\n\n\n_resource_tracker = ResourceTracker()\nensure_running = _resource_tracker.ensure_running\nregister = _resource_tracker.register\nunregister = _resource_tracker.unregister\ngetfd = _resource_tracker.getfd\n\n\ndef main(fd):\n '''Run resource tracker.'''\n # protect the process from ^C and "killall python" etc\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n signal.signal(signal.SIGTERM, signal.SIG_IGN)\n if _HAVE_SIGMASK:\n signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)\n\n for f in (sys.stdin, sys.stdout):\n try:\n f.close()\n except Exception:\n pass\n\n cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}\n try:\n # keep track of registered/unregistered resources\n with open(fd, 'rb') as f:\n for line in f:\n try:\n cmd, name, rtype = line.strip().decode('ascii').split(':')\n cleanup_func = _CLEANUP_FUNCS.get(rtype, None)\n if cleanup_func is None:\n raise ValueError(\n f'Cannot register {name} for automatic cleanup: '\n f'unknown resource type {rtype}')\n\n if cmd == 'REGISTER':\n cache[rtype].add(name)\n elif cmd == 'UNREGISTER':\n cache[rtype].remove(name)\n elif cmd == 'PROBE':\n pass\n else:\n raise RuntimeError('unrecognized command %r' % cmd)\n except Exception:\n try:\n sys.excepthook(*sys.exc_info())\n except:\n pass\n finally:\n # all processes have terminated; cleanup any remaining resources\n for rtype, rtype_cache in cache.items():\n if rtype_cache:\n try:\n warnings.warn('resource_tracker: There appear to be %d '\n 'leaked %s objects to clean up at shutdown' %\n (len(rtype_cache), rtype))\n except Exception:\n pass\n for name 
in rtype_cache:\n # For some reason the process which created and registered this\n # resource has failed to unregister it. Presumably it has\n # died. We therefore unlink it.\n try:\n try:\n _CLEANUP_FUNCS[rtype](name)\n except Exception as e:\n warnings.warn('resource_tracker: %r: %s' % (name, e))\n finally:\n pass\n | .venv\Lib\site-packages\multiprocess\resource_tracker.py | resource_tracker.py | Python | 10,449 | 0.95 | 0.205882 | 0.242678 | awesome-app | 894 | 2024-04-23T17:24:09.640486 | BSD-3-Clause | false | 5ec27cc426ce5b5581280c203d224818 |
#\n# Module which supports allocation of ctypes objects from shared memory\n#\n# multiprocessing/sharedctypes.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\nimport ctypes\nimport weakref\n\nfrom . import heap\nfrom . import get_context\n\nfrom .context import reduction, assert_spawning\n_ForkingPickler = reduction.ForkingPickler\n\n__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']\n\n#\n#\n#\n\ntypecode_to_type = {\n 'c': ctypes.c_char, 'u': ctypes.c_wchar,\n 'b': ctypes.c_byte, 'B': ctypes.c_ubyte,\n 'h': ctypes.c_short, 'H': ctypes.c_ushort,\n 'i': ctypes.c_int, 'I': ctypes.c_uint,\n 'l': ctypes.c_long, 'L': ctypes.c_ulong,\n 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong,\n 'f': ctypes.c_float, 'd': ctypes.c_double\n }\n\n#\n#\n#\n\ndef _new_value(type_):\n size = ctypes.sizeof(type_)\n wrapper = heap.BufferWrapper(size)\n return rebuild_ctype(type_, wrapper, None)\n\ndef RawValue(typecode_or_type, *args):\n '''\n Returns a ctypes object allocated from shared memory\n '''\n type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)\n obj = _new_value(type_)\n ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))\n obj.__init__(*args)\n return obj\n\ndef RawArray(typecode_or_type, size_or_initializer):\n '''\n Returns a ctypes array allocated from shared memory\n '''\n type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)\n if isinstance(size_or_initializer, int):\n type_ = type_ * size_or_initializer\n obj = _new_value(type_)\n ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))\n return obj\n else:\n type_ = type_ * len(size_or_initializer)\n result = _new_value(type_)\n result.__init__(*size_or_initializer)\n return result\n\ndef Value(typecode_or_type, *args, lock=True, ctx=None):\n '''\n Return a synchronization wrapper for a Value\n '''\n obj = RawValue(typecode_or_type, *args)\n if lock is False:\n return obj\n if lock in (True, None):\n ctx = 
ctx or get_context()\n lock = ctx.RLock()\n if not hasattr(lock, 'acquire'):\n raise AttributeError("%r has no method 'acquire'" % lock)\n return synchronized(obj, lock, ctx=ctx)\n\ndef Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):\n '''\n Return a synchronization wrapper for a RawArray\n '''\n obj = RawArray(typecode_or_type, size_or_initializer)\n if lock is False:\n return obj\n if lock in (True, None):\n ctx = ctx or get_context()\n lock = ctx.RLock()\n if not hasattr(lock, 'acquire'):\n raise AttributeError("%r has no method 'acquire'" % lock)\n return synchronized(obj, lock, ctx=ctx)\n\ndef copy(obj):\n new_obj = _new_value(type(obj))\n ctypes.pointer(new_obj)[0] = obj\n return new_obj\n\ndef synchronized(obj, lock=None, ctx=None):\n assert not isinstance(obj, SynchronizedBase), 'object already synchronized'\n ctx = ctx or get_context()\n\n if isinstance(obj, ctypes._SimpleCData):\n return Synchronized(obj, lock, ctx)\n elif isinstance(obj, ctypes.Array):\n if obj._type_ is ctypes.c_char:\n return SynchronizedString(obj, lock, ctx)\n return SynchronizedArray(obj, lock, ctx)\n else:\n cls = type(obj)\n try:\n scls = class_cache[cls]\n except KeyError:\n names = [field[0] for field in cls._fields_]\n d = {name: make_property(name) for name in names}\n classname = 'Synchronized' + cls.__name__\n scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)\n return scls(obj, lock, ctx)\n\n#\n# Functions for pickling/unpickling\n#\n\ndef reduce_ctype(obj):\n assert_spawning(obj)\n if isinstance(obj, ctypes.Array):\n return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)\n else:\n return rebuild_ctype, (type(obj), obj._wrapper, None)\n\ndef rebuild_ctype(type_, wrapper, length):\n if length is not None:\n type_ = type_ * length\n _ForkingPickler.register(type_, reduce_ctype)\n buf = wrapper.create_memoryview()\n obj = type_.from_buffer(buf)\n obj._wrapper = wrapper\n return obj\n\n#\n# Function to create properties\n#\n\ndef 
make_property(name):\n try:\n return prop_cache[name]\n except KeyError:\n d = {}\n exec(template % ((name,)*7), d)\n prop_cache[name] = d[name]\n return d[name]\n\ntemplate = '''\ndef get%s(self):\n self.acquire()\n try:\n return self._obj.%s\n finally:\n self.release()\ndef set%s(self, value):\n self.acquire()\n try:\n self._obj.%s = value\n finally:\n self.release()\n%s = property(get%s, set%s)\n'''\n\nprop_cache = {}\nclass_cache = weakref.WeakKeyDictionary()\n\n#\n# Synchronized wrappers\n#\n\nclass SynchronizedBase(object):\n\n def __init__(self, obj, lock=None, ctx=None):\n self._obj = obj\n if lock:\n self._lock = lock\n else:\n ctx = ctx or get_context(force=True)\n self._lock = ctx.RLock()\n self.acquire = self._lock.acquire\n self.release = self._lock.release\n\n def __enter__(self):\n return self._lock.__enter__()\n\n def __exit__(self, *args):\n return self._lock.__exit__(*args)\n\n def __reduce__(self):\n assert_spawning(self)\n return synchronized, (self._obj, self._lock)\n\n def get_obj(self):\n return self._obj\n\n def get_lock(self):\n return self._lock\n\n def __repr__(self):\n return '<%s wrapper for %s>' % (type(self).__name__, self._obj)\n\n\nclass Synchronized(SynchronizedBase):\n value = make_property('value')\n\n\nclass SynchronizedArray(SynchronizedBase):\n\n def __len__(self):\n return len(self._obj)\n\n def __getitem__(self, i):\n with self:\n return self._obj[i]\n\n def __setitem__(self, i, value):\n with self:\n self._obj[i] = value\n\n def __getslice__(self, start, stop):\n with self:\n return self._obj[start:stop]\n\n def __setslice__(self, start, stop, values):\n with self:\n self._obj[start:stop] = values\n\n\nclass SynchronizedString(SynchronizedArray):\n value = make_property('value')\n raw = make_property('raw')\n | .venv\Lib\site-packages\multiprocess\sharedctypes.py | sharedctypes.py | Python | 6,306 | 0.95 | 0.208333 | 0.116162 | node-utils | 199 | 2024-07-26T16:33:59.986884 | GPL-3.0 | false | 
12da49a7cd8a8a411bd4e270e8af14a2 |
"""Provides shared memory for direct access across processes.\n\nThe API of this package is currently provisional. Refer to the\ndocumentation for details.\n"""\n\n\n__all__ = [ 'SharedMemory', 'ShareableList' ]\n\n\nfrom functools import partial\nimport mmap\nimport os\nimport errno\nimport struct\nimport secrets\nimport types\n\nif os.name == "nt":\n import _winapi\n _USE_POSIX = False\nelse:\n import _posixshmem\n _USE_POSIX = True\n\nfrom . import resource_tracker\n\n_O_CREX = os.O_CREAT | os.O_EXCL\n\n# FreeBSD (and perhaps other BSDs) limit names to 14 characters.\n_SHM_SAFE_NAME_LENGTH = 14\n\n# Shared memory block name prefix\nif _USE_POSIX:\n _SHM_NAME_PREFIX = '/psm_'\nelse:\n _SHM_NAME_PREFIX = 'wnsm_'\n\n\ndef _make_filename():\n "Create a random filename for the shared memory object."\n # number of random bytes to use for name\n nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2\n assert nbytes >= 2, '_SHM_NAME_PREFIX too long'\n name = _SHM_NAME_PREFIX + secrets.token_hex(nbytes)\n assert len(name) <= _SHM_SAFE_NAME_LENGTH\n return name\n\n\nclass SharedMemory:\n """Creates a new shared memory block or attaches to an existing\n shared memory block.\n\n Every shared memory block is assigned a unique name. This enables\n one process to create a shared memory block with a particular name\n so that a different process can attach to that same shared memory\n block using that same name.\n\n As a resource for sharing data across processes, shared memory blocks\n may outlive the original process that created them. 
When one process\n no longer needs access to a shared memory block that might still be\n needed by other processes, the close() method should be called.\n When a shared memory block is no longer needed by any process, the\n unlink() method should be called to ensure proper cleanup."""\n\n # Defaults; enables close() and unlink() to run without errors.\n _name = None\n _fd = -1\n _mmap = None\n _buf = None\n _flags = os.O_RDWR\n _mode = 0o600\n _prepend_leading_slash = True if _USE_POSIX else False\n\n def __init__(self, name=None, create=False, size=0):\n if not size >= 0:\n raise ValueError("'size' must be a positive integer")\n if create:\n self._flags = _O_CREX | os.O_RDWR\n if size == 0:\n raise ValueError("'size' must be a positive number different from zero")\n if name is None and not self._flags & os.O_EXCL:\n raise ValueError("'name' can only be None if create=True")\n\n if _USE_POSIX:\n\n # POSIX Shared Memory\n\n if name is None:\n while True:\n name = _make_filename()\n try:\n self._fd = _posixshmem.shm_open(\n name,\n self._flags,\n mode=self._mode\n )\n except FileExistsError:\n continue\n self._name = name\n break\n else:\n name = "/" + name if self._prepend_leading_slash else name\n self._fd = _posixshmem.shm_open(\n name,\n self._flags,\n mode=self._mode\n )\n self._name = name\n try:\n if create and size:\n os.ftruncate(self._fd, size)\n stats = os.fstat(self._fd)\n size = stats.st_size\n self._mmap = mmap.mmap(self._fd, size)\n except OSError:\n self.unlink()\n raise\n\n resource_tracker.register(self._name, "shared_memory")\n\n else:\n\n # Windows Named Shared Memory\n\n if create:\n while True:\n temp_name = _make_filename() if name is None else name\n # Create and reserve shared memory block with this name\n # until it can be attached to by mmap.\n h_map = _winapi.CreateFileMapping(\n _winapi.INVALID_HANDLE_VALUE,\n _winapi.NULL,\n _winapi.PAGE_READWRITE,\n (size >> 32) & 0xFFFFFFFF,\n size & 0xFFFFFFFF,\n temp_name\n )\n try:\n last_error_code 
= _winapi.GetLastError()\n if last_error_code == _winapi.ERROR_ALREADY_EXISTS:\n if name is not None:\n raise FileExistsError(\n errno.EEXIST,\n os.strerror(errno.EEXIST),\n name,\n _winapi.ERROR_ALREADY_EXISTS\n )\n else:\n continue\n self._mmap = mmap.mmap(-1, size, tagname=temp_name)\n finally:\n _winapi.CloseHandle(h_map)\n self._name = temp_name\n break\n\n else:\n self._name = name\n # Dynamically determine the existing named shared memory\n # block's size which is likely a multiple of mmap.PAGESIZE.\n h_map = _winapi.OpenFileMapping(\n _winapi.FILE_MAP_READ,\n False,\n name\n )\n try:\n p_buf = _winapi.MapViewOfFile(\n h_map,\n _winapi.FILE_MAP_READ,\n 0,\n 0,\n 0\n )\n finally:\n _winapi.CloseHandle(h_map)\n try:\n size = _winapi.VirtualQuerySize(p_buf)\n finally:\n _winapi.UnmapViewOfFile(p_buf)\n self._mmap = mmap.mmap(-1, size, tagname=name)\n\n self._size = size\n self._buf = memoryview(self._mmap)\n\n def __del__(self):\n try:\n self.close()\n except OSError:\n pass\n\n def __reduce__(self):\n return (\n self.__class__,\n (\n self.name,\n False,\n self.size,\n ),\n )\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self.name!r}, size={self.size})'\n\n @property\n def buf(self):\n "A memoryview of contents of the shared memory block."\n return self._buf\n\n @property\n def name(self):\n "Unique name that identifies the shared memory block."\n reported_name = self._name\n if _USE_POSIX and self._prepend_leading_slash:\n if self._name.startswith("/"):\n reported_name = self._name[1:]\n return reported_name\n\n @property\n def size(self):\n "Size in bytes."\n return self._size\n\n def close(self):\n """Closes access to the shared memory from this instance but does\n not destroy the shared memory block."""\n if self._buf is not None:\n self._buf.release()\n self._buf = None\n if self._mmap is not None:\n self._mmap.close()\n self._mmap = None\n if _USE_POSIX and self._fd >= 0:\n os.close(self._fd)\n self._fd = -1\n\n def unlink(self):\n 
"""Requests that the underlying shared memory block be destroyed.\n\n In order to ensure proper cleanup of resources, unlink should be\n called once (and only once) across all processes which have access\n to the shared memory block."""\n if _USE_POSIX and self._name:\n _posixshmem.shm_unlink(self._name)\n resource_tracker.unregister(self._name, "shared_memory")\n\n\n_encoding = "utf8"\n\nclass ShareableList:\n """Pattern for a mutable list-like object shareable via a shared\n memory block. It differs from the built-in list type in that these\n lists can not change their overall length (i.e. no append, insert,\n etc.)\n\n Because values are packed into a memoryview as bytes, the struct\n packing format for any storable value must require no more than 8\n characters to describe its format."""\n\n # The shared memory area is organized as follows:\n # - 8 bytes: number of items (N) as a 64-bit integer\n # - (N + 1) * 8 bytes: offsets of each element from the start of the\n # data area\n # - K bytes: the data area storing item values (with encoding and size\n # depending on their respective types)\n # - N * 8 bytes: `struct` format string for each element\n # - N bytes: index into _back_transforms_mapping for each element\n # (for reconstructing the corresponding Python value)\n _types_mapping = {\n int: "q",\n float: "d",\n bool: "xxxxxxx?",\n str: "%ds",\n bytes: "%ds",\n None.__class__: "xxxxxx?x",\n }\n _alignment = 8\n _back_transforms_mapping = {\n 0: lambda value: value, # int, float, bool\n 1: lambda value: value.rstrip(b'\x00').decode(_encoding), # str\n 2: lambda value: value.rstrip(b'\x00'), # bytes\n 3: lambda _value: None, # None\n }\n\n @staticmethod\n def _extract_recreation_code(value):\n """Used in concert with _back_transforms_mapping to convert values\n into the appropriate Python objects when retrieving them from\n the list as well as when storing them."""\n if not isinstance(value, (str, bytes, None.__class__)):\n return 0\n elif isinstance(value, 
str):\n return 1\n elif isinstance(value, bytes):\n return 2\n else:\n return 3 # NoneType\n\n def __init__(self, sequence=None, *, name=None):\n if name is None or sequence is not None:\n sequence = sequence or ()\n _formats = [\n self._types_mapping[type(item)]\n if not isinstance(item, (str, bytes))\n else self._types_mapping[type(item)] % (\n self._alignment * (len(item) // self._alignment + 1),\n )\n for item in sequence\n ]\n self._list_len = len(_formats)\n assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len\n offset = 0\n # The offsets of each list element into the shared memory's\n # data area (0 meaning the start of the data area, not the start\n # of the shared memory area).\n self._allocated_offsets = [0]\n for fmt in _formats:\n offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])\n self._allocated_offsets.append(offset)\n _recreation_codes = [\n self._extract_recreation_code(item) for item in sequence\n ]\n requested_size = struct.calcsize(\n "q" + self._format_size_metainfo +\n "".join(_formats) +\n self._format_packing_metainfo +\n self._format_back_transform_codes\n )\n\n self.shm = SharedMemory(name, create=True, size=requested_size)\n else:\n self.shm = SharedMemory(name)\n\n if sequence is not None:\n _enc = _encoding\n struct.pack_into(\n "q" + self._format_size_metainfo,\n self.shm.buf,\n 0,\n self._list_len,\n *(self._allocated_offsets)\n )\n struct.pack_into(\n "".join(_formats),\n self.shm.buf,\n self._offset_data_start,\n *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)\n )\n struct.pack_into(\n self._format_packing_metainfo,\n self.shm.buf,\n self._offset_packing_formats,\n *(v.encode(_enc) for v in _formats)\n )\n struct.pack_into(\n self._format_back_transform_codes,\n self.shm.buf,\n self._offset_back_transform_codes,\n *(_recreation_codes)\n )\n\n else:\n self._list_len = len(self) # Obtains size from offset 0 in buffer.\n self._allocated_offsets = list(\n struct.unpack_from(\n 
self._format_size_metainfo,\n self.shm.buf,\n 1 * 8\n )\n )\n\n def _get_packing_format(self, position):\n "Gets the packing format for a single value stored in the list."\n position = position if position >= 0 else position + self._list_len\n if (position >= self._list_len) or (self._list_len < 0):\n raise IndexError("Requested position out of range.")\n\n v = struct.unpack_from(\n "8s",\n self.shm.buf,\n self._offset_packing_formats + position * 8\n )[0]\n fmt = v.rstrip(b'\x00')\n fmt_as_str = fmt.decode(_encoding)\n\n return fmt_as_str\n\n def _get_back_transform(self, position):\n "Gets the back transformation function for a single value."\n\n if (position >= self._list_len) or (self._list_len < 0):\n raise IndexError("Requested position out of range.")\n\n transform_code = struct.unpack_from(\n "b",\n self.shm.buf,\n self._offset_back_transform_codes + position\n )[0]\n transform_function = self._back_transforms_mapping[transform_code]\n\n return transform_function\n\n def _set_packing_format_and_transform(self, position, fmt_as_str, value):\n """Sets the packing format and back transformation code for a\n single value in the list at the specified position."""\n\n if (position >= self._list_len) or (self._list_len < 0):\n raise IndexError("Requested position out of range.")\n\n struct.pack_into(\n "8s",\n self.shm.buf,\n self._offset_packing_formats + position * 8,\n fmt_as_str.encode(_encoding)\n )\n\n transform_code = self._extract_recreation_code(value)\n struct.pack_into(\n "b",\n self.shm.buf,\n self._offset_back_transform_codes + position,\n transform_code\n )\n\n def __getitem__(self, position):\n position = position if position >= 0 else position + self._list_len\n try:\n offset = self._offset_data_start + self._allocated_offsets[position]\n (v,) = struct.unpack_from(\n self._get_packing_format(position),\n self.shm.buf,\n offset\n )\n except IndexError:\n raise IndexError("index out of range")\n\n back_transform = self._get_back_transform(position)\n 
v = back_transform(v)\n\n return v\n\n def __setitem__(self, position, value):\n position = position if position >= 0 else position + self._list_len\n try:\n item_offset = self._allocated_offsets[position]\n offset = self._offset_data_start + item_offset\n current_format = self._get_packing_format(position)\n except IndexError:\n raise IndexError("assignment index out of range")\n\n if not isinstance(value, (str, bytes)):\n new_format = self._types_mapping[type(value)]\n encoded_value = value\n else:\n allocated_length = self._allocated_offsets[position + 1] - item_offset\n\n encoded_value = (value.encode(_encoding)\n if isinstance(value, str) else value)\n if len(encoded_value) > allocated_length:\n raise ValueError("bytes/str item exceeds available storage")\n if current_format[-1] == "s":\n new_format = current_format\n else:\n new_format = self._types_mapping[str] % (\n allocated_length,\n )\n\n self._set_packing_format_and_transform(\n position,\n new_format,\n value\n )\n struct.pack_into(new_format, self.shm.buf, offset, encoded_value)\n\n def __reduce__(self):\n return partial(self.__class__, name=self.shm.name), ()\n\n def __len__(self):\n return struct.unpack_from("q", self.shm.buf, 0)[0]\n\n def __repr__(self):\n return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'\n\n @property\n def format(self):\n "The struct packing format used by all currently stored items."\n return "".join(\n self._get_packing_format(i) for i in range(self._list_len)\n )\n\n @property\n def _format_size_metainfo(self):\n "The struct packing format used for the items' storage offsets."\n return "q" * (self._list_len + 1)\n\n @property\n def _format_packing_metainfo(self):\n "The struct packing format used for the items' packing formats."\n return "8s" * self._list_len\n\n @property\n def _format_back_transform_codes(self):\n "The struct packing format used for the items' back transforms."\n return "b" * self._list_len\n\n @property\n def 
_offset_data_start(self):\n # - 8 bytes for the list length\n # - (N + 1) * 8 bytes for the element offsets\n return (self._list_len + 2) * 8\n\n @property\n def _offset_packing_formats(self):\n return self._offset_data_start + self._allocated_offsets[-1]\n\n @property\n def _offset_back_transform_codes(self):\n return self._offset_packing_formats + self._list_len * 8\n\n def count(self, value):\n "L.count(value) -> integer -- return number of occurrences of value."\n\n return sum(value == entry for entry in self)\n\n def index(self, value):\n """L.index(value) -> integer -- return first index of value.\n Raises ValueError if the value is not present."""\n\n for position, entry in enumerate(self):\n if value == entry:\n return position\n else:\n raise ValueError(f"{value!r} not in this container")\n\n __class_getitem__ = classmethod(types.GenericAlias)\n | .venv\Lib\site-packages\multiprocess\shared_memory.py | shared_memory.py | Python | 18,458 | 0.95 | 0.20412 | 0.061538 | python-kit | 505 | 2024-10-21T23:56:04.622602 | GPL-3.0 | false | 0a48402a42233e9df692416c2fb255a8 |
#\n# Code used to start processes when using the spawn or forkserver\n# start methods.\n#\n# multiprocessing/spawn.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\nimport os\nimport sys\nimport runpy\nimport types\n\nfrom . import get_start_method, set_start_method\nfrom . import process\nfrom .context import reduction\nfrom . import util\n\n__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',\n 'get_preparation_data', 'get_command_line', 'import_main_path']\n\n#\n# _python_exe is the assumed path to the python executable.\n# People embedding Python want to modify it.\n#\n\nif sys.platform != 'win32':\n WINEXE = False\n WINSERVICE = False\nelse:\n WINEXE = getattr(sys, 'frozen', False)\n WINSERVICE = sys.executable and sys.executable.lower().endswith("pythonservice.exe")\n\ndef set_executable(exe):\n global _python_exe\n if exe is None:\n _python_exe = exe\n elif sys.platform == 'win32':\n _python_exe = os.fsdecode(exe)\n else:\n _python_exe = os.fsencode(exe)\n\ndef get_executable():\n return _python_exe\n\nif WINSERVICE:\n set_executable(os.path.join(sys.exec_prefix, 'python.exe'))\nelse:\n set_executable(sys.executable)\n\n#\n#\n#\n\ndef is_forking(argv):\n '''\n Return whether commandline indicates we are forking\n '''\n if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':\n return True\n else:\n return False\n\n\ndef freeze_support():\n '''\n Run code for process object if this in not the main process\n '''\n if is_forking(sys.argv):\n kwds = {}\n for arg in sys.argv[2:]:\n name, value = arg.split('=')\n if value == 'None':\n kwds[name] = None\n else:\n kwds[name] = int(value)\n spawn_main(**kwds)\n sys.exit()\n\n\ndef get_command_line(**kwds):\n '''\n Returns prefix of command line used for spawning a child process\n '''\n if getattr(sys, 'frozen', False):\n return ([sys.executable, '--multiprocessing-fork'] +\n ['%s=%r' % item for item in kwds.items()])\n else:\n prog = 'from 
multiprocess.spawn import spawn_main; spawn_main(%s)'\n prog %= ', '.join('%s=%r' % item for item in kwds.items())\n opts = util._args_from_interpreter_flags()\n exe = get_executable()\n return [exe] + opts + ['-c', prog, '--multiprocessing-fork']\n\n\ndef spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):\n '''\n Run code specified by data received over pipe\n '''\n assert is_forking(sys.argv), "Not forking"\n if sys.platform == 'win32':\n import msvcrt\n import _winapi\n\n if parent_pid is not None:\n source_process = _winapi.OpenProcess(\n _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE,\n False, parent_pid)\n else:\n source_process = None\n new_handle = reduction.duplicate(pipe_handle,\n source_process=source_process)\n fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)\n parent_sentinel = source_process\n else:\n from . import resource_tracker\n resource_tracker._resource_tracker._fd = tracker_fd\n fd = pipe_handle\n parent_sentinel = os.dup(pipe_handle)\n exitcode = _main(fd, parent_sentinel)\n sys.exit(exitcode)\n\n\ndef _main(fd, parent_sentinel):\n with os.fdopen(fd, 'rb', closefd=True) as from_parent:\n process.current_process()._inheriting = True\n try:\n preparation_data = reduction.pickle.load(from_parent)\n prepare(preparation_data)\n self = reduction.pickle.load(from_parent)\n finally:\n del process.current_process()._inheriting\n return self._bootstrap(parent_sentinel)\n\n\ndef _check_not_importing_main():\n if getattr(process.current_process(), '_inheriting', False):\n raise RuntimeError('''\n An attempt has been made to start a new process before the\n current process has finished its bootstrapping phase.\n\n This probably means that you are not using fork to start your\n child processes and you have forgotten to use the proper idiom\n in the main module:\n\n if __name__ == '__main__':\n freeze_support()\n ...\n\n The "freeze_support()" line can be omitted if the program\n is not going to be frozen to produce an executable.\n\n To fix 
this issue, refer to the "Safe importing of main module"\n section in https://docs.python.org/3/library/multiprocessing.html\n ''')\n\n\ndef get_preparation_data(name):\n '''\n Return info about parent needed by child to unpickle process object\n '''\n _check_not_importing_main()\n d = dict(\n log_to_stderr=util._log_to_stderr,\n authkey=process.current_process().authkey,\n )\n\n if util._logger is not None:\n d['log_level'] = util._logger.getEffectiveLevel()\n\n sys_path=sys.path.copy()\n try:\n i = sys_path.index('')\n except ValueError:\n pass\n else:\n sys_path[i] = process.ORIGINAL_DIR\n\n d.update(\n name=name,\n sys_path=sys_path,\n sys_argv=sys.argv,\n orig_dir=process.ORIGINAL_DIR,\n dir=os.getcwd(),\n start_method=get_start_method(),\n )\n\n # Figure out whether to initialise main in the subprocess as a module\n # or through direct execution (or to leave it alone entirely)\n main_module = sys.modules['__main__']\n main_mod_name = getattr(main_module.__spec__, "name", None)\n if main_mod_name is not None:\n d['init_main_from_name'] = main_mod_name\n elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):\n main_path = getattr(main_module, '__file__', None)\n if main_path is not None:\n if (not os.path.isabs(main_path) and\n process.ORIGINAL_DIR is not None):\n main_path = os.path.join(process.ORIGINAL_DIR, main_path)\n d['init_main_from_path'] = os.path.normpath(main_path)\n\n return d\n\n#\n# Prepare current process\n#\n\nold_main_modules = []\n\ndef prepare(data):\n '''\n Try to get current process ready to unpickle process object\n '''\n if 'name' in data:\n process.current_process().name = data['name']\n\n if 'authkey' in data:\n process.current_process().authkey = data['authkey']\n\n if 'log_to_stderr' in data and data['log_to_stderr']:\n util.log_to_stderr()\n\n if 'log_level' in data:\n util.get_logger().setLevel(data['log_level'])\n\n if 'sys_path' in data:\n sys.path = data['sys_path']\n\n if 'sys_argv' in data:\n sys.argv = 
data['sys_argv']\n\n if 'dir' in data:\n os.chdir(data['dir'])\n\n if 'orig_dir' in data:\n process.ORIGINAL_DIR = data['orig_dir']\n\n if 'start_method' in data:\n set_start_method(data['start_method'], force=True)\n\n if 'init_main_from_name' in data:\n _fixup_main_from_name(data['init_main_from_name'])\n elif 'init_main_from_path' in data:\n _fixup_main_from_path(data['init_main_from_path'])\n\n# Multiprocessing module helpers to fix up the main module in\n# spawned subprocesses\ndef _fixup_main_from_name(mod_name):\n # __main__.py files for packages, directories, zip archives, etc, run\n # their "main only" code unconditionally, so we don't even try to\n # populate anything in __main__, nor do we make any changes to\n # __main__ attributes\n current_main = sys.modules['__main__']\n if mod_name == "__main__" or mod_name.endswith(".__main__"):\n return\n\n # If this process was forked, __main__ may already be populated\n if getattr(current_main.__spec__, "name", None) == mod_name:\n return\n\n # Otherwise, __main__ may contain some non-main code where we need to\n # support unpickling it properly. 
We rerun it as __mp_main__ and make\n # the normal __main__ an alias to that\n old_main_modules.append(current_main)\n main_module = types.ModuleType("__mp_main__")\n main_content = runpy.run_module(mod_name,\n run_name="__mp_main__",\n alter_sys=True)\n main_module.__dict__.update(main_content)\n sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module\n\n\ndef _fixup_main_from_path(main_path):\n # If this process was forked, __main__ may already be populated\n current_main = sys.modules['__main__']\n\n # Unfortunately, the main ipython launch script historically had no\n # "if __name__ == '__main__'" guard, so we work around that\n # by treating it like a __main__.py file\n # See https://github.com/ipython/ipython/issues/4698\n main_name = os.path.splitext(os.path.basename(main_path))[0]\n if main_name == 'ipython':\n return\n\n # Otherwise, if __file__ already has the setting we expect,\n # there's nothing more to do\n if getattr(current_main, '__file__', None) == main_path:\n return\n\n # If the parent process has sent a path through rather than a module\n # name we assume it is an executable script that may contain\n # non-main code that needs to be executed\n old_main_modules.append(current_main)\n main_module = types.ModuleType("__mp_main__")\n main_content = runpy.run_path(main_path,\n run_name="__mp_main__")\n main_module.__dict__.update(main_content)\n sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module\n\n\ndef import_main_path(main_path):\n '''\n Set sys.modules['__main__'] to module at main_path\n '''\n _fixup_main_from_path(main_path)\n | .venv\Lib\site-packages\multiprocess\spawn.py | spawn.py | Python | 9,641 | 0.95 | 0.179153 | 0.162055 | react-lib | 84 | 2024-04-28T14:05:41.379843 | MIT | false | ce82ab738fae56f8e4fc36bbacec205b |
#\n# Module implementing synchronization primitives\n#\n# multiprocessing/synchronize.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\n__all__ = [\n 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'\n ]\n\nimport threading\nimport sys\nimport tempfile\ntry:\n import _multiprocess as _multiprocessing\nexcept ImportError:\n import _multiprocessing\nimport time\n\nfrom . import context\nfrom . import process\nfrom . import util\n\n# Try to import the mp.synchronize module cleanly, if it fails\n# raise ImportError for platforms lacking a working sem_open implementation.\n# See issue 3770\ntry:\n from _multiprocess import SemLock, sem_unlink\nexcept ImportError:\n try:\n from _multiprocessing import SemLock, sem_unlink\n except (ImportError):\n raise ImportError("This platform lacks a functioning sem_open" +\n " implementation, therefore, the required" +\n " synchronization primitives needed will not" +\n " function, see issue 3770.")\n\n#\n# Constants\n#\n\nRECURSIVE_MUTEX, SEMAPHORE = list(range(2))\nSEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX\n\n#\n# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`\n#\n\nclass SemLock(object):\n\n _rand = tempfile._RandomNameSequence()\n\n def __init__(self, kind, value, maxvalue, *, ctx):\n if ctx is None:\n ctx = context._default_context.get_context()\n self._is_fork_ctx = ctx.get_start_method() == 'fork'\n unlink_now = sys.platform == 'win32' or self._is_fork_ctx\n for i in range(100):\n try:\n sl = self._semlock = _multiprocessing.SemLock(\n kind, value, maxvalue, self._make_name(),\n unlink_now)\n except FileExistsError:\n pass\n else:\n break\n else:\n raise FileExistsError('cannot find name for semaphore')\n\n util.debug('created semlock with handle %s' % sl.handle)\n self._make_methods()\n\n if sys.platform != 'win32':\n def _after_fork(obj):\n obj._semlock._after_fork()\n util.register_after_fork(self, _after_fork)\n\n 
if self._semlock.name is not None:\n # We only get here if we are on Unix with forking\n # disabled. When the object is garbage collected or the\n # process shuts down we unlink the semaphore name\n from .resource_tracker import register\n register(self._semlock.name, "semaphore")\n util.Finalize(self, SemLock._cleanup, (self._semlock.name,),\n exitpriority=0)\n\n @staticmethod\n def _cleanup(name):\n from .resource_tracker import unregister\n sem_unlink(name)\n unregister(name, "semaphore")\n\n def _make_methods(self):\n self.acquire = self._semlock.acquire\n self.release = self._semlock.release\n\n def __enter__(self):\n return self._semlock.__enter__()\n\n def __exit__(self, *args):\n return self._semlock.__exit__(*args)\n\n def __getstate__(self):\n context.assert_spawning(self)\n sl = self._semlock\n if sys.platform == 'win32':\n h = context.get_spawning_popen().duplicate_for_child(sl.handle)\n else:\n if self._is_fork_ctx:\n raise RuntimeError('A SemLock created in a fork context is being ' \n 'shared with a process in a spawn context. This is ' \n 'not supported. 
Please use the same context to create ' \n 'multiprocess objects and Process.')\n h = sl.handle\n return (h, sl.kind, sl.maxvalue, sl.name)\n\n def __setstate__(self, state):\n self._semlock = _multiprocessing.SemLock._rebuild(*state)\n util.debug('recreated blocker with handle %r' % state[0])\n self._make_methods()\n # Ensure that deserialized SemLock can be serialized again (gh-108520).\n self._is_fork_ctx = False\n\n @staticmethod\n def _make_name():\n return '%s-%s' % (process.current_process()._config['semprefix'],\n next(SemLock._rand))\n\n#\n# Semaphore\n#\n\nclass Semaphore(SemLock):\n\n def __init__(self, value=1, *, ctx):\n SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)\n\n def get_value(self):\n return self._semlock._get_value()\n\n def __repr__(self):\n try:\n value = self._semlock._get_value()\n except Exception:\n value = 'unknown'\n return '<%s(value=%s)>' % (self.__class__.__name__, value)\n\n#\n# Bounded semaphore\n#\n\nclass BoundedSemaphore(Semaphore):\n\n def __init__(self, value=1, *, ctx):\n SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)\n\n def __repr__(self):\n try:\n value = self._semlock._get_value()\n except Exception:\n value = 'unknown'\n return '<%s(value=%s, maxvalue=%s)>' % \\n (self.__class__.__name__, value, self._semlock.maxvalue)\n\n#\n# Non-recursive lock\n#\n\nclass Lock(SemLock):\n\n def __init__(self, *, ctx):\n SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)\n\n def __repr__(self):\n try:\n if self._semlock._is_mine():\n name = process.current_process().name\n if threading.current_thread().name != 'MainThread':\n name += '|' + threading.current_thread().name\n elif self._semlock._get_value() == 1:\n name = 'None'\n elif self._semlock._count() > 0:\n name = 'SomeOtherThread'\n else:\n name = 'SomeOtherProcess'\n except Exception:\n name = 'unknown'\n return '<%s(owner=%s)>' % (self.__class__.__name__, name)\n\n#\n# Recursive lock\n#\n\nclass RLock(SemLock):\n\n def __init__(self, *, ctx):\n 
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)\n\n def __repr__(self):\n try:\n if self._semlock._is_mine():\n name = process.current_process().name\n if threading.current_thread().name != 'MainThread':\n name += '|' + threading.current_thread().name\n count = self._semlock._count()\n elif self._semlock._get_value() == 1:\n name, count = 'None', 0\n elif self._semlock._count() > 0:\n name, count = 'SomeOtherThread', 'nonzero'\n else:\n name, count = 'SomeOtherProcess', 'nonzero'\n except Exception:\n name, count = 'unknown', 'unknown'\n return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)\n\n#\n# Condition variable\n#\n\nclass Condition(object):\n\n def __init__(self, lock=None, *, ctx):\n self._lock = lock or ctx.RLock()\n self._sleeping_count = ctx.Semaphore(0)\n self._woken_count = ctx.Semaphore(0)\n self._wait_semaphore = ctx.Semaphore(0)\n self._make_methods()\n\n def __getstate__(self):\n context.assert_spawning(self)\n return (self._lock, self._sleeping_count,\n self._woken_count, self._wait_semaphore)\n\n def __setstate__(self, state):\n (self._lock, self._sleeping_count,\n self._woken_count, self._wait_semaphore) = state\n self._make_methods()\n\n def __enter__(self):\n return self._lock.__enter__()\n\n def __exit__(self, *args):\n return self._lock.__exit__(*args)\n\n def _make_methods(self):\n self.acquire = self._lock.acquire\n self.release = self._lock.release\n\n def __repr__(self):\n try:\n num_waiters = (self._sleeping_count._semlock._get_value() -\n self._woken_count._semlock._get_value())\n except Exception:\n num_waiters = 'unknown'\n return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)\n\n def wait(self, timeout=None):\n assert self._lock._semlock._is_mine(), \\n 'must acquire() condition before using wait()'\n\n # indicate that this thread is going to sleep\n self._sleeping_count.release()\n\n # release lock\n count = self._lock._semlock._count()\n for i in range(count):\n self._lock.release()\n\n try:\n 
# wait for notification or timeout\n return self._wait_semaphore.acquire(True, timeout)\n finally:\n # indicate that this thread has woken\n self._woken_count.release()\n\n # reacquire lock\n for i in range(count):\n self._lock.acquire()\n\n def notify(self, n=1):\n assert self._lock._semlock._is_mine(), 'lock is not owned'\n assert not self._wait_semaphore.acquire(\n False), ('notify: Should not have been able to acquire '\n + '_wait_semaphore')\n\n # to take account of timeouts since last notify*() we subtract\n # woken_count from sleeping_count and rezero woken_count\n while self._woken_count.acquire(False):\n res = self._sleeping_count.acquire(False)\n assert res, ('notify: Bug in sleeping_count.acquire'\n + '- res should not be False')\n\n sleepers = 0\n while sleepers < n and self._sleeping_count.acquire(False):\n self._wait_semaphore.release() # wake up one sleeper\n sleepers += 1\n\n if sleepers:\n for i in range(sleepers):\n self._woken_count.acquire() # wait for a sleeper to wake\n\n # rezero wait_semaphore in case some timeouts just happened\n while self._wait_semaphore.acquire(False):\n pass\n\n def notify_all(self):\n self.notify(n=sys.maxsize)\n\n def wait_for(self, predicate, timeout=None):\n result = predicate()\n if result:\n return result\n if timeout is not None:\n endtime = getattr(time,'monotonic',time.time)() + timeout\n else:\n endtime = None\n waittime = None\n while not result:\n if endtime is not None:\n waittime = endtime - getattr(time,'monotonic',time.time)()\n if waittime <= 0:\n break\n self.wait(waittime)\n result = predicate()\n return result\n\n#\n# Event\n#\n\nclass Event(object):\n\n def __init__(self, *, ctx):\n self._cond = ctx.Condition(ctx.Lock())\n self._flag = ctx.Semaphore(0)\n\n def is_set(self):\n with self._cond:\n if self._flag.acquire(False):\n self._flag.release()\n return True\n return False\n\n def set(self):\n with self._cond:\n self._flag.acquire(False)\n self._flag.release()\n self._cond.notify_all()\n\n def 
clear(self):\n with self._cond:\n self._flag.acquire(False)\n\n def wait(self, timeout=None):\n with self._cond:\n if self._flag.acquire(False):\n self._flag.release()\n else:\n self._cond.wait(timeout)\n\n if self._flag.acquire(False):\n self._flag.release()\n return True\n return False\n\n def __repr__(self) -> str:\n set_status = 'set' if self.is_set() else 'unset'\n return f"<{type(self).__qualname__} at {id(self):#x} {set_status}>"\n#\n# Barrier\n#\n\nclass Barrier(threading.Barrier):\n\n def __init__(self, parties, action=None, timeout=None, *, ctx):\n import struct\n from .heap import BufferWrapper\n wrapper = BufferWrapper(struct.calcsize('i') * 2)\n cond = ctx.Condition()\n self.__setstate__((parties, action, timeout, cond, wrapper))\n self._state = 0\n self._count = 0\n\n def __setstate__(self, state):\n (self._parties, self._action, self._timeout,\n self._cond, self._wrapper) = state\n self._array = self._wrapper.create_memoryview().cast('i')\n\n def __getstate__(self):\n return (self._parties, self._action, self._timeout,\n self._cond, self._wrapper)\n\n @property\n def _state(self):\n return self._array[0]\n\n @_state.setter\n def _state(self, value):\n self._array[0] = value\n\n @property\n def _count(self):\n return self._array[1]\n\n @_count.setter\n def _count(self, value):\n self._array[1] = value\n | .venv\Lib\site-packages\multiprocess\synchronize.py | synchronize.py | Python | 12,506 | 0.95 | 0.231707 | 0.149254 | node-utils | 664 | 2024-12-11T19:52:02.514177 | Apache-2.0 | false | c7735aca0b9918fd329b27640c348238 |
#\n# Module providing various facilities to other parts of the package\n#\n# multiprocessing/util.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\nimport os\nimport itertools\nimport sys\nimport weakref\nimport atexit\nimport threading # we want threading to install it's\n # cleanup function before multiprocessing does\nfrom subprocess import _args_from_interpreter_flags\n\nfrom . import process\n\n__all__ = [\n 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',\n 'log_to_stderr', 'get_temp_dir', 'register_after_fork',\n 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',\n 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',\n ]\n\n#\n# Logging\n#\n\nNOTSET = 0\nSUBDEBUG = 5\nDEBUG = 10\nINFO = 20\nSUBWARNING = 25\n\nLOGGER_NAME = 'multiprocess'\nDEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'\n\n_logger = None\n_log_to_stderr = False\n\ndef sub_debug(msg, *args):\n if _logger:\n _logger.log(SUBDEBUG, msg, *args)\n\ndef debug(msg, *args):\n if _logger:\n _logger.log(DEBUG, msg, *args)\n\ndef info(msg, *args):\n if _logger:\n _logger.log(INFO, msg, *args)\n\ndef sub_warning(msg, *args):\n if _logger:\n _logger.log(SUBWARNING, msg, *args)\n\ndef get_logger():\n '''\n Returns logger used by multiprocess\n '''\n global _logger\n import logging\n\n logging._acquireLock()\n try:\n if not _logger:\n\n _logger = logging.getLogger(LOGGER_NAME)\n _logger.propagate = 0\n\n # XXX multiprocessing should cleanup before logging\n if hasattr(atexit, 'unregister'):\n atexit.unregister(_exit_function)\n atexit.register(_exit_function)\n else:\n atexit._exithandlers.remove((_exit_function, (), {}))\n atexit._exithandlers.append((_exit_function, (), {}))\n\n finally:\n logging._releaseLock()\n\n return _logger\n\ndef log_to_stderr(level=None):\n '''\n Turn on logging and add a handler which prints to stderr\n '''\n global _log_to_stderr\n import logging\n\n logger = get_logger()\n formatter 
= logging.Formatter(DEFAULT_LOGGING_FORMAT)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n if level:\n logger.setLevel(level)\n _log_to_stderr = True\n return _logger\n\n\n# Abstract socket support\n\ndef _platform_supports_abstract_sockets():\n if sys.platform == "linux":\n return True\n if hasattr(sys, 'getandroidapilevel'):\n return True\n return False\n\n\ndef is_abstract_socket_namespace(address):\n if not address:\n return False\n if isinstance(address, bytes):\n return address[0] == 0\n elif isinstance(address, str):\n return address[0] == "\0"\n raise TypeError(f'address type of {address!r} unrecognized')\n\n\nabstract_sockets_supported = _platform_supports_abstract_sockets()\n\n#\n# Function returning a temp directory which will be removed on exit\n#\n\ndef _remove_temp_dir(rmtree, tempdir):\n rmtree(tempdir)\n\n current_process = process.current_process()\n # current_process() can be None if the finalizer is called\n # late during Python finalization\n if current_process is not None:\n current_process._config['tempdir'] = None\n\ndef get_temp_dir():\n # get name of a temp directory which will be automatically cleaned up\n tempdir = process.current_process()._config.get('tempdir')\n if tempdir is None:\n import shutil, tempfile\n tempdir = tempfile.mkdtemp(prefix='pymp-')\n info('created temp directory %s', tempdir)\n # keep a strong reference to shutil.rmtree(), since the finalizer\n # can be called late during Python shutdown\n Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),\n exitpriority=-100)\n process.current_process()._config['tempdir'] = tempdir\n return tempdir\n\n#\n# Support for reinitialization of objects when bootstrapping a child process\n#\n\n_afterfork_registry = weakref.WeakValueDictionary()\n_afterfork_counter = itertools.count()\n\ndef _run_after_forkers():\n items = list(_afterfork_registry.items())\n items.sort()\n for (index, ident, func), obj in items:\n try:\n 
func(obj)\n except Exception as e:\n info('after forker raised exception %s', e)\n\ndef register_after_fork(obj, func):\n _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj\n\n#\n# Finalization using weakrefs\n#\n\n_finalizer_registry = {}\n_finalizer_counter = itertools.count()\n\n\nclass Finalize(object):\n '''\n Class which supports object finalization using weakrefs\n '''\n def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):\n if (exitpriority is not None) and not isinstance(exitpriority,int):\n raise TypeError(\n "Exitpriority ({0!r}) must be None or int, not {1!s}".format(\n exitpriority, type(exitpriority)))\n\n if obj is not None:\n self._weakref = weakref.ref(obj, self)\n elif exitpriority is None:\n raise ValueError("Without object, exitpriority cannot be None")\n\n self._callback = callback\n self._args = args\n self._kwargs = kwargs or {}\n self._key = (exitpriority, next(_finalizer_counter))\n self._pid = os.getpid()\n\n _finalizer_registry[self._key] = self\n\n def __call__(self, wr=None,\n # Need to bind these locally because the globals can have\n # been cleared at shutdown\n _finalizer_registry=_finalizer_registry,\n sub_debug=sub_debug, getpid=os.getpid):\n '''\n Run the callback unless it has already been called or cancelled\n '''\n try:\n del _finalizer_registry[self._key]\n except KeyError:\n sub_debug('finalizer no longer registered')\n else:\n if self._pid != getpid():\n sub_debug('finalizer ignored because different process')\n res = None\n else:\n sub_debug('finalizer calling %s with args %s and kwargs %s',\n self._callback, self._args, self._kwargs)\n res = self._callback(*self._args, **self._kwargs)\n self._weakref = self._callback = self._args = \\n self._kwargs = self._key = None\n return res\n\n def cancel(self):\n '''\n Cancel finalization of the object\n '''\n try:\n del _finalizer_registry[self._key]\n except KeyError:\n pass\n else:\n self._weakref = self._callback = self._args = \\n 
self._kwargs = self._key = None\n\n def still_active(self):\n '''\n Return whether this finalizer is still waiting to invoke callback\n '''\n return self._key in _finalizer_registry\n\n def __repr__(self):\n try:\n obj = self._weakref()\n except (AttributeError, TypeError):\n obj = None\n\n if obj is None:\n return '<%s object, dead>' % self.__class__.__name__\n\n x = '<%s object, callback=%s' % (\n self.__class__.__name__,\n getattr(self._callback, '__name__', self._callback))\n if self._args:\n x += ', args=' + str(self._args)\n if self._kwargs:\n x += ', kwargs=' + str(self._kwargs)\n if self._key[0] is not None:\n x += ', exitpriority=' + str(self._key[0])\n return x + '>'\n\n\ndef _run_finalizers(minpriority=None):\n '''\n Run all finalizers whose exit priority is not None and at least minpriority\n\n Finalizers with highest priority are called first; finalizers with\n the same priority will be called in reverse order of creation.\n '''\n if _finalizer_registry is None:\n # This function may be called after this module's globals are\n # destroyed. 
See the _exit_function function in this module for more\n # notes.\n return\n\n if minpriority is None:\n f = lambda p : p[0] is not None\n else:\n f = lambda p : p[0] is not None and p[0] >= minpriority\n\n # Careful: _finalizer_registry may be mutated while this function\n # is running (either by a GC run or by another thread).\n\n # list(_finalizer_registry) should be atomic, while\n # list(_finalizer_registry.items()) is not.\n keys = [key for key in list(_finalizer_registry) if f(key)]\n keys.sort(reverse=True)\n\n for key in keys:\n finalizer = _finalizer_registry.get(key)\n # key may have been removed from the registry\n if finalizer is not None:\n sub_debug('calling %s', finalizer)\n try:\n finalizer()\n except Exception:\n import traceback\n traceback.print_exc()\n\n if minpriority is None:\n _finalizer_registry.clear()\n\n#\n# Clean up on exit\n#\n\ndef is_exiting():\n '''\n Returns true if the process is shutting down\n '''\n return _exiting or _exiting is None\n\n_exiting = False\n\ndef _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,\n active_children=process.active_children,\n current_process=process.current_process):\n # We hold on to references to functions in the arglist due to the\n # situation described below, where this function is called after this\n # module's globals are destroyed.\n\n global _exiting\n\n if not _exiting:\n _exiting = True\n\n info('process shutting down')\n debug('running all "atexit" finalizers with priority >= 0')\n _run_finalizers(0)\n\n if current_process() is not None:\n # We check if the current process is None here because if\n # it's None, any call to ``active_children()`` will raise\n # an AttributeError (active_children winds up trying to\n # get attributes from util._current_process). One\n # situation where this can happen is if someone has\n # manipulated sys.modules, causing this module to be\n # garbage collected. 
The destructor for the module type\n # then replaces all values in the module dict with None.\n # For instance, after setuptools runs a test it replaces\n # sys.modules with a copy created earlier. See issues\n # #9775 and #15881. Also related: #4106, #9205, and\n # #9207.\n\n for p in active_children():\n if p.daemon:\n info('calling terminate() for daemon %s', p.name)\n p._popen.terminate()\n\n for p in active_children():\n info('calling join() for process %s', p.name)\n p.join()\n\n debug('running the remaining "atexit" finalizers')\n _run_finalizers()\n\natexit.register(_exit_function)\n\n#\n# Some fork aware types\n#\n\nclass ForkAwareThreadLock(object):\n def __init__(self):\n self._lock = threading.Lock()\n self.acquire = self._lock.acquire\n self.release = self._lock.release\n register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)\n\n def _at_fork_reinit(self):\n self._lock._at_fork_reinit()\n\n def __enter__(self):\n return self._lock.__enter__()\n\n def __exit__(self, *args):\n return self._lock.__exit__(*args)\n\n\nclass ForkAwareLocal(threading.local):\n def __init__(self):\n register_after_fork(self, lambda obj : obj.__dict__.clear())\n def __reduce__(self):\n return type(self), ()\n\n#\n# Close fds except those specified\n#\n\ntry:\n MAXFD = os.sysconf("SC_OPEN_MAX")\nexcept Exception:\n MAXFD = 256\n\ndef close_all_fds_except(fds):\n fds = list(fds) + [-1, MAXFD]\n fds.sort()\n assert fds[-1] == MAXFD, 'fd too large'\n for i in range(len(fds) - 1):\n os.closerange(fds[i]+1, fds[i+1])\n#\n# Close sys.stdin and replace stdin with os.devnull\n#\n\ndef _close_stdin():\n if sys.stdin is None:\n return\n\n try:\n sys.stdin.close()\n except (OSError, ValueError):\n pass\n\n try:\n fd = os.open(os.devnull, os.O_RDONLY)\n try:\n sys.stdin = open(fd, encoding="utf-8", closefd=False)\n except:\n os.close(fd)\n raise\n except (OSError, ValueError):\n pass\n\n#\n# Flush standard streams, if any\n#\n\ndef _flush_std_streams():\n try:\n sys.stdout.flush()\n 
except (AttributeError, ValueError):\n pass\n try:\n sys.stderr.flush()\n except (AttributeError, ValueError):\n pass\n\n#\n# Start a program with only specified fds kept open\n#\n\ndef spawnv_passfds(path, args, passfds):\n import _posixsubprocess\n import subprocess\n passfds = tuple(sorted(map(int, passfds)))\n errpipe_read, errpipe_write = os.pipe()\n try:\n return _posixsubprocess.fork_exec(\n args, [path], True, passfds, None, None,\n -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,\n False, False, -1, None, None, None, -1, None,\n subprocess._USE_VFORK)\n finally:\n os.close(errpipe_read)\n os.close(errpipe_write)\n\n\ndef close_fds(*fds):\n """Close each file descriptor given as an argument"""\n for fd in fds:\n os.close(fd)\n\n\ndef _cleanup_tests():\n """Cleanup multiprocessing resources when multiprocessing tests\n completed."""\n\n from test import support\n\n # cleanup multiprocessing\n process._cleanup()\n\n # Stop the ForkServer process if it's running\n from multiprocess import forkserver\n forkserver._forkserver._stop()\n\n # Stop the ResourceTracker process if it's running\n from multiprocess import resource_tracker\n resource_tracker._resource_tracker._stop()\n\n # bpo-37421: Explicitly call _run_finalizers() to remove immediately\n # temporary directories created by multiprocessing.util.get_temp_dir().\n _run_finalizers()\n support.gc_collect()\n\n support.reap_children()\n | .venv\Lib\site-packages\multiprocess\util.py | util.py | Python | 14,060 | 0.95 | 0.211813 | 0.191919 | python-kit | 427 | 2024-05-18T06:56:30.960778 | GPL-3.0 | false | 44e3ad539ea0343c07e5e459873fe044 |
#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 2024 The Uncertainty Quantification Foundation.\n# License: 3-clause BSD. The full license text is available at:\n# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE\n'''\n-----------------------------------------------------------------\nmultiprocess: better multiprocessing and multithreading in Python\n-----------------------------------------------------------------\n\nAbout Multiprocess\n==================\n\n``multiprocess`` is a fork of ``multiprocessing``. ``multiprocess`` extends ``multiprocessing`` to provide enhanced serialization, using `dill`. ``multiprocess`` leverages ``multiprocessing`` to support the spawning of processes using the API of the Python standard library's ``threading`` module. ``multiprocessing`` has been distributed as part of the standard library since Python 2.6.\n\n``multiprocess`` is part of ``pathos``, a Python framework for heterogeneous computing.\n``multiprocess`` is in active development, so any user feedback, bug reports, comments,\nor suggestions are highly appreciated. 
A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.\n\n\nMajor Features\n==============\n\n``multiprocess`` enables:\n\n - objects to be transferred between processes using pipes or multi-producer/multi-consumer queues\n - objects to be shared between processes using a server process or (for simple data) shared memory\n\n``multiprocess`` provides:\n\n - equivalents of all the synchronization primitives in ``threading``\n - a ``Pool`` class to facilitate submitting tasks to worker processes\n - enhanced serialization, using ``dill``\n\n\nCurrent Release\n===============\n\nThe latest released version of ``multiprocess`` is available from:\n\n https://pypi.org/project/multiprocess\n\n``multiprocess`` is distributed under a 3-clause BSD license, and is a fork of ``multiprocessing``.\n\n\nDevelopment Version\n===================\n\nYou can get the latest development version with all the shiny new features at:\n\n https://github.com/uqfoundation\n\nIf you have a new contribution, please submit a pull request.\n\n\nInstallation\n============\n\n``multiprocess`` can be installed with ``pip``::\n\n $ pip install multiprocess\n\nFor Python 2, a C compiler is required to build the included extension module from source. 
Python 3 and binary installs do not require a C compiler.\n\n\nRequirements\n============\n\n``multiprocess`` requires:\n\n - ``python`` (or ``pypy``), **>=3.8**\n - ``setuptools``, **>=42**\n - ``dill``, **>=0.3.8**\n\n\nBasic Usage\n===========\n\nThe ``multiprocess.Process`` class follows the API of ``threading.Thread``.\nFor example ::\n\n from multiprocess import Process, Queue\n\n def f(q):\n q.put('hello world')\n\n if __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=[q])\n p.start()\n print (q.get())\n p.join()\n\nSynchronization primitives like locks, semaphores and conditions are\navailable, for example ::\n\n >>> from multiprocess import Condition\n >>> c = Condition()\n >>> print (c)\n <Condition(<RLock(None, 0)>), 0>\n >>> c.acquire()\n True\n >>> print (c)\n <Condition(<RLock(MainProcess, 1)>), 0>\n\nOne can also use a manager to create shared objects either in shared\nmemory or in a server process, for example ::\n\n >>> from multiprocess import Manager\n >>> manager = Manager()\n >>> l = manager.list(range(10))\n >>> l.reverse()\n >>> print (l)\n [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n >>> print (repr(l))\n <Proxy[list] object at 0x00E1B3B0>\n\nTasks can be offloaded to a pool of worker processes in various ways,\nfor example ::\n\n >>> from multiprocess import Pool\n >>> def f(x): return x*x\n ...\n >>> p = Pool(4)\n >>> result = p.map_async(f, range(10))\n >>> print (result.get(timeout=1))\n [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]\n\nWhen ``dill`` is installed, serialization is extended to most objects,\nfor example ::\n\n >>> from multiprocess import Pool\n >>> p = Pool(4)\n >>> print (p.map(lambda x: (lambda y:y**2)(x) + x, xrange(10)))\n [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]\n\n\nMore Information\n================\n\nProbably the best way to get started is to look at the documentation at\nhttp://multiprocess.rtfd.io. 
Also see ``multiprocess.tests`` for scripts that\ndemonstrate how ``multiprocess`` can be used to leverge multiple processes\nto execute Python in parallel. You can run the test suite with\n``python -m multiprocess.tests``. As ``multiprocess`` conforms to the\n``multiprocessing`` interface, the examples and documentation found at\nhttp://docs.python.org/library/multiprocessing.html also apply to\n``multiprocess`` if one will ``import multiprocessing as multiprocess``.\nSee https://github.com/uqfoundation/multiprocess/tree/master/py3.12/examples\nfor a set of examples that demonstrate some basic use cases and benchmarking\nfor running Python code in parallel. Please feel free to submit a ticket on\ngithub, or ask a question on stackoverflow (**@Mike McKerns**). If you would\nlike to share how you use ``multiprocess`` in your work, please send an email\n(to **mmckerns at uqfoundation dot org**).\n\n\nCitation\n========\n\nIf you use ``multiprocess`` to do research that leads to publication, we ask that you\nacknowledge use of ``multiprocess`` by citing the following in your publication::\n\n M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,\n "Building a framework for predictive science", Proceedings of\n the 10th Python in Science Conference, 2011;\n http://arxiv.org/pdf/1202.1056\n\n Michael McKerns and Michael Aivazis,\n "pathos: a framework for heterogeneous computing", 2010- ;\n https://uqfoundation.github.io/project/pathos\n\nPlease see https://uqfoundation.github.io/project/pathos or\nhttp://arxiv.org/pdf/1202.1056 for further information.\n\n'''\n\n__all__ = []\n__version__ = '0.70.16'\n__author__ = 'Mike McKerns'\n\n__license__ = '''\nCopyright (c) 2008-2016 California Institute of Technology.\nCopyright (c) 2016-2024 The Uncertainty Quantification Foundation.\nAll rights reserved.\n\nThis software forks the python package "multiprocessing". 
Licence and\ncopyright information for multiprocessing can be found in "COPYING".\n\nThis software is available subject to the conditions and terms laid\nout below. By downloading and using this software you are agreeing\nto the following conditions.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n - Neither the names of the copyright holders nor the names of any of\n the contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\nTO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\nOR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\nWHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\nOTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\nADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n'''\n | .venv\Lib\site-packages\multiprocess\__info__.py | __info__.py | Python | 7,997 | 0.95 | 0.085973 | 0.037736 | vue-tools | 563 | 2025-06-29T21:28:23.532520 | Apache-2.0 | false | 1f08815c727cfe4a809e1a1355529189 |
#\n# Package analogous to 'threading.py' but using processes\n#\n# multiprocessing/__init__.py\n#\n# This package is intended to duplicate the functionality (and much of\n# the API) of threading.py but uses processes instead of threads. A\n# subpackage 'multiprocessing.dummy' has the same API but is a simple\n# wrapper for 'threading'.\n#\n# Original: Copyright (c) 2006-2008, R Oudkerk\n# Original: Licensed to PSF under a Contributor Agreement.\n# Forked by Mike McKerns, to support enhanced serialization.\n\n# author, version, license, and long description\ntry: # the package is installed\n from .__info__ import __version__, __author__, __doc__, __license__\nexcept: # pragma: no cover\n import os\n import sys\n root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n sys.path.append(root)\n # get distribution meta info \n from version import (__version__, __author__,\n get_license_text, get_readme_as_rst)\n __license__ = get_license_text(os.path.join(root, 'LICENSE'))\n __license__ = "\n%s" % __license__\n __doc__ = get_readme_as_rst(os.path.join(root, 'README.md'))\n del os, sys, root, get_license_text, get_readme_as_rst\n\n\nimport sys\nfrom . 
import context\n\n#\n# Copy stuff from default context\n#\n\n__all__ = [x for x in dir(context._default_context) if not x.startswith('_')]\nglobals().update((name, getattr(context._default_context, name)) for name in __all__)\n\n#\n# XXX These should not really be documented or public.\n#\n\nSUBDEBUG = 5\nSUBWARNING = 25\n\n#\n# Alias for main module -- will be reset by bootstrapping child processes\n#\n\nif '__main__' in sys.modules:\n sys.modules['__mp_main__'] = sys.modules['__main__']\n\n\ndef license():\n """print license"""\n print (__license__)\n return\n\ndef citation():\n """print citation"""\n print (__doc__[-491:-118])\n return\n\n | .venv\Lib\site-packages\multiprocess\__init__.py | __init__.py | Python | 1,856 | 0.95 | 0.136364 | 0.45283 | awesome-app | 222 | 2024-07-22T05:01:14.271061 | MIT | false | db5c4f97014ad0259795bed8ac36afac |
#\n# Analogue of `multiprocessing.connection` which uses queues instead of sockets\n#\n# multiprocessing/dummy/connection.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\n__all__ = [ 'Client', 'Listener', 'Pipe' ]\n\nfrom queue import Queue\n\n\nfamilies = [None]\n\n\nclass Listener(object):\n\n def __init__(self, address=None, family=None, backlog=1):\n self._backlog_queue = Queue(backlog)\n\n def accept(self):\n return Connection(*self._backlog_queue.get())\n\n def close(self):\n self._backlog_queue = None\n\n @property\n def address(self):\n return self._backlog_queue\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\ndef Client(address):\n _in, _out = Queue(), Queue()\n address.put((_out, _in))\n return Connection(_in, _out)\n\n\ndef Pipe(duplex=True):\n a, b = Queue(), Queue()\n return Connection(a, b), Connection(b, a)\n\n\nclass Connection(object):\n\n def __init__(self, _in, _out):\n self._out = _out\n self._in = _in\n self.send = self.send_bytes = _out.put\n self.recv = self.recv_bytes = _in.get\n\n def poll(self, timeout=0.0):\n if self._in.qsize() > 0:\n return True\n if timeout <= 0.0:\n return False\n with self._in.not_empty:\n self._in.not_empty.wait(timeout)\n return self._in.qsize() > 0\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n | .venv\Lib\site-packages\multiprocess\dummy\connection.py | connection.py | Python | 1,598 | 0.95 | 0.226667 | 0.153846 | python-kit | 407 | 2024-09-25T08:27:08.726285 | BSD-3-Clause | false | c539bf093a05c7fcab38f9434b5de21b |
#\n# Support for the API of the multiprocessing package using threads\n#\n# multiprocessing/dummy/__init__.py\n#\n# Copyright (c) 2006-2008, R Oudkerk\n# Licensed to PSF under a Contributor Agreement.\n#\n\n__all__ = [\n 'Process', 'current_process', 'active_children', 'freeze_support',\n 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',\n 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'\n ]\n\n#\n# Imports\n#\n\nimport threading\nimport sys\nimport weakref\nimport array\n\nfrom .connection import Pipe\nfrom threading import Lock, RLock, Semaphore, BoundedSemaphore\nfrom threading import Event, Condition, Barrier\nfrom queue import Queue\n\n#\n#\n#\n\nclass DummyProcess(threading.Thread):\n\n def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):\n threading.Thread.__init__(self, group, target, name, args, kwargs)\n self._pid = None\n self._children = weakref.WeakKeyDictionary()\n self._start_called = False\n self._parent = current_process()\n\n def start(self):\n if self._parent is not current_process():\n raise RuntimeError(\n "Parent is {0!r} but current_process is {1!r}".format(\n self._parent, current_process()))\n self._start_called = True\n if hasattr(self._parent, '_children'):\n self._parent._children[self] = None\n threading.Thread.start(self)\n\n @property\n def exitcode(self):\n if self._start_called and not self.is_alive():\n return 0\n else:\n return None\n\n#\n#\n#\n\nProcess = DummyProcess\ncurrent_process = threading.current_thread\ncurrent_process()._children = weakref.WeakKeyDictionary()\n\ndef active_children():\n children = current_process()._children\n for p in list(children):\n if not p.is_alive():\n children.pop(p, None)\n return list(children)\n\ndef freeze_support():\n pass\n\n#\n#\n#\n\nclass Namespace(object):\n def __init__(self, /, **kwds):\n self.__dict__.update(kwds)\n def __repr__(self):\n items = list(self.__dict__.items())\n temp = []\n for name, value in items:\n if not 
name.startswith('_'):\n temp.append('%s=%r' % (name, value))\n temp.sort()\n return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))\n\ndict = dict\nlist = list\n\ndef Array(typecode, sequence, lock=True):\n return array.array(typecode, sequence)\n\nclass Value(object):\n def __init__(self, typecode, value, lock=True):\n self._typecode = typecode\n self._value = value\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n def __repr__(self):\n return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)\n\ndef Manager():\n return sys.modules[__name__]\n\ndef shutdown():\n pass\n\ndef Pool(processes=None, initializer=None, initargs=()):\n from ..pool import ThreadPool\n return ThreadPool(processes, initializer, initargs)\n\nJoinableQueue = Queue\n | .venv\Lib\site-packages\multiprocess\dummy\__init__.py | __init__.py | Python | 3,061 | 0.95 | 0.206349 | 0.19802 | python-kit | 628 | 2025-05-26T00:04:25.346157 | Apache-2.0 | false | cbbef451063777d332614efa907e5fd8 |
\n\n | .venv\Lib\site-packages\multiprocess\dummy\__pycache__\connection.cpython-313.pyc | connection.cpython-313.pyc | Other | 3,632 | 0.8 | 0 | 0 | vue-tools | 345 | 2023-10-22T13:09:53.174357 | Apache-2.0 | false | 6b243f7535f1d518db1c5b884da3669a |
\n\n | .venv\Lib\site-packages\multiprocess\dummy\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 5,822 | 0.8 | 0 | 0 | vue-tools | 192 | 2023-10-20T23:58:08.887083 | MIT | false | 003303904e7aa3da1a19bbaf3086b2cd |
import multiprocessing, sys\n\ndef foo():\n print("123")\n\n# Because "if __name__ == '__main__'" is missing this will not work\n# correctly on Windows. However, we should get a RuntimeError rather\n# than the Windows equivalent of a fork bomb.\n\nif len(sys.argv) > 1:\n multiprocessing.set_start_method(sys.argv[1])\nelse:\n multiprocessing.set_start_method('spawn')\n\np = multiprocessing.Process(target=foo)\np.start()\np.join()\nsys.exit(p.exitcode)\n | .venv\Lib\site-packages\multiprocess\tests\mp_fork_bomb.py | mp_fork_bomb.py | Python | 448 | 0.95 | 0.166667 | 0.214286 | react-lib | 122 | 2025-01-01T11:52:49.456137 | BSD-3-Clause | true | b2a4d66d820d7c046cabd1a7d34746b2 |
import multiprocessing\n\nmultiprocessing.Lock()\n\n\ndef f():\n print("ok")\n\n\nif __name__ == "__main__":\n ctx = multiprocessing.get_context("forkserver")\n modname = "multiprocess.tests.mp_preload"\n # Make sure it's importable\n __import__(modname)\n ctx.set_forkserver_preload([modname])\n proc = ctx.Process(target=f)\n proc.start()\n proc.join()\n | .venv\Lib\site-packages\multiprocess\tests\mp_preload.py | mp_preload.py | Python | 365 | 0.95 | 0.111111 | 0.076923 | awesome-app | 277 | 2024-07-15T12:30:54.132818 | GPL-3.0 | true | e0702254b71c3d577579d62d91063221 |
# tests __main__ module handling in multiprocessing\nfrom test import support\nfrom test.support import import_helper\n# Skip tests if _multiprocessing wasn't built.\nimport_helper.import_module('_multiprocessing')\n\nimport importlib\nimport importlib.machinery\nimport unittest\nimport sys\nimport os\nimport os.path\nimport py_compile\n\nfrom test.support import os_helper\nfrom test.support.script_helper import (\n make_pkg, make_script, make_zip_pkg, make_zip_script,\n assert_python_ok)\n\nif support.PGO:\n raise unittest.SkipTest("test is not helpful for PGO")\n\n# Look up which start methods are available to test\nimport multiprocess as multiprocessing\nAVAILABLE_START_METHODS = set(multiprocessing.get_all_start_methods())\n\n# Issue #22332: Skip tests if sem_open implementation is broken.\nimport_helper.import_module('multiprocess.synchronize')\n\nverbose = support.verbose\n\ntest_source = """\\n# multiprocessing includes all sorts of shenanigans to make __main__\n# attributes accessible in the subprocess in a pickle compatible way.\n\n# We run the "doesn't work in the interactive interpreter" example from\n# the docs to make sure it *does* work from an executed __main__,\n# regardless of the invocation mechanism\n\nimport sys\nimport time\nsys.path.extend({0})\nfrom multiprocess import Pool, set_start_method\nfrom test import support\n\n# We use this __main__ defined function in the map call below in order to\n# check that multiprocessing in correctly running the unguarded\n# code in child processes and then making it available as __main__\ndef f(x):\n return x*x\n\n# Check explicit relative imports\nif "check_sibling" in __file__:\n # We're inside a package and not in a __main__.py file\n # so make sure explicit relative imports work correctly\n from . 
import sibling\n\nif __name__ == '__main__':\n start_method = sys.argv[1]\n set_start_method(start_method)\n results = []\n with Pool(5) as pool:\n pool.map_async(f, [1, 2, 3], callback=results.extend)\n\n # up to 1 min to report the results\n for _ in support.sleeping_retry(support.LONG_TIMEOUT,\n "Timed out waiting for results"):\n if results:\n break\n\n results.sort()\n print(start_method, "->", results)\n\n pool.join()\n""".format(sys.path)\n\ntest_source_main_skipped_in_children = """\\n# __main__.py files have an implied "if __name__ == '__main__'" so\n# multiprocessing should always skip running them in child processes\n\n# This means we can't use __main__ defined functions in child processes,\n# so we just use "int" as a passthrough operation below\n\nif __name__ != "__main__":\n raise RuntimeError("Should only be called as __main__!")\n\nimport sys\nimport time\nsys.path.extend({0})\nfrom multiprocess import Pool, set_start_method\nfrom test import support\n\nstart_method = sys.argv[1]\nset_start_method(start_method)\nresults = []\nwith Pool(5) as pool:\n pool.map_async(int, [1, 4, 9], callback=results.extend)\n # up to 1 min to report the results\n for _ in support.sleeping_retry(support.LONG_TIMEOUT,\n "Timed out waiting for results"):\n if results:\n break\n\nresults.sort()\nprint(start_method, "->", results)\n\npool.join()\n""".format(sys.path)\n\n# These helpers were copied from test_cmd_line_script & tweaked a bit...\n\ndef _make_test_script(script_dir, script_basename,\n source=test_source, omit_suffix=False):\n to_return = make_script(script_dir, script_basename,\n source, omit_suffix)\n # Hack to check explicit relative imports\n if script_basename == "check_sibling":\n make_script(script_dir, "sibling", "")\n importlib.invalidate_caches()\n return to_return\n\ndef _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,\n source=test_source, depth=1):\n to_return = make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,\n 
source, depth)\n importlib.invalidate_caches()\n return to_return\n\n# There's no easy way to pass the script directory in to get\n# -m to work (avoiding that is the whole point of making\n# directories and zipfiles executable!)\n# So we fake it for testing purposes with a custom launch script\nlaunch_source = """\\nimport sys, os.path, runpy\nsys.path.insert(0, %s)\nrunpy._run_module_as_main(%r)\n"""\n\ndef _make_launch_script(script_dir, script_basename, module_name, path=None):\n if path is None:\n path = "os.path.dirname(__file__)"\n else:\n path = repr(path)\n source = launch_source % (path, module_name)\n to_return = make_script(script_dir, script_basename, source)\n importlib.invalidate_caches()\n return to_return\n\nclass MultiProcessingCmdLineMixin():\n maxDiff = None # Show full tracebacks on subprocess failure\n\n def setUp(self):\n if self.start_method not in AVAILABLE_START_METHODS:\n self.skipTest("%r start method not available" % self.start_method)\n\n def _check_output(self, script_name, exit_code, out, err):\n if verbose > 1:\n print("Output from test script %r:" % script_name)\n print(repr(out))\n self.assertEqual(exit_code, 0)\n self.assertEqual(err.decode('utf-8'), '')\n expected_results = "%s -> [1, 4, 9]" % self.start_method\n self.assertEqual(out.decode('utf-8').strip(), expected_results)\n\n def _check_script(self, script_name, *cmd_line_switches):\n if not __debug__:\n cmd_line_switches += ('-' + 'O' * sys.flags.optimize,)\n run_args = cmd_line_switches + (script_name, self.start_method)\n rc, out, err = assert_python_ok(*run_args, __isolated=False)\n self._check_output(script_name, rc, out, err)\n\n def test_basic_script(self):\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, 'script')\n self._check_script(script_name)\n\n def test_basic_script_no_suffix(self):\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, 'script',\n omit_suffix=True)\n 
self._check_script(script_name)\n\n def test_ipython_workaround(self):\n # Some versions of the IPython launch script are missing the\n # __name__ = "__main__" guard, and multiprocessing has long had\n # a workaround for that case\n # See https://github.com/ipython/ipython/issues/4698\n source = test_source_main_skipped_in_children\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, 'ipython',\n source=source)\n self._check_script(script_name)\n script_no_suffix = _make_test_script(script_dir, 'ipython',\n source=source,\n omit_suffix=True)\n self._check_script(script_no_suffix)\n\n def test_script_compiled(self):\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, 'script')\n py_compile.compile(script_name, doraise=True)\n os.remove(script_name)\n pyc_file = import_helper.make_legacy_pyc(script_name)\n self._check_script(pyc_file)\n\n def test_directory(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, '__main__',\n source=source)\n self._check_script(script_dir)\n\n def test_directory_compiled(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, '__main__',\n source=source)\n py_compile.compile(script_name, doraise=True)\n os.remove(script_name)\n pyc_file = import_helper.make_legacy_pyc(script_name)\n self._check_script(script_dir)\n\n def test_zipfile(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, '__main__',\n source=source)\n zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)\n self._check_script(zip_name)\n\n def test_zipfile_compiled(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n script_name = _make_test_script(script_dir, '__main__',\n source=source)\n 
compiled_name = py_compile.compile(script_name, doraise=True)\n zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)\n self._check_script(zip_name)\n\n def test_module_in_package(self):\n with os_helper.temp_dir() as script_dir:\n pkg_dir = os.path.join(script_dir, 'test_pkg')\n make_pkg(pkg_dir)\n script_name = _make_test_script(pkg_dir, 'check_sibling')\n launch_name = _make_launch_script(script_dir, 'launch',\n 'test_pkg.check_sibling')\n self._check_script(launch_name)\n\n def test_module_in_package_in_zipfile(self):\n with os_helper.temp_dir() as script_dir:\n zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')\n launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)\n self._check_script(launch_name)\n\n def test_module_in_subpackage_in_zipfile(self):\n with os_helper.temp_dir() as script_dir:\n zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)\n launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)\n self._check_script(launch_name)\n\n def test_package(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n pkg_dir = os.path.join(script_dir, 'test_pkg')\n make_pkg(pkg_dir)\n script_name = _make_test_script(pkg_dir, '__main__',\n source=source)\n launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')\n self._check_script(launch_name)\n\n def test_package_compiled(self):\n source = self.main_in_children_source\n with os_helper.temp_dir() as script_dir:\n pkg_dir = os.path.join(script_dir, 'test_pkg')\n make_pkg(pkg_dir)\n script_name = _make_test_script(pkg_dir, '__main__',\n source=source)\n compiled_name = py_compile.compile(script_name, doraise=True)\n os.remove(script_name)\n pyc_file = import_helper.make_legacy_pyc(script_name)\n launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')\n self._check_script(launch_name)\n\n# Test 
all supported start methods (setupClass skips as appropriate)\n\nclass SpawnCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase):\n start_method = 'spawn'\n main_in_children_source = test_source_main_skipped_in_children\n\nclass ForkCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase):\n start_method = 'fork'\n main_in_children_source = test_source\n\nclass ForkServerCmdLineTest(MultiProcessingCmdLineMixin, unittest.TestCase):\n start_method = 'forkserver'\n main_in_children_source = test_source_main_skipped_in_children\n\ndef tearDownModule():\n support.reap_children()\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_main_handling.py | test_multiprocessing_main_handling.py | Python | 11,847 | 0.95 | 0.15894 | 0.126984 | python-kit | 197 | 2024-11-25T01:50:27.135177 | Apache-2.0 | true | 74c59beee86c87c3a11f105732d8f1a8 |
#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 2018-2024 The Uncertainty Quantification Foundation.\n# License: 3-clause BSD. The full license text is available at:\n# - https://github.com/uqfoundation/multiprocess/blob/master/LICENSE\n\nimport glob\nimport os\nimport sys\nimport subprocess as sp\npython = sys.executable\ntry:\n import pox\n python = pox.which_python(version=True) or python\nexcept ImportError:\n pass\nshell = sys.platform[:3] == 'win'\n\nsuite = os.path.dirname(__file__) or os.path.curdir\ntests = glob.glob(suite + os.path.sep + '__init__.py') + \\n glob.glob(suite + os.path.sep + '*' + os.path.sep + '__init__.py')\n\n\nif __name__ == '__main__':\n\n failed = 0\n for test in tests:\n p = sp.Popen([python, test], shell=shell).wait()\n if p:\n failed = 1\n print('')\n exit(failed)\n | .venv\Lib\site-packages\multiprocess\tests\__main__.py | __main__.py | Python | 888 | 0.95 | 0.121212 | 0.214286 | vue-tools | 811 | 2024-06-05T14:40:08.211138 | Apache-2.0 | true | e60c56e55cffe108f80503a7920877bd |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'fork', only_type="manager")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\test_manager.py | test_manager.py | Python | 194 | 0.85 | 0.142857 | 0 | python-kit | 844 | 2024-08-08T01:35:22.920699 | Apache-2.0 | true | d82d7fcae0e9b12fd3a6ed54b04e91e3 |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'fork', exclude_types=True)\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\test_misc.py | test_misc.py | Python | 193 | 0.85 | 0.142857 | 0 | node-utils | 420 | 2024-04-03T10:32:18.750886 | GPL-3.0 | true | 11b021a049f317b50a8a5610a0f18603 |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'fork', only_type="processes")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\test_processes.py | test_processes.py | Python | 196 | 0.85 | 0.142857 | 0 | react-lib | 26 | 2023-08-06T03:17:16.821093 | MIT | true | d6bf91b6d8fc0d2dad2d3ad23b87fc1d |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'fork', only_type="threads")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\test_threads.py | test_threads.py | Python | 194 | 0.85 | 0.142857 | 0 | react-lib | 972 | 2024-03-24T11:28:39.536981 | GPL-3.0 | true | 2773d7fa9f3cd5aa732ff279334bf472 |
import os.path\nimport sys\nimport unittest\nfrom test import support\nimport glob\nimport subprocess as sp\npython = sys.executable\ntry:\n import pox\n python = pox.which_python(version=True) or python\nexcept ImportError:\n pass\nshell = sys.platform[:3] == 'win'\n\nif support.PGO:\n raise unittest.SkipTest("test is not helpful for PGO")\n\nif sys.platform == "win32":\n raise unittest.SkipTest("fork is not available on Windows")\n\nif sys.platform == 'darwin':\n raise unittest.SkipTest("test may crash on macOS (bpo-33725)")\n\nsuite = os.path.dirname(__file__) or os.path.curdir\ntests = glob.glob(suite + os.path.sep + 'test_*.py')\n\n\nif __name__ == '__main__':\n\n failed = 0\n for test in tests:\n p = sp.Popen([python, test], shell=shell).wait()\n if p:\n failed = 1\n print('')\n exit(failed)\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__init__.py | __init__.py | Python | 829 | 0.85 | 0.222222 | 0 | awesome-app | 813 | 2023-08-25T18:56:58.200615 | GPL-3.0 | true | e04fbf4066f27e512121b20c7c34328f |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__pycache__\test_manager.cpython-313.pyc | test_manager.cpython-313.pyc | Other | 491 | 0.7 | 0 | 0 | react-lib | 478 | 2025-01-27T18:07:49.617876 | MIT | true | 7adcbefb90646f990d6f98851b04759e |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__pycache__\test_misc.cpython-313.pyc | test_misc.cpython-313.pyc | Other | 484 | 0.7 | 0 | 0 | react-lib | 824 | 2025-04-26T13:22:44.357315 | MIT | true | eee1ddd77f4bd685c841055fbb4fd4dd |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__pycache__\test_processes.cpython-313.pyc | test_processes.cpython-313.pyc | Other | 495 | 0.7 | 0 | 0 | vue-tools | 429 | 2023-09-23T17:07:34.043582 | MIT | true | 09a694f6437bb5ffa38ee15209770ad8 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__pycache__\test_threads.cpython-313.pyc | test_threads.cpython-313.pyc | Other | 491 | 0.7 | 0 | 0 | react-lib | 1,000 | 2024-06-03T02:48:29.576437 | MIT | true | 74dcefd486bb8e20ccc9c69db334150d |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_fork\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,757 | 0.8 | 0.029412 | 0 | python-kit | 240 | 2023-12-16T17:46:07.539427 | GPL-3.0 | true | dcf9659f2654d5aaa3f5bb86634d0bce |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'forkserver', only_type="manager")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\test_manager.py | test_manager.py | Python | 200 | 0.85 | 0.142857 | 0 | node-utils | 640 | 2024-03-02T21:47:37.251289 | MIT | true | 37894ffecb12ccf3d172742b752a3b55 |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'forkserver', exclude_types=True)\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\test_misc.py | test_misc.py | Python | 199 | 0.85 | 0.142857 | 0 | python-kit | 836 | 2023-07-18T05:38:59.173626 | MIT | true | 4223eebeb933e36284ed59da87763b1a |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'forkserver', only_type="processes")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\test_processes.py | test_processes.py | Python | 202 | 0.85 | 0.142857 | 0 | node-utils | 923 | 2023-12-06T18:58:49.463969 | GPL-3.0 | true | 4c2d68d998784254b07068d1d547fa83 |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'forkserver', only_type="threads")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\test_threads.py | test_threads.py | Python | 200 | 0.85 | 0.142857 | 0 | react-lib | 353 | 2023-11-22T21:07:33.992949 | BSD-3-Clause | true | 1ba2d707ef5681a827f119891c707595 |
import os.path\nimport sys\nimport unittest\nfrom test import support\nimport glob\nimport subprocess as sp\npython = sys.executable\ntry:\n import pox\n python = pox.which_python(version=True) or python\nexcept ImportError:\n pass\nshell = sys.platform[:3] == 'win'\n\nif support.PGO:\n raise unittest.SkipTest("test is not helpful for PGO")\n\nif sys.platform == "win32":\n raise unittest.SkipTest("forkserver is not available on Windows")\n\nsuite = os.path.dirname(__file__) or os.path.curdir\ntests = glob.glob(suite + os.path.sep + 'test_*.py')\n\n\nif __name__ == '__main__':\n\n failed = 0\n for test in tests:\n p = sp.Popen([python, test], shell=shell).wait()\n if p:\n failed = 1\n print('')\n exit(failed)\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__init__.py | __init__.py | Python | 738 | 0.85 | 0.212121 | 0 | awesome-app | 340 | 2025-06-28T17:13:03.589330 | BSD-3-Clause | true | d2fe35c26317dccc677c996b653c68dc |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__pycache__\test_manager.cpython-313.pyc | test_manager.cpython-313.pyc | Other | 503 | 0.7 | 0 | 0 | vue-tools | 648 | 2024-06-21T00:50:44.932940 | MIT | true | 6a45f4ff30a20bbc5dadde06b4574555 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__pycache__\test_misc.cpython-313.pyc | test_misc.cpython-313.pyc | Other | 496 | 0.7 | 0 | 0 | node-utils | 982 | 2024-10-21T07:35:55.755323 | BSD-3-Clause | true | b833ad092402d20dca5eadf451242794 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__pycache__\test_processes.cpython-313.pyc | test_processes.cpython-313.pyc | Other | 507 | 0.7 | 0 | 0 | awesome-app | 261 | 2025-06-07T09:02:28.600947 | Apache-2.0 | true | a271f9b451d9836806fcab6e9aaeffc3 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__pycache__\test_threads.cpython-313.pyc | test_threads.cpython-313.pyc | Other | 503 | 0.7 | 0 | 0 | awesome-app | 757 | 2024-08-01T18:57:53.369015 | BSD-3-Clause | true | 8614af76905b0440f3613635183adebe |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_forkserver\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,626 | 0.7 | 0.034483 | 0 | react-lib | 513 | 2024-02-09T00:34:04.614579 | BSD-3-Clause | true | 0360f1a1719adbd7413b9af22c7557ea |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'spawn', only_type="manager")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\test_manager.py | test_manager.py | Python | 195 | 0.85 | 0.142857 | 0 | python-kit | 373 | 2024-12-05T02:33:26.320086 | GPL-3.0 | true | 18ac9687561c42ede572485564608ca6 |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'spawn', exclude_types=True)\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\test_misc.py | test_misc.py | Python | 194 | 0.85 | 0.142857 | 0 | node-utils | 497 | 2024-11-18T01:14:28.815345 | MIT | true | e8743ceda119da349810ed7f6c892ecd |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'spawn', only_type="processes")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\test_processes.py | test_processes.py | Python | 197 | 0.85 | 0.142857 | 0 | react-lib | 69 | 2024-12-20T16:46:20.842130 | GPL-3.0 | true | d74a70ef21a82900ed0f45f26d17ad8d |
import unittest\nfrom multiprocess.tests import install_tests_in_module_dict\n\ninstall_tests_in_module_dict(globals(), 'spawn', only_type="threads")\n\nif __name__ == '__main__':\n unittest.main()\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\test_threads.py | test_threads.py | Python | 195 | 0.85 | 0.142857 | 0 | vue-tools | 537 | 2024-01-11T18:28:11.683059 | MIT | true | db4ef79635befd315221ceaec520926a |
import os.path\nimport sys\nimport unittest\nfrom test import support\nimport glob\nimport subprocess as sp\npython = sys.executable\ntry:\n import pox\n python = pox.which_python(version=True) or python\nexcept ImportError:\n pass\nshell = sys.platform[:3] == 'win'\n\nif support.PGO:\n raise unittest.SkipTest("test is not helpful for PGO")\n\nsuite = os.path.dirname(__file__) or os.path.curdir\ntests = glob.glob(suite + os.path.sep + 'test_*.py')\n\n\nif __name__ == '__main__':\n\n failed = 0\n for test in tests:\n p = sp.Popen([python, test], shell=shell).wait()\n if p:\n failed = 1\n print('')\n exit(failed)\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__init__.py | __init__.py | Python | 639 | 0.85 | 0.2 | 0 | python-kit | 134 | 2025-05-28T21:34:20.578886 | MIT | true | 19d3276988fe33e87f1948a795babe3e |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__pycache__\test_manager.cpython-313.pyc | test_manager.cpython-313.pyc | Other | 493 | 0.7 | 0 | 0 | vue-tools | 541 | 2023-08-26T03:09:14.128153 | Apache-2.0 | true | 48c0f81bb2c7d2ebff6a701363e91de1 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__pycache__\test_misc.cpython-313.pyc | test_misc.cpython-313.pyc | Other | 486 | 0.7 | 0 | 0 | node-utils | 56 | 2023-10-12T04:36:27.049654 | Apache-2.0 | true | b0c3fae9e149cf11632911c48e7166be |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__pycache__\test_processes.cpython-313.pyc | test_processes.cpython-313.pyc | Other | 497 | 0.7 | 0 | 0 | node-utils | 121 | 2024-04-08T05:43:42.542294 | GPL-3.0 | true | 5b2e64a70a17135f29c8610b7a6b5350 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__pycache__\test_threads.cpython-313.pyc | test_threads.cpython-313.pyc | Other | 493 | 0.7 | 0 | 0 | vue-tools | 558 | 2023-10-22T22:14:44.755842 | GPL-3.0 | true | 992d999649ac6c7b8ab25a62e8235e5a |
\n\n | .venv\Lib\site-packages\multiprocess\tests\test_multiprocessing_spawn\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,474 | 0.7 | 0.04 | 0 | vue-tools | 932 | 2023-11-25T21:34:36.938822 | MIT | true | 6911547ae639a2824f22e45b75883245 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\__pycache__\mp_fork_bomb.cpython-313.pyc | mp_fork_bomb.cpython-313.pyc | Other | 865 | 0.7 | 0 | 0 | vue-tools | 649 | 2024-12-16T00:00:06.507389 | BSD-3-Clause | true | 285d6f5bae64122ba4feebd879f6b4d0 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\__pycache__\mp_preload.cpython-313.pyc | mp_preload.cpython-313.pyc | Other | 865 | 0.85 | 0 | 0 | python-kit | 454 | 2023-07-17T15:54:19.873235 | GPL-3.0 | true | 7eff44ed4e870806ede2fb787e5d62f3 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\__pycache__\test_multiprocessing_main_handling.cpython-313.pyc | test_multiprocessing_main_handling.cpython-313.pyc | Other | 15,343 | 0.95 | 0.055319 | 0.07732 | awesome-app | 521 | 2025-01-15T00:47:36.095345 | GPL-3.0 | true | 07d74e7888a53486ac0be96b3bc16002 |
\n\n | .venv\Lib\site-packages\multiprocess\tests\__pycache__\__main__.cpython-313.pyc | __main__.cpython-313.pyc | Other | 1,452 | 0.7 | 0 | 0 | vue-tools | 374 | 2025-07-01T04:17:56.649734 | BSD-3-Clause | true | c794d0613c1573633903f0ae978f8def |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\connection.cpython-313.pyc | connection.cpython-313.pyc | Other | 48,938 | 0.95 | 0.031008 | 0.005618 | node-utils | 184 | 2024-11-08T13:37:46.718266 | Apache-2.0 | false | 2997942e734347c508af3a99aa2e0dcf |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\context.cpython-313.pyc | context.cpython-313.pyc | Other | 17,295 | 0.8 | 0.048544 | 0.009901 | awesome-app | 456 | 2024-03-05T20:11:01.140241 | Apache-2.0 | false | 443c19ef6a47cfab44f12c0203b577c1 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\forkserver.cpython-313.pyc | forkserver.cpython-313.pyc | Other | 15,433 | 0.95 | 0.013072 | 0.029412 | node-utils | 805 | 2024-11-18T09:57:24.177378 | BSD-3-Clause | false | 5d82cff5fed89ab6003b22f7fbaf7325 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\heap.cpython-313.pyc | heap.cpython-313.pyc | Other | 14,049 | 0.8 | 0.008547 | 0 | awesome-app | 683 | 2025-03-20T03:03:14.966857 | MIT | false | d29fc54cef55b63e362e984e9cf80543 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\managers.cpython-313.pyc | managers.cpython-313.pyc | Other | 68,784 | 0.75 | 0.028668 | 0.005474 | node-utils | 786 | 2024-09-20T14:52:31.559713 | MIT | false | 32272633bbd2d1910e78329140797771 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\pool.cpython-313.pyc | pool.cpython-313.pyc | Other | 44,864 | 0.95 | 0.027431 | 0.005495 | react-lib | 776 | 2025-06-16T20:37:47.520832 | BSD-3-Clause | false | 9566800a400643dbcf86227fd32e4f47 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\popen_fork.cpython-313.pyc | popen_fork.cpython-313.pyc | Other | 4,252 | 0.8 | 0 | 0 | awesome-app | 701 | 2023-12-06T12:07:58.304079 | GPL-3.0 | false | 6eef9f3c961ce1c45b1c086ceaa1b867 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\popen_forkserver.cpython-313.pyc | popen_forkserver.cpython-313.pyc | Other | 4,151 | 0.8 | 0.022222 | 0 | vue-tools | 257 | 2024-08-23T10:38:50.933234 | GPL-3.0 | false | 4e5e21440aca4be5576a3cfa83473e45 |
\n\n | .venv\Lib\site-packages\multiprocess\__pycache__\popen_spawn_posix.cpython-313.pyc | popen_spawn_posix.cpython-313.pyc | Other | 4,075 | 0.8 | 0 | 0 | awesome-app | 89 | 2024-03-31T07:47:51.934835 | MIT | false | 9efa2ad344f6b323ef727a9f11fc493a |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.