language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 256680,
"end": 258288
} | class ____(TypedDict, total=False):
"""
:class:`altair.ScaleInvalidDataConfig` ``TypedDict`` wrapper.
Parameters
----------
angle
color
fill
fillOpacity
opacity
radius
shape
size
stroke
strokeDash
strokeOpacity
strokeWidth
theta
time
x
xOffset
y
yOffset
"""
angle: Value[float] | Literal["zero-or-min"]
color: (
Literal["zero-or-min"]
| Value[ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T]
)
fill: (
Literal["zero-or-min"]
| Value[ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None]
)
fillOpacity: Value[float] | Literal["zero-or-min"]
opacity: Value[float] | Literal["zero-or-min"]
radius: Value[float] | Literal["zero-or-min"]
shape: Value[str] | Literal["zero-or-min"]
size: Value[float] | Literal["zero-or-min"]
stroke: (
Literal["zero-or-min"]
| Value[ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None]
)
strokeDash: Literal["zero-or-min"] | Value[Sequence[float]]
strokeOpacity: Value[float] | Literal["zero-or-min"]
strokeWidth: Value[float] | Literal["zero-or-min"]
theta: Value[float] | Literal["zero-or-min"]
time: Value[float] | Literal["zero-or-min"]
x: Literal["zero-or-min"] | Value[float | Literal["width"]]
xOffset: Value[float] | Literal["zero-or-min"]
y: Literal["zero-or-min"] | Value[float | Literal["height"]]
yOffset: Value[float] | Literal["zero-or-min"]
| ScaleInvalidDataConfigKwds |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 27532,
"end": 27801
} | class ____(GenericForSequenceClassification, HunYuanMoEV1PreTrainedModel):
pass
__all__ = [
"HunYuanMoEV1ForCausalLM",
"HunYuanMoEV1Model",
"HunYuanMoEV1PreTrainedModel",
"HunYuanMoEV1ForSequenceClassification",
]
| HunYuanMoEV1ForSequenceClassification |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/items/GLVolumeItem.py | {
"start": 325,
"end": 9373
} | class ____(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem.GLGraphicsItem>`
Displays volumetric data.
"""
_shaderProgram = None
def __init__(self, data, sliceDensity=1, smooth=True, glOptions='translucent', parentItem=None):
"""
============== =======================================================================================
**Arguments:**
data Volume data to be rendered. *Must* be 4D numpy array (x, y, z, RGBA) with dtype=ubyte.
sliceDensity Density of slices to render through the volume. A value of 1 means one slice per voxel.
smooth (bool) If True, the volume slices are rendered with linear interpolation
============== =======================================================================================
"""
super().__init__()
self.setGLOptions(glOptions)
self.sliceDensity = sliceDensity
self.smooth = smooth
self.data = None
self._needUpload = False
self.texture = None
self.m_vbo_position = QtOpenGL.QOpenGLBuffer(QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
self.setParentItem(parentItem)
self.setData(data)
def setData(self, data):
self.data = data
self._needUpload = True
self.update()
def _uploadData(self):
if self.texture is None:
self.texture = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_3D, self.texture)
filt = GL.GL_LINEAR if self.smooth else GL.GL_NEAREST
GL.glTexParameteri(GL.GL_TEXTURE_3D, GL.GL_TEXTURE_MIN_FILTER, filt)
GL.glTexParameteri(GL.GL_TEXTURE_3D, GL.GL_TEXTURE_MAG_FILTER, filt)
GL.glTexParameteri(GL.GL_TEXTURE_3D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_BORDER)
GL.glTexParameteri(GL.GL_TEXTURE_3D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_BORDER)
GL.glTexParameteri(GL.GL_TEXTURE_3D, GL.GL_TEXTURE_WRAP_R, GL.GL_CLAMP_TO_BORDER)
shape = self.data.shape
context = QtGui.QOpenGLContext.currentContext()
if not context.isOpenGLES():
## Test texture dimensions first
GL.glTexImage3D(GL.GL_PROXY_TEXTURE_3D, 0, GL.GL_RGBA, shape[0], shape[1], shape[2], 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, None)
if GL.glGetTexLevelParameteriv(GL.GL_PROXY_TEXTURE_3D, 0, GL.GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 3D texture (%dx%dx%d); too large for this hardware." % shape[:3])
data = np.ascontiguousarray(self.data.transpose((2,1,0,3)))
GL.glTexImage3D(GL.GL_TEXTURE_3D, 0, GL.GL_RGBA, shape[0], shape[1], shape[2], 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, data)
GL.glBindTexture(GL.GL_TEXTURE_3D, 0)
all_vertices = []
self.lists = {}
for ax in [0,1,2]:
for d in [-1, 1]:
vertices = self.drawVolume(ax, d)
self.lists[(ax,d)] = (len(all_vertices), len(vertices))
all_vertices.extend(vertices)
pos = np.array(all_vertices, dtype=np.float32)
vbo = self.m_vbo_position
if not vbo.isCreated():
vbo.create()
vbo.bind()
vbo.allocate(pos, pos.nbytes)
vbo.release()
self._needUpload = False
@staticmethod
def getShaderProgram():
klass = GLVolumeItem
if klass._shaderProgram is not None:
return klass._shaderProgram
ctx = QtGui.QOpenGLContext.currentContext()
fmt = ctx.format()
if ctx.isOpenGLES():
if fmt.version() >= (3, 0):
glsl_version = "#version 300 es\n"
sources = SHADER_CORE
else:
glsl_version = ""
sources = SHADER_LEGACY
else:
if fmt.version() >= (3, 1):
glsl_version = "#version 140\n"
sources = SHADER_CORE
else:
glsl_version = ""
sources = SHADER_LEGACY
compiled = [shaders.compileShader([glsl_version, v], k) for k, v in sources.items()]
program = shaders.compileProgram(*compiled)
GL.glBindAttribLocation(program, 0, "a_position")
GL.glBindAttribLocation(program, 1, "a_texcoord")
GL.glLinkProgram(program)
klass._shaderProgram = program
return program
def paint(self):
if self.data is None:
return
if self._needUpload:
self._uploadData()
self.setupGLState()
mat_mvp = self.mvpMatrix()
mat_mvp = np.array(mat_mvp.data(), dtype=np.float32)
# calculate camera coordinates in this model's local space.
# (in eye space, the camera is at the origin)
modelview = self.modelViewMatrix()
cam_local = modelview.inverted()[0].map(QtGui.QVector3D())
# in local space, the model spans (0,0,0) to data.shape
center = QtGui.QVector3D(*[x/2. for x in self.data.shape[:3]])
cam = cam_local - center
cam = np.array([cam.x(), cam.y(), cam.z()])
ax = np.argmax(abs(cam))
d = 1 if cam[ax] > 0 else -1
offset, num_vertices = self.lists[(ax,d)]
program = self.getShaderProgram()
loc_pos, loc_tex = 0, 1
self.m_vbo_position.bind()
GL.glVertexAttribPointer(loc_pos, 3, GL.GL_FLOAT, False, 6*4, None)
GL.glVertexAttribPointer(loc_tex, 3, GL.GL_FLOAT, False, 6*4, GL.GLvoidp(3*4))
self.m_vbo_position.release()
enabled_locs = [loc_pos, loc_tex]
GL.glBindTexture(GL.GL_TEXTURE_3D, self.texture)
for loc in enabled_locs:
GL.glEnableVertexAttribArray(loc)
with program:
loc = GL.glGetUniformLocation(program, "u_mvp")
GL.glUniformMatrix4fv(loc, 1, False, mat_mvp)
GL.glDrawArrays(GL.GL_TRIANGLES, offset, num_vertices)
for loc in enabled_locs:
GL.glDisableVertexAttribArray(loc)
GL.glBindTexture(GL.GL_TEXTURE_3D, 0)
def drawVolume(self, ax, d):
imax = [0,1,2]
imax.remove(ax)
tp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
vp = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
nudge = [0.5/x for x in self.data.shape]
tp[0][imax[0]] = 0+nudge[imax[0]]
tp[0][imax[1]] = 0+nudge[imax[1]]
tp[1][imax[0]] = 1-nudge[imax[0]]
tp[1][imax[1]] = 0+nudge[imax[1]]
tp[2][imax[0]] = 1-nudge[imax[0]]
tp[2][imax[1]] = 1-nudge[imax[1]]
tp[3][imax[0]] = 0+nudge[imax[0]]
tp[3][imax[1]] = 1-nudge[imax[1]]
vp[0][imax[0]] = 0
vp[0][imax[1]] = 0
vp[1][imax[0]] = self.data.shape[imax[0]]
vp[1][imax[1]] = 0
vp[2][imax[0]] = self.data.shape[imax[0]]
vp[2][imax[1]] = self.data.shape[imax[1]]
vp[3][imax[0]] = 0
vp[3][imax[1]] = self.data.shape[imax[1]]
slices = self.data.shape[ax] * self.sliceDensity
r = list(range(slices))
if d == -1:
r = r[::-1]
vertices = []
tzVals = np.linspace(nudge[ax], 1.0-nudge[ax], slices)
vzVals = np.linspace(0, self.data.shape[ax], slices)
for i in r:
z = tzVals[i]
w = vzVals[i]
tp[0][ax] = z
tp[1][ax] = z
tp[2][ax] = z
tp[3][ax] = z
vp[0][ax] = w
vp[1][ax] = w
vp[2][ax] = w
vp[3][ax] = w
# assuming 0-1-2-3 are the BL, BR, TR, TL vertices of a quad
for idx in [0, 1, 3, 1, 2, 3]: # 2 triangles per quad
vtx = tuple(vp[idx]) + tuple(tp[idx])
vertices.append(vtx)
return vertices
SHADER_LEGACY = {
GL.GL_VERTEX_SHADER : """
uniform mat4 u_mvp;
attribute vec4 a_position;
attribute vec3 a_texcoord;
varying vec3 v_texcoord;
void main() {
gl_Position = u_mvp * a_position;
v_texcoord = a_texcoord;
}
""",
GL.GL_FRAGMENT_SHADER : """
uniform sampler3D u_texture;
varying vec3 v_texcoord;
void main()
{
gl_FragColor = texture3D(u_texture, v_texcoord);
}
""",
}
SHADER_CORE = {
GL.GL_VERTEX_SHADER : """
uniform mat4 u_mvp;
in vec4 a_position;
in vec3 a_texcoord;
out vec3 v_texcoord;
void main() {
gl_Position = u_mvp * a_position;
v_texcoord = a_texcoord;
}
""",
GL.GL_FRAGMENT_SHADER : """
#ifdef GL_ES
precision mediump float;
precision lowp sampler3D;
#endif
uniform sampler3D u_texture;
in vec3 v_texcoord;
out vec4 fragColor;
void main()
{
fragColor = texture(u_texture, v_texcoord);
}
""",
}
| GLVolumeItem |
python | getsentry__sentry | tests/sentry/issues/test_suspect_flags.py | {
"start": 316,
"end": 4491
} | class ____(TestCase, SnubaTestCase):
def mock_event(
self,
ts: datetime.datetime,
hash: str = "a" * 32,
group_id: int | None = None,
project_id: int = 1,
flags: list[_FlagResult] | None = None,
) -> None:
self.snuba_insert(
(
2,
"insert",
{
"event_id": uuid.uuid4().hex,
"primary_hash": hash,
"group_id": group_id if group_id else int(hash[:16], 16),
"project_id": project_id,
"message": "message",
"platform": "python",
"datetime": ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"data": {
"received": time.mktime(ts.timetuple()),
"contexts": {"flags": {"values": flags or []}},
},
},
{},
)
)
def test_query_baseline_set(self) -> None:
before = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(hours=1)
today = before + datetime.timedelta(hours=1)
later = today + datetime.timedelta(hours=1)
self.mock_event(
today,
hash="a" * 32,
flags=[
{"flag": "key", "result": True},
{"flag": "other", "result": False},
],
)
self.mock_event(
today,
hash="a" * 32,
flags=[
{"flag": "key", "result": False},
{"flag": "other", "result": False},
],
)
results = query_baseline_set(
1, 1, before, later, environments=[], flag_keys=["key", "other"]
)
assert results == [("key", "false", 1), ("key", "true", 1), ("other", "false", 2)]
def test_query_selection_set(self) -> None:
before = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(hours=1)
today = before + datetime.timedelta(hours=1)
later = today + datetime.timedelta(hours=1)
self.mock_event(
today,
hash="a" * 32,
group_id=1,
flags=[
{"flag": "key", "result": True},
{"flag": "other", "result": False},
],
)
self.mock_event(
today,
hash="a" * 32,
group_id=2,
flags=[
{"flag": "key", "result": False},
{"flag": "other", "result": False},
],
)
results = query_selection_set(1, 1, before, later, environments=[], group_id=1)
assert results == [("key", "true", 1), ("other", "false", 1)]
def test_get_suspect_flag_scores(self) -> None:
before = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(hours=1)
today = before + datetime.timedelta(hours=1)
later = today + datetime.timedelta(hours=1)
self.mock_event(
today,
group_id=1,
flags=[
{"flag": "key", "result": True},
{"flag": "other", "result": False},
],
)
self.mock_event(
today,
group_id=2,
flags=[
{"flag": "key", "result": False},
{"flag": "other", "result": False},
],
)
results = get_suspect_flag_scores(1, 1, before, later, envs=[], group_id=1)
assert results == [
{
"flag": "key",
"score": 0.01634056054997356,
"baseline_percent": 0.5,
"distribution": {
"baseline": {"false": 1, "true": 1},
"outliers": {"true": 1},
},
"is_filtered": True,
},
{
"flag": "other",
"score": 0.016181914331041776,
"baseline_percent": 0,
"distribution": {"baseline": {"false": 2}, "outliers": {"false": 1}},
"is_filtered": True,
},
]
| SnubaTest |
python | pypa__pip | src/pip/_vendor/rich/console.py | {
"start": 8771,
"end": 9274
} | class ____:
"""Render a list of lines at a given offset."""
def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
self._lines = lines
self.x = x
self.y = y
def __rich_console__(
self, console: "Console", options: ConsoleOptions
) -> RenderResult:
x = self.x
move_to = Control.move_to
for offset, line in enumerate(self._lines, self.y):
yield move_to(x, offset)
yield from line
| ScreenUpdate |
python | kamyu104__LeetCode-Solutions | Python/find-missing-elements.py | {
"start": 46,
"end": 296
} | class ____(object):
def findMissingElements(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
lookup = set(nums)
return [x for x in xrange(min(nums)+1, max(nums)) if x not in lookup]
| Solution |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/closure_models.py | {
"start": 328,
"end": 4120
} | class ____:
def __init__(self, inner, feature):
self.inner = inner
self.feature = feature
def reclassify(self, feature):
self.feature = feature
return self.inner()
def reclassify(inner, feature) -> Reclassification:
pass
def outer():
pass
def use_source(x):
pass
def test() -> None:
value = "benign"
def return_taint():
return _test_source()
def return_model_taint():
return value
source = _test_source()
def return_model_taint_tito():
use_source(source)
def return_model_query_tito():
use_source(source)
value = reclassify(
inner=return_taint,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
value = reclassify(
inner=return_model_taint,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
value = reclassify(
inner=outer,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
value = reclassify(
inner=return_model_taint_tito,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
value = reclassify(
inner=return_model_query_tito,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
def test_tito_transform():
source = _test_source()
def return_model_taint_tito():
use_source(source)
def return_model_query_tito():
use_source(source)
value = reclassify(
inner=return_model_taint_tito,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
value = reclassify(
inner=return_model_query_tito,
feature="breadcrumb1",
)
value.reclassify(feature="breadcrumb2")
def some_decorator(func):
def wrapper_func():
func()
return wrapper_func
def test_dsl_source(some_data: str) -> None:
def local_function_capturing_local_variable():
_test_sink(some_data)
def test_dsl_decorator_source(some_data: str) -> None:
@some_decorator
def decorated_local_function_capturing_local_variable():
_test_sink(some_data)
def captured_variable_model_tito():
complicated_name = _test_source()
# model simulates captured variables returned
def model_all_captured_as_tito():
complicated_name
_test_sink(model_all_captured_as_tito())
def captured_variable_model_parameter_source():
complicated_name = ...
# complicated_name has no taint outside nested functions, no issue
_test_sink(complicated_name)
# model capturing all variables as parameter sources in nested functions
def model_all_captured_as_parameter_sources():
_test_sink(complicated_name)
model_all_captured_as_parameter_sources()
_test_sink(complicated_name) # no issue
def captured_variable_model_generation_source():
complicated_name = ...
# model simulates writing taint to nonlocal
def model_all_captured_as_generation_sources():
complicated_name
_test_sink(complicated_name) # no issue
model_all_captured_as_generation_sources()
_test_sink(complicated_name)
def captured_variable_model_both_generation_parameter_source():
complicated_name = ...
# model simulates writing taint to nonlocal and parameter sources
def model_all_captured_as_generation_and_parameter_sources():
_test_sink(complicated_name)
model_all_captured_as_generation_and_parameter_sources()
_test_sink(complicated_name)
def captured_variable_model_sink():
complicated_name = _test_source()
# model simulates sink on each captured variable
def model_all_captured_as_sinks():
complicated_name
model_all_captured_as_sinks()
| Reclassification |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_precord.py | {
"start": 342,
"end": 953
} | class ____(type):
def __new__(mcs, name, bases, dct):
set_fields(dct, bases, name='_precord_fields')
store_invariants(dct, bases, '_precord_invariants', '__invariant__')
dct['_precord_mandatory_fields'] = \
set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
dct['_precord_initial_values'] = \
dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
dct['__slots__'] = ()
return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
| _PRecordMeta |
python | fluentpython__example-code | 06-dp-1class-func/classic_strategy.py | {
"start": 1173,
"end": 1397
} | class ____:
def __init__(self, product, quantity, price):
self.product = product
self.quantity = quantity
self.price = price
def total(self):
return self.price * self.quantity
| LineItem |
python | lazyprogrammer__machine_learning_examples | ab_testing/optimistic_starter.py | {
"start": 505,
"end": 1812
} | class ____:
def __init__(self, p):
# p: the win rate
self.p = p
self.p_estimate = # TODO
self.N = # TODO
def pull(self):
# draw a 1 with probability p
return np.random.random() < self.p
def update(self, x):
# TODO
self.p_estimate = # TODO
def experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
rewards = np.zeros(NUM_TRIALS)
for i in range(NUM_TRIALS):
# use optimistic initial values to select the next bandit
j = # TODO
# pull the arm for the bandit with the largest sample
x = bandits[j].pull()
# update rewards log
rewards[i] = x
# update the distribution for the bandit whose arm we just pulled
bandits[j].update(x)
# print mean estimates for each bandit
for b in bandits:
print("mean estimate:", b.p_estimate)
# print total reward
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num times selected each bandit:", [b.N for b in bandits])
# plot the results
cumulative_rewards = np.cumsum(rewards)
win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1)
plt.ylim([0, 1])
plt.plot(win_rates)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.show()
if __name__ == "__main__":
experiment()
| Bandit |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_release_file_details.py | {
"start": 9059,
"end": 11457
} | class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
assert release.count_artifacts() == 0
releasefile = ReleaseFile.objects.create(
organization_id=project.organization_id,
release_id=release.id,
file=File.objects.create(name="application.js", type="release.file"),
name="http://example.com/application.js",
)
assert release.count_artifacts() == 1
url = reverse(
"sentry-api-0-project-release-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
"file_id": releasefile.id,
},
)
response = self.client.delete(url)
assert response.status_code == 204, response.content
assert not ReleaseFile.objects.filter(id=releasefile.id).exists()
assert not File.objects.filter(id=releasefile.file.id).exists()
assert release.count_artifacts() == 0
def test_delete_archived(self) -> None:
self.login_as(user=self.user)
self.create_release_archive()
assert self.release.count_artifacts() == 2
url = lambda id: reverse(
"sentry-api-0-project-release-file-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"version": self.release.version,
"file_id": id,
},
)
id = urlsafe_b64encode(b"_~/index.js")
response = self.client.delete(url(id.decode()))
assert response.status_code == 204
assert self.release.count_artifacts() == 1
response = self.client.delete(url(urlsafe_b64encode(b"invalid_id").decode()))
assert response.status_code == 404
assert self.release.count_artifacts() == 1
response = self.client.delete(url(urlsafe_b64encode(b"_~/does_not_exist.js").decode()))
assert response.status_code == 404
assert self.release.count_artifacts() == 1
| ReleaseFileDeleteTest |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 41370,
"end": 48962
} | class ____(_TestParametrizer):
def __init__(
self,
op_list,
*,
dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported,
allowed_dtypes: Optional[Sequence[torch.dtype]] = None,
skip_if_dynamo=True,
):
self.op_list = list(op_list)
self.opinfo_dtypes = dtypes
self.allowed_dtypes = (
set(allowed_dtypes) if allowed_dtypes is not None else None
)
self.skip_if_dynamo = skip_if_dynamo
def _parametrize_test(self, test, generic_cls, device_cls):
"""Parameterizes the given test function across each op and its associated dtypes."""
if device_cls is None:
raise RuntimeError(
"The @ops decorator is only intended to be used in a device-specific "
"context; use it with instantiate_device_type_tests() instead of "
"instantiate_parametrized_tests()"
)
op = check_exhausted_iterator = object()
for op in self.op_list:
# Determine the set of dtypes to use.
dtypes: Union[set[torch.dtype], set[None]]
if isinstance(self.opinfo_dtypes, Sequence):
dtypes = set(self.opinfo_dtypes)
elif self.opinfo_dtypes == OpDTypes.unsupported_backward:
dtypes = set(get_all_dtypes()).difference(
op.supported_backward_dtypes(device_cls.device_type)
)
elif self.opinfo_dtypes == OpDTypes.supported_backward:
dtypes = op.supported_backward_dtypes(device_cls.device_type)
elif self.opinfo_dtypes == OpDTypes.unsupported:
dtypes = set(get_all_dtypes()).difference(
op.supported_dtypes(device_cls.device_type)
)
elif self.opinfo_dtypes == OpDTypes.supported:
dtypes = set(op.supported_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.any_one:
# Tries to pick a dtype that supports both forward or backward
supported = op.supported_dtypes(device_cls.device_type)
supported_backward = op.supported_backward_dtypes(
device_cls.device_type
)
supported_both = supported.intersection(supported_backward)
dtype_set = supported_both if len(supported_both) > 0 else supported
for dtype in ANY_DTYPE_ORDER:
if dtype in dtype_set:
dtypes = {dtype}
break
else:
dtypes = {}
elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
# Tries to pick a dtype that supports both CPU and CUDA
supported = set(op.dtypes).intersection(op.dtypesIfCUDA)
if supported:
dtypes = {
next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)
}
else:
dtypes = {}
elif self.opinfo_dtypes == OpDTypes.none:
dtypes = {None}
else:
raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}")
if self.allowed_dtypes is not None:
dtypes = dtypes.intersection(self.allowed_dtypes)
# Construct the test name; device / dtype parts are handled outside.
# See [Note: device and dtype suffix placement]
test_name = op.formatted_name
# Filter sample skips / xfails to only those that apply to the OpInfo.
# These are defined on the test function via decorators.
sample_skips_and_xfails = getattr(test, "sample_skips_and_xfails", None)
if sample_skips_and_xfails is not None:
sample_skips_and_xfails = [
rule
for rule in sample_skips_and_xfails
if rule.op_match_fn(device_cls.device_type, op)
]
for dtype in dtypes:
# Construct parameter kwargs to pass to the test.
param_kwargs = {"op": op}
_update_param_kwargs(param_kwargs, "dtype", dtype)
# NOTE: test_wrapper exists because we don't want to apply
# op-specific decorators to the original test.
# Test-specific decorators are applied to the original test,
# however.
try:
@wraps(test)
def test_wrapper(*args, **kwargs):
try:
return test(*args, **kwargs)
except unittest.SkipTest as e:
raise e
except Exception as e:
tracked_input = get_tracked_input()
if PRINT_REPRO_ON_FAILURE and tracked_input is not None:
e_tracked = Exception( # noqa: TRY002
f"{str(e)}\n\nCaused by {tracked_input.type_desc} "
f"at index {tracked_input.index}: "
f"{_serialize_sample(tracked_input.val)}"
)
e_tracked._tracked_input = tracked_input # type: ignore[attr]
raise e_tracked from e
raise e
finally:
clear_tracked_input()
if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR:
test_wrapper = skipIfTorchDynamo(
"Policy: we don't run OpInfo tests w/ Dynamo"
)(test_wrapper)
# Initialize info for the last input seen. This is useful for tracking
# down which inputs caused a test failure. Note that TrackedInputIter is
# responsible for managing this.
test.tracked_input = None
decorator_fn = partial(
op.get_decorators,
generic_cls.__name__,
test.__name__,
device_cls.device_type,
dtype,
)
if sample_skips_and_xfails is not None:
test_wrapper.sample_skips_and_xfails = sample_skips_and_xfails
yield (test_wrapper, test_name, param_kwargs, decorator_fn)
except Exception as ex:
# Provides an error message for debugging before rethrowing the exception
print(f"Failed to instantiate {test_name} for op {op.name}!")
raise ex
if op is check_exhausted_iterator:
raise ValueError(
"An empty op_list was passed to @ops. "
"Note that this may result from reuse of a generator."
)
# Decorator that skips a test if the given condition is true.
# Notes:
# (1) Skip conditions stack.
# (2) Skip conditions can be bools or strings. If a string the
# test base must have defined the corresponding attribute to be False
# for the test to run. If you want to use a string argument you should
# probably define a new decorator instead (see below).
# (3) Prefer the existing decorators to defining the 'device_type' kwarg.
| ops |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass3.py | {
"start": 623,
"end": 677
} | class ____(type(Base10), type(Proto10)):
pass
| Meta11 |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 3544,
"end": 4409
} | class ____:
params = (
["dict", "Series", "lambda"],
["object", "category", "int"],
[None, "ignore"],
)
param_names = ["mapper", "dtype", "na_action"]
def setup(self, mapper, dtype, na_action):
map_size = 1000
map_data = Series(map_size - np.arange(map_size), dtype=dtype)
# construct mapper
if mapper == "Series":
self.map_data = map_data
elif mapper == "dict":
self.map_data = map_data.to_dict()
elif mapper == "lambda":
map_dict = map_data.to_dict()
self.map_data = lambda x: map_dict[x]
else:
raise NotImplementedError
self.s = Series(np.random.randint(0, map_size, 10000), dtype=dtype)
def time_map(self, mapper, dtype, na_action):
self.s.map(self.map_data, na_action=na_action)
| Map |
python | PyCQA__pylint | tests/functional/d/docstrings.py | {
"start": 398,
"end": 929
} | class ____:
# missing docstring
## class BBBB:
## # missing docstring
## pass
## class CCCC:
## """yeah !"""
## def method1(self):
## pass
## def method2(self):
## """ yeah !"""
## pass
# +1: [missing-function-docstring]
def method1(self):
pass
def method2(self):
""" yeah !"""
pass
# +1: [empty-docstring]
def method3(self):
""""""
pass
def __init__(self):
pass
| AAAA |
python | getsentry__sentry | src/sentry/features/exceptions.py | {
"start": 37,
"end": 346
} | class ____(Exception):
def __init__(self, name: str) -> None:
msg = (
'The "{}" feature has not been registered. '
"Ensure that a feature has been added to sentry.features.default_manager"
)
super(Exception, self).__init__(msg.format(name))
| FeatureNotRegistered |
python | plotly__plotly.py | plotly/graph_objs/contour/_hoverlabel.py | {
"start": 233,
"end": 11241
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour"
_path_str = "contour.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.contour.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | django__django | django/core/exceptions.py | {
"start": 2128,
"end": 2243
} | class ____(Exception):
"""This middleware is not used in this server configuration"""
pass
| MiddlewareNotUsed |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 187928,
"end": 188051
} | class ____(_MatrixMixin, TestLIL):
spcreator = lil_matrix
TestLIL.init_class()
TestLILMatrix.init_class()
| TestLILMatrix |
python | walkccc__LeetCode | solutions/1723. Find Minimum Time to Finish All Jobs/1723.py | {
"start": 0,
"end": 785
} | class ____:
def minimumTimeRequired(self, jobs: list[int], k: int) -> int:
ans = sum(jobs)
times = [0] * k # times[i] := accumulate time of workers[i]
# Assign the most time-consuming job first.
jobs.sort(reverse=True)
def dfs(s: int) -> None:
nonlocal ans
if s == len(jobs):
ans = min(ans, max(times))
return
for i in range(k):
# There is no need to explore assigning jobs[s] to workers[i] further as
# it would not yield better results.
if times[i] + jobs[s] >= ans:
continue
times[i] += jobs[s]
dfs(s + 1)
times[i] -= jobs[s]
# It's always non-optimal to have a worker with no jobs.
if times[i] == 0:
return
dfs(0)
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 3168,
"end": 3221
} | class ____:
x: str = ""
@dataclass(frozen=True)
| DC9 |
python | conda__conda | conda/common/_logic.py | {
"start": 6059,
"end": 6689
} | class ____(_SatSolver):
def setup(self, m, threads=1, **kwargs):
from pycryptosat import Solver
solver = Solver(threads=threads)
solver.add_clauses(self._clauses.as_list())
return solver
def invoke(self, solver):
sat, sat_solution = solver.solve()
if not sat:
sat_solution = None
return sat_solution
def process_solution(self, solution):
if not solution:
return None
# The first element of the solution is always None.
solution = [i for i, b in enumerate(solution) if b]
return solution
| _PyCryptoSatSolver |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 115463,
"end": 118301
} | class ____(TypedDict, total=False):
type: Required[Literal['dataclass-field']]
name: Required[str]
schema: Required[CoreSchema]
kw_only: bool # default: True
init: bool # default: True
init_only: bool # default: False
frozen: bool # default: False
validation_alias: Union[str, list[Union[str, int]], list[list[Union[str, int]]]]
serialization_alias: str
serialization_exclude: bool # default: False
metadata: dict[str, Any]
serialization_exclude_if: Callable[[Any], bool] # default: None
def dataclass_field(
name: str,
schema: CoreSchema,
*,
kw_only: bool | None = None,
init: bool | None = None,
init_only: bool | None = None,
validation_alias: str | list[str | int] | list[list[str | int]] | None = None,
serialization_alias: str | None = None,
serialization_exclude: bool | None = None,
metadata: dict[str, Any] | None = None,
serialization_exclude_if: Callable[[Any], bool] | None = None,
frozen: bool | None = None,
) -> DataclassField:
"""
Returns a schema for a dataclass field, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
field = core_schema.dataclass_field(
name='a', schema=core_schema.str_schema(), kw_only=False
)
schema = core_schema.dataclass_args_schema('Foobar', [field])
v = SchemaValidator(schema)
assert v.validate_python({'a': 'hello'}) == ({'a': 'hello'}, None)
```
Args:
name: The name to use for the argument parameter
schema: The schema to use for the argument parameter
kw_only: Whether the field can be set with a positional argument as well as a keyword argument
init: Whether the field should be validated during initialization
init_only: Whether the field should be omitted from `__dict__` and passed to `__post_init__`
validation_alias: The alias(es) to use to find the field in the validation data
serialization_alias: The alias to use as a key when serializing
serialization_exclude: Whether to exclude the field when serializing
serialization_exclude_if: A callable that determines whether to exclude the field when serializing based on its value.
metadata: Any other information you want to include with the schema, not used by pydantic-core
frozen: Whether the field is frozen
"""
return _dict_not_none(
type='dataclass-field',
name=name,
schema=schema,
kw_only=kw_only,
init=init,
init_only=init_only,
validation_alias=validation_alias,
serialization_alias=serialization_alias,
serialization_exclude=serialization_exclude,
serialization_exclude_if=serialization_exclude_if,
metadata=metadata,
frozen=frozen,
)
| DataclassField |
python | doocs__leetcode | lcof2/剑指 Offer II 102. 加减的目标值/Solution3.py | {
"start": 0,
"end": 448
} | class ____:
def findTargetSumWays(self, nums: List[int], target: int) -> int:
s = sum(nums)
if s - target < 0 or (s - target) % 2 != 0:
return 0
target = (s - target) // 2 + 1
n = len(nums) + 1
dp = [0] * target
dp[0] = 1
for i in range(1, n):
for j in range(target - 1, nums[i - 1] - 1, -1):
dp[j] += dp[j - nums[i - 1]]
return dp[-1]
| Solution |
python | google__pytype | pytype/load_pytd_test.py | {
"start": 1034,
"end": 1554
} | class ____(test_base.UnitTest):
@contextlib.contextmanager
def _setup_loader(self, **kwargs):
with test_utils.Tempdir() as d:
for name, contents in kwargs.items():
d.create_file(f"{name}.pyi", contents)
yield load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
def _import(self, **kwargs):
with self._setup_loader(**kwargs) as loader:
return loader.import_name(kwargs.popitem()[0])
| _LoaderTest |
python | bokeh__bokeh | src/bokeh/server/views/ws.py | {
"start": 2202,
"end": 13347
} | class ____(AuthRequestHandler, WebSocketHandler):
''' Implements a custom Tornado WebSocketHandler for the Bokeh Server.
'''
connection: ServerConnection | None
def __init__(self, tornado_app, *args, **kw) -> None:
self.receiver = None
self.handler = None
self.connection = None
self.application_context = kw['application_context']
self.latest_pong = -1
# write_lock allows us to lock the connection to send multiple
# messages atomically.
self.write_lock = locks.Lock()
self._token = None
self._compression_level = kw.pop('compression_level', None)
self._mem_level = kw.pop('mem_level', None)
# Note: tornado_app is stored as self.application
super().__init__(tornado_app, *args, **kw)
def initialize(self, application_context, bokeh_websocket_path):
pass
def check_origin(self, origin: str) -> bool:
''' Implement a check_origin policy for Tornado to call.
The supplied origin will be compared to the Bokeh server allowlist. If the
origin is not allow, an error will be logged and ``False`` will be returned.
Args:
origin (str) :
The URL of the connection origin
Returns:
bool, True if the connection is allowed, False otherwise
'''
from ..util import check_allowlist
parsed_origin = urlparse(origin)
origin_host = parsed_origin.netloc.lower()
allowed_hosts = self.application.websocket_origins
if settings.allowed_ws_origin():
allowed_hosts = set(settings.allowed_ws_origin())
allowed = check_allowlist(origin_host, allowed_hosts)
if allowed:
return True
else:
log.error("Refusing websocket connection from Origin '%s'; \
use --allow-websocket-origin=%s or set BOKEH_ALLOW_WS_ORIGIN=%s to permit this; currently we allow origins %r",
origin, origin_host, origin_host, allowed_hosts)
return False
@web.authenticated
def open(self) -> None:
''' Initialize a connection to a client.
Returns:
None
'''
log.info('WebSocket connection opened')
token = self._token
if self.selected_subprotocol != 'bokeh':
self.close()
raise ProtocolError("Subprotocol header is not 'bokeh'")
elif token is None:
self.close()
raise ProtocolError("No token received in subprotocol header")
now = calendar.timegm(dt.datetime.now(tz=dt.timezone.utc).timetuple())
payload = get_token_payload(token)
if 'session_expiry' not in payload:
self.close()
raise ProtocolError("Session expiry has not been provided")
elif now >= payload['session_expiry']:
self.close()
raise ProtocolError("Token is expired. Configure the app with a larger value for --session-token-expiration if necessary")
elif not check_token_signature(token,
signed=self.application.sign_sessions,
secret_key=self.application.secret_key):
session_id = get_session_id(token)
log.error("Token for session %r had invalid signature", session_id)
raise ProtocolError("Invalid token signature")
try:
self.application.io_loop.add_callback(self._async_open, self._token)
except Exception as e:
# this isn't really an error (unless we have a
# bug), it just means a client disconnected
# immediately, most likely.
log.debug("Failed to fully open connection %r", e)
def select_subprotocol(self, subprotocols: list[str]) -> str | None:
log.debug('Subprotocol header received')
log.trace('Supplied subprotocol headers: %r', subprotocols)
if not len(subprotocols) == 2:
return None
self._token = subprotocols[1]
return subprotocols[0]
def get_compression_options(self) -> dict[str, Any] | None:
if self._compression_level is None:
return None
options = {'compression_level': self._compression_level}
if self._mem_level is not None:
options['mem_level'] = self._mem_level
return options
async def _async_open(self, token: str) -> None:
''' Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and handler
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
Returns:
None
'''
try:
session_id = get_session_id(token)
await self.application_context.create_session_if_needed(session_id, self.request, token)
session = self.application_context.get_session(session_id)
protocol = Protocol()
self.receiver = Receiver(protocol)
log.debug("Receiver created for %r", protocol)
self.handler = ProtocolHandler()
log.debug("ProtocolHandler created for %r", protocol)
self.connection = self.application.new_connection(protocol, self, self.application_context, session)
log.info("ServerConnection created")
except ProtocolError as e:
log.error("Could not create new server session, reason: %s", e)
self.close()
raise e
msg = self.connection.protocol.create('ACK')
await self.send_message(msg)
return None
async def on_message(self, fragment: str | bytes) -> None:
''' Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
'''
# We shouldn't throw exceptions from on_message because the caller is
# just Tornado and it doesn't know what to do with them other than
# report them as an unhandled Future
try:
message = await self._receive(fragment)
except Exception as e:
# If you go look at self._receive, it's catching the
# expected error types... here we have something weird.
log.error("Unhandled exception receiving a message: %r: %r", e, fragment, exc_info=True)
self._internal_error("server failed to parse a message")
message = None
try:
if message:
if _message_test_port is not None:
_message_test_port.received.append(message)
work = await self._handle(message)
if work:
await self._schedule(work)
except Exception as e:
log.error("Handler or its work threw an exception: %r: %r", e, message, exc_info=True)
self._internal_error("server failed to handle a message")
return None
def on_pong(self, data: bytes) -> None:
# if we get an invalid integer or utf-8 back, either we
# sent a buggy ping or the client is evil/broken.
try:
self.latest_pong = int(data.decode("utf-8"))
except UnicodeDecodeError:
log.trace("received invalid unicode in pong %r", data, exc_info=True)
except ValueError:
log.trace("received invalid integer in pong %r", data, exc_info=True)
async def send_message(self, message: Message[Any]) -> None:
''' Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
'''
try:
if _message_test_port is not None:
_message_test_port.sent.append(message)
await message.send(self)
except WebSocketClosedError:
# on_close() is / will be called anyway
log.warning("Failed sending message as connection was closed")
return None
async def write_message(self, message: bytes | str | dict[str, Any],
binary: bool = False, locked: bool = True) -> None:
''' Override parent write_message with a version that acquires a
write lock before writing.
'''
if locked:
with await self.write_lock.acquire():
await super().write_message(message, binary)
else:
await super().write_message(message, binary)
def on_close(self) -> None:
''' Clean up when the connection is closed.
'''
log.info('WebSocket connection closed: code=%s, reason=%r', self.close_code, self.close_reason)
if self.connection is not None:
self.connection.session.notify_connection_lost()
self.application.client_lost(self.connection)
async def _receive(self, fragment: str | bytes) -> Message[Any] | None:
# Receive fragments until a complete message is assembled
try:
message = await self.receiver.consume(fragment)
return message
except (MessageError, ProtocolError, ValidationError) as e:
self._protocol_error(str(e))
return None
async def _handle(self, message: Message[Any]) -> Any | None:
# Handle the message, possibly resulting in work to do
try:
work = await self.handler.handle(message, self.connection)
return work
except (MessageError, ProtocolError, ValidationError) as e: # TODO (other exceptions?)
self._internal_error(str(e))
return None
async def _schedule(self, work: Any) -> None:
if isinstance(work, Message):
await self.send_message(cast(Message[Any], work))
else:
self._internal_error(f"expected a Message not {work!r}")
return None
def _internal_error(self, message: str) -> None:
log.error("Bokeh Server internal error: %s, closing connection", message)
self.close(10000, message)
def _protocol_error(self, message: str) -> None:
log.error("Bokeh Server protocol error: %s, closing connection", message)
self.close(10001, message)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
# This is an undocumented API purely for harvesting low level messages
# for testing. When needed it will be set by the testing machinery, and
# should not be used for any other purpose.
@dataclass
| WSHandler |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_for_update.py | {
"start": 6436,
"end": 16854
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mysql.dialect()
table1 = table(
"mytable", column("myid"), column("name"), column("description")
)
table2 = table("table2", column("mytable_id"))
join = table2.join(table1, table2.c.mytable_id == table1.c.myid)
for_update_of_dialect = mysql.dialect()
for_update_of_dialect.server_version_info = (8, 0, 0)
for_update_of_dialect.supports_for_update_of = True
for_share_dialect = mysql.dialect()
for_share_dialect.server_version_info = (8, 0, 1)
for_share_dialect.supports_for_update_of = True
for_share_dialect.use_mysql_for_share = True
def test_for_update_basic(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s FOR UPDATE",
)
@testing.variation("dialect_type", ["generic", "mysql801"])
def test_for_update_read(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'}""",
dialect=self.for_share_dialect if dialect_type.mysql801 else None,
)
def test_for_update_skip_locked(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(skip_locked=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE SKIP LOCKED",
)
@testing.variation("dialect_type", ["generic", "mysql801"])
def test_for_update_read_and_skip_locked(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True, skip_locked=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'}"""
" SKIP LOCKED",
dialect=self.for_share_dialect if dialect_type.mysql801 else None,
)
def test_for_update_nowait(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE NOWAIT",
)
@testing.variation("dialect_type", ["generic", "mysql801"])
def test_for_update_read_and_nowait(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True, nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'}"""
" NOWAIT",
dialect=self.for_share_dialect if dialect_type.mysql801 else None,
)
def test_for_update_of_nowait(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(of=self.table1, nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE OF mytable NOWAIT",
dialect=self.for_update_of_dialect,
)
def test_for_update_of_basic(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(of=self.table1),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE OF mytable",
dialect=self.for_update_of_dialect,
)
def test_for_update_of_skip_locked(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(of=self.table1, skip_locked=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE OF mytable SKIP LOCKED",
dialect=self.for_update_of_dialect,
)
def test_for_update_of_join_one(self):
self.assert_compile(
self.join.select()
.where(self.table2.c.mytable_id == 7)
.with_for_update(of=[self.join]),
"SELECT table2.mytable_id, "
"mytable.myid, mytable.name, mytable.description "
"FROM table2 "
"INNER JOIN mytable ON table2.mytable_id = mytable.myid "
"WHERE table2.mytable_id = %s "
"FOR UPDATE OF mytable, table2",
dialect=self.for_update_of_dialect,
)
def test_for_update_of_column_list_aliased(self):
ta = self.table1.alias()
self.assert_compile(
ta.select()
.where(ta.c.myid == 7)
.with_for_update(of=[ta.c.myid, ta.c.name]),
"SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM mytable AS mytable_1 "
"WHERE mytable_1.myid = %s FOR UPDATE OF mytable_1",
dialect=self.for_update_of_dialect,
)
def test_for_update_of_join_aliased(self):
ta = self.table1.alias()
alias_join = self.table2.join(
ta, self.table2.c.mytable_id == ta.c.myid
)
self.assert_compile(
alias_join.select()
.where(self.table2.c.mytable_id == 7)
.with_for_update(of=[alias_join]),
"SELECT table2.mytable_id, "
"mytable_1.myid, mytable_1.name, mytable_1.description "
"FROM table2 "
"INNER JOIN mytable AS mytable_1 "
"ON table2.mytable_id = mytable_1.myid "
"WHERE table2.mytable_id = %s "
"FOR UPDATE OF mytable_1, table2",
dialect=self.for_update_of_dialect,
)
@testing.variation("dialect_type", ["mysql800", "mysql801"])
def test_for_update_of_read_nowait(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True, of=self.table1, nowait=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'} """
"OF mytable NOWAIT",
dialect=(
self.for_update_of_dialect
if dialect_type.mysql800
else self.for_share_dialect
),
)
@testing.variation("dialect_type", ["mysql800", "mysql801"])
def test_for_update_of_read_skip_locked(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True, of=self.table1, skip_locked=True),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'} """
"OF mytable SKIP LOCKED",
dialect=(
self.for_update_of_dialect
if dialect_type.mysql800
else self.for_share_dialect
),
)
@testing.variation("dialect_type", ["mysql800", "mysql801"])
def test_for_update_of_read_nowait_column_list(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(
read=True,
of=[self.table1.c.myid, self.table1.c.name],
nowait=True,
),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'} """
"OF mytable NOWAIT",
dialect=(
self.for_update_of_dialect
if dialect_type.mysql800
else self.for_share_dialect
),
)
@testing.variation("dialect_type", ["mysql800", "mysql801"])
def test_for_update_of_read(self, dialect_type):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(read=True, of=self.table1),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
f"""{'FOR SHARE'
if dialect_type.mysql801 else 'LOCK IN SHARE MODE'} """
"OF mytable",
dialect=(
self.for_update_of_dialect
if dialect_type.mysql800
else self.for_share_dialect
),
)
def test_for_update_textual_of(self):
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(of=text("mytable")),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE OF mytable",
dialect=self.for_update_of_dialect,
)
self.assert_compile(
self.table1.select()
.where(self.table1.c.myid == 7)
.with_for_update(of=literal_column("mytable")),
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable WHERE mytable.myid = %s "
"FOR UPDATE OF mytable",
dialect=self.for_update_of_dialect,
)
| MySQLForUpdateCompileTest |
python | pola-rs__polars | py-polars/src/polars/io/cloud/_utils.py | {
"start": 228,
"end": 2071
} | class ____(Generic[T]):
"""
Wrapper that does not pickle the wrapped value.
This wrapper will unpickle to contain a None. Used for cached values.
"""
def __init__(self, opt_value: T | None = None) -> None:
self._opt_value = opt_value
def get(self) -> T | None:
return self._opt_value
def set(self, value: T | None) -> None:
self._opt_value = value
def __getstate__(self) -> tuple[()]:
# Needs to return not-None for `__setstate__()` to be called
return ()
def __setstate__(self, _state: tuple[()]) -> None:
NoPickleOption.__init__(self)
def _first_scan_path(
source: Any,
) -> str | Path | None:
if isinstance(source, (str, Path)):
return source
elif is_path_or_str_sequence(source) and source:
return source[0]
elif isinstance(source, _SinkDirectory):
return source._base_path
return None
def _get_path_scheme(path: str | Path) -> str | None:
path_str = str(path)
i = path_str.find("://")
return path_str[:i] if i >= 0 else None
def _is_aws_cloud(*, scheme: str, first_scan_path: str) -> bool:
if any(scheme == x for x in ["s3", "s3a"]):
return True
if scheme == "http" or scheme == "https":
bucket_end = first_scan_path.find(".s3.")
region_end = first_scan_path.find(".amazonaws.com/", bucket_end + 4)
if (
first_scan_path.find("/", len(scheme) + 3, region_end) > 0
or "?" in first_scan_path
):
return False
return 0 < bucket_end < region_end
return False
def _is_azure_cloud(scheme: str) -> bool:
return any(scheme == x for x in ["az", "azure", "adl", "abfs", "abfss"])
def _is_gcp_cloud(scheme: str) -> bool:
return any(scheme == x for x in ["gs", "gcp", "gcs"])
| NoPickleOption |
python | langchain-ai__langchain | libs/langchain/langchain_classic/memory/chat_memory.py | {
"start": 607,
"end": 3537
} | class ____(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: str | None = None
input_key: str | None = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
stacklevel=3,
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
| BaseChatMemory |
python | bokeh__bokeh | src/bokeh/models/selectors.py | {
"start": 2934,
"end": 3823
} | class ____(Selector):
""" Represents an XPath selector query. """
# explicit __init__ to support Init signatures
def __init__(self, query: Init[str] = Intrinsic, **kwargs: Any) -> None:
super().__init__(query=query, **kwargs)
query = Required(String, help="""
XPath selector query (see https://developer.mozilla.org/en-US/docs/Web/XPath).
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ByXPath |
python | sqlalchemy__sqlalchemy | test/perf/compiled_extensions/result.py | {
"start": 7934,
"end": 8746
} | class ____:
def __init__(self, rows: list[tuple], compiled):
self._rows = list(rows)
if compiled._result_columns is None:
self.description = None
else:
self.description = [
(rc.keyname, 42, None, None, None, True)
for rc in compiled._result_columns
]
def close(self):
pass
def fetchone(self):
if self._rows:
return self._rows.pop(0)
else:
return None
def fetchmany(self, size=None):
if size is None:
return self.fetchall()
else:
ret = self._rows[:size]
self._rows[:size] = []
return ret
def fetchall(self):
ret = self._rows
self._rows = []
return ret
| _MockCursor |
python | doocs__leetcode | lcci/08.11.Coin/Solution2.py | {
"start": 0,
"end": 269
} | class ____:
def waysToChange(self, n: int) -> int:
mod = 10**9 + 7
coins = [25, 10, 5, 1]
f = [1] + [0] * n
for c in coins:
for j in range(c, n + 1):
f[j] = (f[j] + f[j - c]) % mod
return f[n]
| Solution |
python | django__django | tests/proxy_models/models.py | {
"start": 869,
"end": 1067
} | class ____(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
| Abstract |
python | PrefectHQ__prefect | src/prefect/utilities/callables.py | {
"start": 7882,
"end": 25933
} | class ____(pydantic.BaseModel):
"""Simple data model corresponding to an OpenAPI `Schema`."""
title: Literal["Parameters"] = "Parameters"
type: Literal["object"] = "object"
properties: dict[str, Any] = pydantic.Field(default_factory=dict)
required: list[str] = pydantic.Field(default_factory=list)
definitions: dict[str, Any] = pydantic.Field(default_factory=dict)
def model_dump_for_openapi(self) -> dict[str, Any]:
result = self.model_dump(mode="python", exclude_none=True)
if "required" in result and not result["required"]:
del result["required"]
return result
def parameter_docstrings(docstring: Optional[str]) -> dict[str, str]:
"""
Given a docstring in Google docstring format, parse the parameter section
and return a dictionary that maps parameter names to docstring.
Args:
docstring: The function's docstring.
Returns:
Mapping from parameter names to docstrings.
"""
param_docstrings: dict[str, str] = {}
if not docstring:
return param_docstrings
with disable_logger("griffe"):
parsed = parse(Docstring(docstring), Parser.google)
for section in parsed:
if section.kind != DocstringSectionKind.parameters:
continue
param_docstrings = {
parameter.name: parameter.description for parameter in section.value
}
return param_docstrings
def process_v1_params(
param: inspect.Parameter,
*,
position: int,
docstrings: dict[str, str],
aliases: dict[str, str],
) -> tuple[str, Any, Any]:
import pydantic.v1 as pydantic_v1
# Pydantic model creation will fail if names collide with the BaseModel type
if hasattr(pydantic_v1.BaseModel, param.name):
name = param.name + "__"
aliases[name] = param.name
else:
name = param.name
type_ = Any if param.annotation is inspect.Parameter.empty else param.annotation
with warnings.catch_warnings():
# Note: pydantic.v1 doesn't have the warnings module, so we can't suppress them
warnings.filterwarnings("ignore", category=DeprecationWarning)
field: Any = pydantic_v1.Field(
default=... if param.default is param.empty else param.default,
title=param.name,
description=docstrings.get(param.name, None),
alias=aliases.get(name),
position=position,
)
return name, type_, field
def create_v1_schema(
name_: str, model_cfg: type[Any], model_fields: Optional[dict[str, Any]] = None
) -> dict[str, Any]:
import pydantic.v1 as pydantic_v1
with warnings.catch_warnings():
# Note: pydantic.v1 doesn't have the warnings module, so we can't suppress them
warnings.filterwarnings("ignore", category=DeprecationWarning)
model_fields = model_fields or {}
model: type[pydantic_v1.BaseModel] = pydantic_v1.create_model(
name_,
__config__=model_cfg,
**model_fields,
)
return model.schema(by_alias=True)
def parameter_schema(fn: Callable[..., Any]) -> ParameterSchema:
"""Given a function, generates an OpenAPI-compatible description
of the function's arguments, including:
- name
- typing information
- whether it is required
- a default value
- additional constraints (like possible enum values)
Args:
fn (Callable): The function whose arguments will be serialized
Returns:
ParameterSchema: the argument schema
"""
try:
signature = inspect.signature(fn, eval_str=True) # novm
except (NameError, TypeError):
# `eval_str` is not available in Python < 3.10
signature = inspect.signature(fn)
docstrings = parameter_docstrings(inspect.getdoc(fn))
return generate_parameter_schema(signature, docstrings)
def parameter_schema_from_entrypoint(entrypoint: str) -> ParameterSchema:
"""
Generate a parameter schema from an entrypoint string.
Will load the source code of the function and extract the signature and docstring
to generate the schema.
Useful for generating a schema for a function when instantiating the function may
not be possible due to missing imports or other issues.
Args:
entrypoint: A string representing the entrypoint to a function. The string
should be in the format of `module.path.to.function:do_stuff`.
Returns:
ParameterSchema: The parameter schema for the function.
"""
filepath = None
if ":" in entrypoint:
# split by the last colon once to handle Windows paths with drive letters i.e C:\path\to\file.py:do_stuff
path, func_name = entrypoint.rsplit(":", maxsplit=1)
source_code = Path(path).read_text()
filepath = path
else:
path, func_name = entrypoint.rsplit(".", maxsplit=1)
spec = importlib.util.find_spec(path)
if not spec or not spec.origin:
raise ValueError(f"Could not find module {path!r}")
source_code = Path(spec.origin).read_text()
signature = _generate_signature_from_source(source_code, func_name, filepath)
docstring = _get_docstring_from_source(source_code, func_name)
return generate_parameter_schema(signature, parameter_docstrings(docstring))
def generate_parameter_schema(
signature: inspect.Signature, docstrings: dict[str, str]
) -> ParameterSchema:
"""
Generate a parameter schema from a function signature and docstrings.
To get a signature from a function, use `inspect.signature(fn)` or
`_generate_signature_from_source(source_code, func_name)`.
Args:
signature: The function signature.
docstrings: A dictionary mapping parameter names to docstrings.
Returns:
ParameterSchema: The parameter schema.
"""
model_fields: dict[str, Any] = {}
aliases: dict[str, str] = {}
if not has_v1_type_as_param(signature):
config = pydantic.ConfigDict(arbitrary_types_allowed=True)
create_schema = partial(create_v2_schema, model_cfg=config)
process_params = process_v2_params
else:
class ModelConfig:
arbitrary_types_allowed = True
create_schema = partial(create_v1_schema, model_cfg=ModelConfig)
process_params = process_v1_params
for position, param in enumerate(signature.parameters.values()):
name, type_, field = process_params(
param, position=position, docstrings=docstrings, aliases=aliases
)
if name in ("cls", "self"):
continue # Exclude 'cls'/'self' as they're implicitly passed and not real flow parameters
# Generate a Pydantic model at each step so we can check if this parameter
# type supports schema generation
try:
create_schema("CheckParameter", model_fields={name: (type_, field)})
except (ValueError, TypeError):
# This field's type is not valid for schema creation, update it to `Any`
type_ = Any
model_fields[name] = (type_, field)
# Generate the final model and schema
schema = create_schema("Parameters", model_fields=model_fields)
return ParameterSchema(**schema)
def raise_for_reserved_arguments(
fn: Callable[..., Any], reserved_arguments: Iterable[str]
) -> None:
"""Raise a ReservedArgumentError if `fn` has any parameters that conflict
with the names contained in `reserved_arguments`."""
function_parameters = inspect.signature(fn).parameters
for argument in reserved_arguments:
if argument in function_parameters:
raise ReservedArgumentError(
f"{argument!r} is a reserved argument name and cannot be used."
)
def _generate_signature_from_source(
source_code: str, func_name: str, filepath: Optional[str] = None
) -> inspect.Signature:
"""
Extract the signature of a function from its source code.
Will ignore missing imports and exceptions while loading local class definitions.
Args:
source_code: The source code where the function named `func_name` is declared.
func_name: The name of the function.
Returns:
The signature of the function.
"""
# Load the namespace from the source code. Missing imports and exceptions while
# loading local class definitions are ignored.
namespace = safe_load_namespace(source_code, filepath=filepath)
# Parse the source code into an AST
parsed_code = ast.parse(source_code)
func_def = next(
(
node
for node in ast.walk(parsed_code)
if isinstance(
node,
(
ast.FunctionDef,
ast.AsyncFunctionDef,
),
)
and node.name == func_name
),
None,
)
if func_def is None:
raise ValueError(f"Function {func_name} not found in source code")
parameters: list[inspect.Parameter] = []
# Handle annotations for positional only args e.g. def func(a, /, b, c)
for arg in func_def.args.posonlyargs:
name = arg.arg
annotation = arg.annotation
if annotation is not None:
try:
ann_code = compile(ast.Expression(annotation), "<string>", "eval")
annotation = eval(ann_code, namespace)
except Exception as e:
logger.debug("Failed to evaluate annotation for %s: %s", name, e)
annotation = inspect.Parameter.empty
else:
annotation = inspect.Parameter.empty
param = inspect.Parameter(
name, inspect.Parameter.POSITIONAL_ONLY, annotation=annotation
)
parameters.append(param)
# Determine the annotations for args e.g. def func(a: int, b: str, c: float)
for arg in func_def.args.args:
name = arg.arg
annotation = arg.annotation
if annotation is not None:
try:
# Compile and evaluate the annotation
ann_code = compile(ast.Expression(annotation), "<string>", "eval")
annotation = eval(ann_code, namespace)
except Exception as e:
# Don't raise an error if the annotation evaluation fails. Set the
# annotation to `inspect.Parameter.empty` instead which is equivalent to
# not having an annotation.
logger.debug("Failed to evaluate annotation for %s: %s", name, e)
annotation = inspect.Parameter.empty
else:
annotation = inspect.Parameter.empty
param = inspect.Parameter(
name, inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=annotation
)
parameters.append(param)
# Handle default values for args e.g. def func(a=1, b="hello", c=3.14)
defaults = [None] * (
len(func_def.args.args) - len(func_def.args.defaults)
) + func_def.args.defaults
for param, default in zip(parameters, defaults):
if default is not None:
try:
def_code = compile(ast.Expression(default), "<string>", "eval")
default = eval(def_code, namespace)
except Exception as e:
logger.debug(
"Failed to evaluate default value for %s: %s", param.name, e
)
default = None # Set to None if evaluation fails
parameters[parameters.index(param)] = param.replace(default=default)
# Handle annotations for keyword only args e.g. def func(*, a: int, b: str)
for kwarg in func_def.args.kwonlyargs:
name = kwarg.arg
annotation = kwarg.annotation
if annotation is not None:
try:
ann_code = compile(ast.Expression(annotation), "<string>", "eval")
annotation = eval(ann_code, namespace)
except Exception as e:
logger.debug("Failed to evaluate annotation for %s: %s", name, e)
annotation = inspect.Parameter.empty
else:
annotation = inspect.Parameter.empty
param = inspect.Parameter(
name, inspect.Parameter.KEYWORD_ONLY, annotation=annotation
)
parameters.append(param)
# Handle default values for keyword only args e.g. def func(*, a=1, b="hello")
defaults = [None] * (
len(func_def.args.kwonlyargs) - len(func_def.args.kw_defaults)
) + func_def.args.kw_defaults
for param, default in zip(parameters[-len(func_def.args.kwonlyargs) :], defaults):
if default is not None:
try:
def_code = compile(ast.Expression(default), "<string>", "eval")
default = eval(def_code, namespace)
except Exception as e:
logger.debug(
"Failed to evaluate default value for %s: %s", param.name, e
)
default = None
parameters[parameters.index(param)] = param.replace(default=default)
# Handle annotations for varargs and kwargs e.g. def func(*args: int, **kwargs: str)
if func_def.args.vararg:
parameters.append(
inspect.Parameter(
func_def.args.vararg.arg, inspect.Parameter.VAR_POSITIONAL
)
)
if func_def.args.kwarg:
parameters.append(
inspect.Parameter(func_def.args.kwarg.arg, inspect.Parameter.VAR_KEYWORD)
)
# Handle return annotation e.g. def func() -> int
return_annotation = func_def.returns
if return_annotation is not None:
try:
ret_ann_code = compile(
ast.Expression(return_annotation), "<string>", "eval"
)
return_annotation = eval(ret_ann_code, namespace)
except Exception as e:
logger.debug("Failed to evaluate return annotation: %s", e)
return_annotation = inspect.Signature.empty
return inspect.Signature(parameters, return_annotation=return_annotation)
def _get_docstring_from_source(source_code: str, func_name: str) -> Optional[str]:
"""
Extract the docstring of a function from its source code.
Args:
source_code (str): The source code of the function.
func_name (str): The name of the function.
Returns:
The docstring of the function. If the function has no docstring, returns None.
"""
parsed_code = ast.parse(source_code)
func_def = next(
(
node
for node in ast.walk(parsed_code)
if isinstance(
node,
(
ast.FunctionDef,
ast.AsyncFunctionDef,
),
)
and node.name == func_name
),
None,
)
if func_def is None:
raise ValueError(f"Function {func_name} not found in source code")
if (
func_def.body
and isinstance(func_def.body[0], ast.Expr)
and isinstance(func_def.body[0].value, ast.Constant)
):
return str(func_def.body[0].value.value)
return None
def expand_mapping_parameters(
func: Callable[..., Any], parameters: dict[str, Any]
) -> list[dict[str, Any]]:
"""
Generates a list of call parameters to be used for individual calls in a mapping
operation.
Args:
func: The function to be called
parameters: A dictionary of parameters with iterables to be mapped over
Returns:
list: A list of dictionaries to be used as parameters for each
call in the mapping operation
"""
# Ensure that any parameters in kwargs are expanded before this check
parameters = explode_variadic_parameter(func, parameters)
iterable_parameters: dict[str, list[Any]] = {}
static_parameters: dict[str, Any] = {}
annotated_parameters: dict[str, Union[allow_failure[Any], quote[Any]]] = {}
for key, val in parameters.items():
if isinstance(val, (allow_failure, quote)):
# Unwrap annotated parameters to determine if they are iterable
annotated_parameters[key] = val
val: Any = val.unwrap()
if isinstance(val, unmapped):
static_parameters[key] = cast(unmapped[Any], val).value
elif isiterable(val):
iterable_parameters[key] = list(val)
else:
static_parameters[key] = val
if not iterable_parameters:
raise MappingMissingIterable(
"No iterable parameters were received. Parameters for map must "
f"include at least one iterable. Parameters: {parameters}"
)
iterable_parameter_lengths = {
key: len(val) for key, val in iterable_parameters.items()
}
lengths = set(iterable_parameter_lengths.values())
if len(lengths) > 1:
raise MappingLengthMismatch(
"Received iterable parameters with different lengths. Parameters for map"
f" must all be the same length. Got lengths: {iterable_parameter_lengths}"
)
map_length = list(lengths)[0]
call_parameters_list: list[dict[str, Any]] = []
for i in range(map_length):
call_parameters = {key: value[i] for key, value in iterable_parameters.items()}
call_parameters.update({key: value for key, value in static_parameters.items()})
# Add default values for parameters; these are skipped earlier since they should
# not be mapped over
for key, value in get_parameter_defaults(func).items():
call_parameters.setdefault(key, value)
# Re-apply annotations to each key again
for key, annotation in annotated_parameters.items():
call_parameters[key] = annotation.rewrap(call_parameters[key])
# Collapse any previously exploded kwargs
call_parameters_list.append(collapse_variadic_parameters(func, call_parameters))
return call_parameters_list
| ParameterSchema |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 2711,
"end": 2946
} | class ____:
required_list: list[int] = field()
required_str: str = field()
required_enum: BasicEnum = field()
def __post_init__(self):
self.required_enum = BasicEnum(self.required_enum)
@dataclass
| RequiredExample |
python | getsentry__sentry | tests/sentry/utils/email/test_list_resolver.py | {
"start": 221,
"end": 1089
} | class ____(TestCase):
resolver = ListResolver("namespace", default_list_type_handlers)
def test_rejects_invalid_namespace(self) -> None:
with pytest.raises(AssertionError):
ListResolver("\x00", {})
def test_rejects_invalid_types(self) -> None:
with pytest.raises(ListResolver.UnregisteredTypeError):
self.resolver(self.user)
def test_generates_list_ids(self) -> None:
expected = f"<{self.event.project.slug}.{self.event.organization.slug}.namespace>"
assert self.resolver(self.event.group) == expected
assert self.resolver(self.event.project) == expected
def test_rejects_invalid_objects(self) -> None:
resolver = ListResolver("namespace", {Project: lambda value: ("\x00",)})
with pytest.raises(AssertionError):
resolver(self.project)
| ListResolverTestCase |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 28489,
"end": 29712
} | class ____(Benchmark):
r"""
Shubert 4 objective function.
This class defines the Shubert 4 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Shubert04}}(x) = \left(\sum_{i=1}^n \sum_{j=1}^5 -j
\cos ((j+1)x_i + j)\right)
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -29.016015` for
:math:`x = [-0.80032121, -7.08350592]` (and many others).
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil#135 has wrong global minimum value, and is missing a minus sign
before the whole thing.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.80032121, -7.08350592]]
self.fglob = -29.016015
def fun(self, x, *args):
self.nfev += 1
j = atleast_2d(arange(1, 6)).T
y = -j * cos((j + 1) * x + j)
return sum(sum(y))
| Shubert04 |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/asb.py | {
"start": 22092,
"end": 24589
} | class ____(BaseOperator):
"""
Update an Azure ServiceBus Topic Subscription under a ServiceBus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusUpdateSubscriptionOperator`
:param topic_name: The topic that will own the to-be-created subscription.
:param subscription_name: Name of the subscription that need to be created.
:param max_delivery_count: The maximum delivery count. A message is automatically dead lettered
after this number of deliveries. Default value is 10.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("topic_name", "subscription_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
subscription_name: str,
max_delivery_count: int | None = None,
dead_lettering_on_message_expiration: bool | None = None,
enable_batched_operations: bool | None = None,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.subscription_name = subscription_name
self.max_delivery_count = max_delivery_count
self.dl_on_message_expiration = dead_lettering_on_message_expiration
self.enable_batched_operations = enable_batched_operations
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Update Subscription properties, by connecting to Service Bus Admin client."""
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
hook.update_subscription(
topic_name=self.topic_name,
subscription_name=self.subscription_name,
max_delivery_count=self.max_delivery_count,
dead_lettering_on_message_expiration=self.dl_on_message_expiration,
enable_batched_operations=self.enable_batched_operations,
)
| AzureServiceBusUpdateSubscriptionOperator |
python | skorch-dev__skorch | skorch/tests/test_utils.py | {
"start": 13747,
"end": 14318
} | class ____:
@pytest.fixture
def params_for(self):
from skorch.utils import params_for
return params_for
@pytest.mark.parametrize('prefix, kwargs, expected', [
('p1', {'p1__a': 1, 'p1__b': 2}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2}, {}),
('p1', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 3}),
])
def test_params_for(self, params_for, prefix, kwargs, expected):
assert params_for(prefix, kwargs) == expected
| TestParamsFor |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-longrag/llama_index/packs/longrag/base.py | {
"start": 6405,
"end": 9943
} | class ____(Workflow):
"""Long RAG Workflow."""
@step()
async def ingest(self, ev: StartEvent) -> t.Optional[LoadNodeEvent]:
"""
Ingestion step.
Args:
ctx (Context): Context
ev (StartEvent): start event
Returns:
StopEvent | None: stop event with result
"""
data_dir: str = ev.get("data_dir")
llm: LLM = ev.get("llm")
chunk_size: t.Optional[int] = ev.get("chunk_size")
similarity_top_k: int = ev.get("similarity_top_k")
small_chunk_size: int = ev.get("small_chunk_size")
index: t.Optional[VectorStoreIndex] = ev.get("index")
index_kwargs: t.Optional[t.Dict[str, t.Any]] = ev.get("index_kwargs")
if any(i is None for i in [data_dir, llm, similarity_top_k, small_chunk_size]):
return None
if not index:
docs = SimpleDirectoryReader(data_dir).load_data()
if chunk_size is not None:
nodes = split_doc(
chunk_size, docs
) # split documents into chunks of chunk_size
grouped_nodes = get_grouped_docs(
nodes
) # get list of nodes after grouping (groups are combined into one node), these are long retrieval units
else:
grouped_nodes = docs
# split large retrieval units into smaller nodes
small_nodes = split_doc(small_chunk_size, grouped_nodes)
index_kwargs = index_kwargs or {}
index = VectorStoreIndex(small_nodes, **index_kwargs)
else:
# get smaller nodes from index and form large retrieval units from these nodes
small_nodes = index.docstore.docs.values()
grouped_nodes = get_grouped_docs(small_nodes, None)
return LoadNodeEvent(
small_nodes=small_nodes,
grouped_nodes=grouped_nodes,
index=index,
similarity_top_k=similarity_top_k,
llm=llm,
)
@step(pass_context=True)
async def make_query_engine(self, ctx: Context, ev: LoadNodeEvent) -> StopEvent:
"""
Query engine construction step.
Args:
ctx (Context): context
ev (LoadNodeEvent): event
Returns:
StopEvent: stop event
"""
# make retriever and query engine
retriever = LongRAGRetriever(
grouped_nodes=ev.grouped_nodes,
small_toks=ev.small_nodes,
similarity_top_k=ev.similarity_top_k,
vector_store=ev.index.vector_store,
)
query_eng = RetrieverQueryEngine.from_args(retriever, ev.llm)
ctx.data["query_eng"] = query_eng
return StopEvent(
result={
"retriever": retriever,
"query_engine": query_eng,
"index": ev.index,
}
)
@step(pass_context=True)
async def query(self, ctx: Context, ev: StartEvent) -> t.Optional[StopEvent]:
"""
Query step.
Args:
ctx (Context): context
ev (StartEvent): start event
Returns:
StopEvent | None: stop event with result
"""
query_str: t.Optional[str] = ev.get("query_str")
if query_str is None:
return None
query_eng: RetrieverQueryEngine = ctx.data.get("query_eng")
result = query_eng.query(query_str)
return StopEvent(result=result)
| LongRAGWorkflow |
python | getsentry__sentry | tests/sentry/db/postgres/schema/safe_migrations/integration/test_migrations.py | {
"start": 5388,
"end": 5745
} | class ____(BaseSafeMigrationTest):
app = "bad_flow_delete_model_app"
migrate_from = "0001_initial"
migrate_to = "0002_delete_model"
def test(self) -> None:
with pytest.raises(
UnsafeOperationException,
match="Deleting the TestTable model is unsafe.",
):
self.run_migration()
| DeleteModelTest |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 129463,
"end": 131841
} | class ____(TypedDict, total=False):
type: Required[Literal['arguments-v3']]
arguments_schema: Required[list[ArgumentsV3Parameter]]
validate_by_name: bool
validate_by_alias: bool
extra_behavior: Literal['forbid', 'ignore'] # 'allow' doesn't make sense here.
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def arguments_v3_schema(
arguments: list[ArgumentsV3Parameter],
*,
validate_by_name: bool | None = None,
validate_by_alias: bool | None = None,
extra_behavior: Literal['forbid', 'ignore'] | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> ArgumentsV3Schema:
"""
Returns a schema that matches an arguments schema, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
param_a = core_schema.arguments_v3_parameter(
name='a', schema=core_schema.str_schema(), mode='positional_only'
)
param_b = core_schema.arguments_v3_parameter(
name='kwargs', schema=core_schema.bool_schema(), mode='var_kwargs_uniform'
)
schema = core_schema.arguments_v3_schema([param_a, param_b])
v = SchemaValidator(schema)
assert v.validate_python({'a': 'hi', 'kwargs': {'b': True}}) == (('hi',), {'b': True})
```
This schema is currently not used by other Pydantic components. In V3, it will most likely
become the default arguments schema for the `'call'` schema.
Args:
arguments: The arguments to use for the arguments schema.
validate_by_name: Whether to populate by the parameter names, defaults to `False`.
validate_by_alias: Whether to populate by the parameter aliases, defaults to `True`.
extra_behavior: The extra behavior to use.
ref: optional unique identifier of the schema, used to reference the schema in other places.
metadata: Any other information you want to include with the schema, not used by pydantic-core.
serialization: Custom serialization schema.
"""
return _dict_not_none(
type='arguments-v3',
arguments_schema=arguments,
validate_by_name=validate_by_name,
validate_by_alias=validate_by_alias,
extra_behavior=extra_behavior,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| ArgumentsV3Schema |
python | pytorch__pytorch | torch/distributed/debug/_frontend.py | {
"start": 6539,
"end": 6694
} | class ____(ThreadingHTTPServer):
address_family: socket.AddressFamily = socket.AF_INET6 # pyre-ignore
request_queue_size: int = 1024
| _IPv6HTTPServer |
python | huggingface__transformers | tests/models/m2m_100/test_tokenization_m2m_100.py | {
"start": 7101,
"end": 12496
} | class ____(unittest.TestCase):
checkpoint_name = "facebook/m2m100_418M"
src_text = [
"In my opinion, there are two levels of response from the French government.",
"NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
]
tgt_text = [
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
]
expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] # fmt: skip
@classmethod
def setUpClass(cls):
cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
cls.checkpoint_name, src_lang="en", tgt_lang="fr"
)
cls.pad_token_id = 1
return cls
def check_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)
def test_get_vocab(self):
vocab = self.tokenizer.get_vocab()
self.assertEqual(len(vocab), len(self.tokenizer))
self.assertEqual(vocab["<unk>"], 3)
self.assertIn(self.tokenizer.get_lang_token("en"), vocab)
def test_tokenizer_batch_encode_plus(self):
self.tokenizer.src_lang = "en"
ids = self.tokenizer(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_tokenizer_decode_ignores_language_codes(self):
self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: skip
result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
self.assertEqual(result, expected_french)
self.assertNotIn(self.tokenizer.eos_token, result)
def test_special_tokens_unaffacted_by_save_load(self):
with tempfile.TemporaryDirectory() as tmpdirname:
original_special_tokens = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(tmpdirname)
new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
@require_torch
def test_batch_fairseq_parity(self):
self.tokenizer.src_lang = "en"
self.tokenizer.tgt_lang = "fr"
batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
batch["decoder_input_ids"] = shift_tokens_right(
batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
)
for k in batch:
batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def test_src_lang_setter(self):
self.tokenizer.src_lang = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer.src_lang = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
def test_tokenizer_target_mode(self):
self.tokenizer.tgt_lang = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
self.tokenizer.tgt_lang = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def test_tokenizer_translation(self):
inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")
self.assertEqual(
nested_simplify(inputs),
{
# en_XX, A, test, EOS
"input_ids": [[128022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128006,
},
)
| M2M100TokenizerIntegrationTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image17.py | {
"start": 315,
"end": 919
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image17.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_row(1, 96)
worksheet.set_column("C:C", 18)
worksheet.insert_image("C2", self.image_dir + "issue32.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytest-dev__pytest-xdist | testing/test_workermanage.py | {
"start": 13788,
"end": 15132
} | class ____(UserWarning):
pass
@pytest.mark.parametrize(
"w_cls",
[
UserWarning,
MyWarning,
"Imported",
pytest.param(
"Nested",
marks=pytest.mark.xfail(reason="Nested warning classes are not supported."),
),
],
)
def test_unserialize_warning_msg(w_cls: type[Warning] | str) -> None:
"""Test that warning serialization process works well."""
# Create a test warning message
with pytest.warns(UserWarning) as w:
if not isinstance(w_cls, str):
warnings.warn("hello", w_cls)
elif w_cls == "Imported":
generate_warning()
elif w_cls == "Nested":
# dynamic creation
class MyWarning2(UserWarning):
pass
warnings.warn("hello", MyWarning2)
# Unpack
assert len(w) == 1
w_msg = w[0]
# Serialize and deserialize
data = serialize_warning_message(w_msg)
w_msg2 = unserialize_warning_message(data)
# Compare the two objects
all_keys = set(vars(w_msg).keys()).union(set(vars(w_msg2).keys()))
for k in all_keys:
v1 = getattr(w_msg, k)
v2 = getattr(w_msg2, k)
if k == "message":
assert type(v1) is type(v2)
assert v1.args == v2.args
else:
assert v1 == v2
| MyWarning |
python | python-excel__xlwt | xlwt/ExcelFormulaParser.py | {
"start": 404,
"end": 1228
} | class ____(Exception):
"""
An exception indicating that a Formula could not be successfully parsed.
"""
### header action <<<
### preamble action>>>
### preamble action <<<
### >>>The Known Token Types <<<
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
TRUE_CONST = 4
FALSE_CONST = 5
STR_CONST = 6
NUM_CONST = 7
INT_CONST = 8
FUNC_IF = 9
FUNC_CHOOSE = 10
NAME = 11
QUOTENAME = 12
EQ = 13
NE = 14
GT = 15
LT = 16
GE = 17
LE = 18
ADD = 19
SUB = 20
MUL = 21
DIV = 22
POWER = 23
PERCENT = 24
LP = 25
RP = 26
LB = 27
RB = 28
COLON = 29
COMMA = 30
SEMICOLON = 31
REF2D = 32
REF2D_R1C1 = 33
BANG = 34
CONCAT = 35
| FormulaParseException |
python | ansible__ansible | lib/ansible/module_utils/facts/other/ohai.py | {
"start": 860,
"end": 2265
} | class ____(BaseFactCollector):
"""This is a subclass of Facts for including information gathered from Ohai."""
name = 'ohai'
_fact_ids = set() # type: t.Set[str]
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='ohai',
prefix='ohai_')
super(OhaiFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_ohai(self, module):
return module.get_bin_path(
'ohai'
)
def run_ohai(self, module, ohai_path):
rc, out, err = module.run_command(ohai_path)
return rc, out, err
def get_ohai_output(self, module):
ohai_path = self.find_ohai(module)
if not ohai_path:
return None
rc, out, err = self.run_ohai(module, ohai_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
ohai_facts = {}
if not module:
return ohai_facts
ohai_output = self.get_ohai_output(module)
if ohai_output is None:
return ohai_facts
try:
ohai_facts = json.loads(ohai_output)
except Exception:
module.warn("Failed to gather ohai facts")
return ohai_facts
| OhaiFactCollector |
python | pikepdf__pikepdf | src/pikepdf/canvas.py | {
"start": 15411,
"end": 27621
} | class ____:
"""Content stream builder."""
def __init__(self):
"""Initialize."""
self._stream = bytearray()
def _append(self, inst: ContentStreamInstruction):
self._stream += unparse_content_stream([inst]) + b"\n"
def extend(self, other: ContentStreamBuilder | bytes):
"""Append another content stream."""
if isinstance(other, ContentStreamBuilder):
self._stream += other._stream
else:
self._stream += other + b"\n"
def push(self):
"""Save the graphics state."""
inst = ContentStreamInstruction([], Operator("q"))
self._append(inst)
return self
def pop(self):
"""Restore the graphics state."""
inst = ContentStreamInstruction([], Operator("Q"))
self._append(inst)
return self
def cm(self, matrix: Matrix):
"""Concatenate matrix."""
inst = ContentStreamInstruction(matrix.shorthand, Operator("cm"))
self._append(inst)
return self
def begin_text(self):
"""Begin text object.
All text operations must be contained within a text object, and are invalid
otherwise. The text matrix and font are reset for each text object. Text objects
may not be nested.
"""
inst = ContentStreamInstruction([], Operator("BT"))
self._append(inst)
return self
def end_text(self):
"""End text object."""
inst = ContentStreamInstruction([], Operator("ET"))
self._append(inst)
return self
def begin_marked_content_proplist(self, mctype: Name, mcid: int):
"""Begin marked content sequence."""
inst = ContentStreamInstruction(
[mctype, Dictionary(MCID=mcid)], Operator("BDC")
)
self._append(inst)
return self
def begin_marked_content(self, mctype: Name):
"""Begin marked content sequence."""
inst = ContentStreamInstruction([mctype], Operator("BMC"))
self._append(inst)
return self
def end_marked_content(self):
"""End marked content sequence."""
inst = ContentStreamInstruction([], Operator("EMC"))
self._append(inst)
return self
def set_text_font(self, font: Name, size: int | float | Decimal):
"""Set text font and size.
This operator is mandatory in order to show text. Any text object which attempts
to show text without first calling this operator is invalid.
The font name must match an entry in the current resources dictionary. The font
size is expressed in text-space units. Assuming no text scaling is in place, and
the PDF has not set a user-defined unit in the page dictionary, then text space
units will be points (defined as 1/72 of an inch).
"""
inst = ContentStreamInstruction([font, size], Operator("Tf"))
self._append(inst)
return self
def set_text_char_spacing(self, size: int | float | Decimal):
"""Set the character spacing (Tc) for future text operations.
This is a value, measured in unscaled text-space units, which will be used to
adjust the spacing between characters. A value of 0 (the default) means that,
for each rendered glyph, the cursor will advance only the actual width of the
glyph. Positive values will result in additional space between characters, and
negative values will cause glyphs to overlap.
In vertical writing, the sign works opposite of what one might expect: a
positive value shrinks the space, and a negative value increases it.
"""
inst = ContentStreamInstruction([size], Operator("Tc"))
self._append(inst)
return self
def set_text_word_spacing(self, size: int | float | Decimal):
"""Set the word spacing (Tw) for future text operations.
This is a value, measured in unscaled text-space units, which will be added to
the width of any ASCII space characters.
In vertical writing, the sign works opposite of what one might expect: a
positive value shrinks the space, and a negative value increases it.
"""
inst = ContentStreamInstruction([size], Operator("Tw"))
self._append(inst)
return self
def set_text_leading(self, size: int | float | Decimal):
"""Set the leading value (TL) for future text operations.
This is the vertical spacing between lines. Specifically, it is defined as the
distance between the baseline of the previous line to the baseline of the next
line.
"""
inst = ContentStreamInstruction([size], Operator("TL"))
self._append(inst)
return self
def set_text_matrix(self, matrix: Matrix):
"""Set text matrix.
The text matrix defines the conversion between text-space and page-space, in
terms of both scaling and translation. If this matrix scales the text, then
it redefines text-space units as being some scale factor of page-space units.
"""
inst = ContentStreamInstruction(matrix.shorthand, Operator("Tm"))
self._append(inst)
return self
def set_text_rendering(self, mode: int):
"""Set text rendering mode."""
inst = ContentStreamInstruction([mode], Operator("Tr"))
self._append(inst)
return self
def set_text_horizontal_scaling(self, scale: float):
"""Set text horizontal scaling."""
inst = ContentStreamInstruction([scale], Operator("Tz"))
self._append(inst)
return self
def show_text(self, encoded: bytes):
"""Show text.
The text must be encoded in character codes expected by the font.
"""
# [ <text string> ] TJ
# operands need to be enclosed in Array
# There is a Tj operator (lowercase j) which does not have this requirement,
# but for some reason QPDF hex-encodes the strings when using that operator.
# The TJ operator (Uppercase J) is technically meant for including spacing
# options, rather than showing a single string.
inst = ContentStreamInstruction([Array([String(encoded)])], Operator("TJ"))
self._append(inst)
return self
def show_text_with_kerning(self, *parts: bytes | int | float | Decimal):
"""Show text, with manual spacing (kerning) options.
Arguments are either bytes, which represent the actual text to show, or numbers,
which move the cursor. The units for the numbers are expressed in thousandths
of a text-space unit (thus typically equivalent to a glyph-space unit).
For horizontal writing, positive values move the cursor left, and negative
right. For vertical writing, positive values move down and negative up.
The text must be encoded in character codes expected by the font.
"""
inst = ContentStreamInstruction(
[
Array(
String(part) if isinstance(part, bytes) else part for part in parts
)
],
Operator("TJ"),
)
self._append(inst)
return self
def show_text_line(self, encoded: bytes):
"""Advance to the next line and show text.
The text must be encoded in character codes expected by the font.
This is functionally equivalent to ``move_cursor_new_line()`` followed by
``show_text_string(encoded)``, but in a single operation.
"""
inst = ContentStreamInstruction([String(encoded)], Operator("'"))
self._append(inst)
return self
def show_text_line_with_spacing(
self, encoded: bytes, word_spacing: int, char_spacing: int
):
"""Advance to the next line and show text.
The text must be encoded in character codes expected by the font.
This is functionally equivalent to ``set_text_char_spacing(char_spacing)`` and
``set_text_word_spacing()``, followed by ``move_cursor_new_line()`` and then
``show_text(encoded)``, all in a single operation.
"""
inst = ContentStreamInstruction(
[word_spacing, char_spacing, String(encoded)], Operator('"')
)
self._append(inst)
return self
def move_cursor(self, dx, dy):
"""Move cursor by the given offset, relative to the start of the current line.
This operator modifies the both current text matrix and the text line matrix.
This means that, in addition to moving the current cursor, the new cursor will
also be defined as the start of a new line.
The new position will be redefined as the new start of the line even if the y
offset is 0; what to a user may look like a single line of text could be encoded
in the PDF content stream as multiple "lines". It's not uncommon for PDFs to be
written with every word as a separate "line", allowing the PDF writer to
explicitly define the spacing between each word.
"""
inst = ContentStreamInstruction([dx, dy], Operator("Td"))
self._append(inst)
return self
def move_cursor_new_line(self):
"""Move cursor to the start of the next line.
This moves down by the current leading value, and resets the x position back to
the value it had at the beginning of the current line.
This operator modifies the both current text matrix and the text line matrix.
This means that, in addition to moving the current cursor, the new cursor will
also be defined as the start of a new line.
The value this operation moves the cursor is set using ``set_text_leading``.
"""
inst = ContentStreamInstruction([], Operator("T*"))
self._append(inst)
return self
def stroke_and_close(self):
"""Stroke and close path."""
inst = ContentStreamInstruction([], Operator("s"))
self._append(inst)
return self
def fill(self):
"""Stroke and close path."""
inst = ContentStreamInstruction([], Operator("f"))
self._append(inst)
return self
def append_rectangle(self, x: float, y: float, w: float, h: float):
"""Append rectangle to path."""
inst = ContentStreamInstruction([x, y, w, h], Operator("re"))
self._append(inst)
return self
def set_stroke_color(self, r: float, g: float, b: float):
"""Set RGB stroke color."""
inst = ContentStreamInstruction([r, g, b], Operator("RG"))
self._append(inst)
return self
def set_fill_color(self, r: float, g: float, b: float):
"""Set RGB fill color."""
inst = ContentStreamInstruction([r, g, b], Operator("rg"))
self._append(inst)
return self
def set_line_width(self, width):
"""Set line width."""
inst = ContentStreamInstruction([width], Operator("w"))
self._append(inst)
return self
def line(self, x1: float, y1: float, x2: float, y2: float):
"""Draw line."""
insts = [
ContentStreamInstruction([x1, y1], Operator("m")),
ContentStreamInstruction([x2, y2], Operator("l")),
]
self._append(insts[0])
self._append(insts[1])
return self
def set_dashes(self, array=None, phase=0):
"""Set dashes."""
if array is None:
array = []
if isinstance(array, int | float):
array = (array, phase)
phase = 0
inst = ContentStreamInstruction([array, phase], Operator("d"))
self._append(inst)
return self
def draw_xobject(self, name: Name):
"""Draw XObject.
Add instructions to render an XObject. The XObject must be
defined in the document.
Args:
name: Name of XObject
"""
inst = ContentStreamInstruction([name], Operator("Do"))
self._append(inst)
return self
def build(self) -> bytes:
"""Build content stream."""
return bytes(self._stream)
@dataclass
| ContentStreamBuilder |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 4116,
"end": 4884
} | class ____(SQLDatasourceError):
"""
An error creating a SQLAlchemy `Engine` object.
Not to be confused with the GX `SQLAlchemyExecutionEngine`.
"""
@overload
def __init__(self, addendum: str | None = ..., cause: Exception = ...): ...
@overload
def __init__(self, addendum: str = ..., cause: Exception | None = ...): ...
def __init__(
self,
addendum: str | None = None,
cause: Exception | None = None,
):
"""Must provide a `cause`, `addendum`, or both."""
message = "Unable to create SQLAlchemy Engine"
if cause:
message += f": due to {cause!r}"
if addendum:
message += f": {addendum}"
super().__init__(message)
| SQLAlchemyCreateEngineError |
python | py-pdf__pypdf | pypdf/_text_extraction/_layout_mode/_text_state_manager.py | {
"start": 577,
"end": 8214
} | class ____:
"""
Tracks the current text state including cm/tm/trm transformation matrices.
Attributes:
transform_stack (ChainMap): ChainMap of cm/tm transformation matrices
q_queue (Counter[int]): Counter of q operators
q_depth (List[int]): list of q operator nesting levels
Tc (float): character spacing
Tw (float): word spacing
Tz (int): horizontal scaling
TL (float): leading
Ts (float): text rise
font (Font): font object
font_size (int | float): font size
"""
def __init__(self) -> None:
self.transform_stack: TextStateManagerChainMapType = ChainMap(
self.new_transform()
)
self.q_queue: CounterType[int] = Counter()
self.q_depth = [0]
self.Tc: float = 0.0
self.Tw: float = 0.0
self.Tz: float = 100.0
self.TL: float = 0.0
self.Ts: float = 0.0
self.font_stack: list[tuple[Union[Font, None], Union[int, float]]] = []
self.font: Union[Font, None] = None
self.font_size: Union[int, float] = 0
def set_state_param(self, op: bytes, value: Union[float, list[Any]]) -> None:
"""
Set a text state parameter. Supports Tc, Tz, Tw, TL, and Ts operators.
Args:
op: operator read from PDF stream as bytes. No action is taken
for unsupported operators (see supported operators above).
value (float | List[Any]): new parameter value. If a list,
value[0] is used.
"""
if op not in [b"Tc", b"Tz", b"Tw", b"TL", b"Ts"]:
return
self.__setattr__(op.decode(), value[0] if isinstance(value, list) else value)
def set_font(self, font: Font, size: float) -> None:
"""
Set the current font and font_size.
Args:
font (Font): a layout mode Font
size (float): font size
"""
self.font = font
self.font_size = size
def text_state_params(self, value: Union[bytes, str] = "") -> TextStateParams:
"""
Create a TextStateParams instance to display a text string. Type[bytes] values
will be decoded implicitly.
Args:
value (str | bytes): text to associate with the captured state.
Raises:
PdfReadError: if font not set (no Tf operator in incoming pdf content stream)
Returns:
TextStateParams: current text state parameters
"""
if not isinstance(self.font, Font):
raise PdfReadError(
"font not set: is PDF missing a Tf operator?"
) # pragma: no cover
if isinstance(value, bytes):
try:
if isinstance(self.font.encoding, str):
txt = value.decode(self.font.encoding, "surrogatepass")
else:
txt = "".join(
self.font.encoding[x]
if x in self.font.encoding
else bytes((x,)).decode()
for x in value
)
except (UnicodeEncodeError, UnicodeDecodeError):
txt = value.decode("utf-8", "replace")
txt = "".join(
self.font.char_map.get(x, x) for x in txt
)
else:
txt = value
return TextStateParams(
txt,
self.font,
self.font_size,
self.Tc,
self.Tw,
self.Tz,
self.TL,
self.Ts,
self.effective_transform,
)
@staticmethod
def raw_transform(
_a: float = 1.0,
_b: float = 0.0,
_c: float = 0.0,
_d: float = 1.0,
_e: float = 0.0,
_f: float = 0.0,
) -> dict[int, float]:
"""Only a/b/c/d/e/f matrix params"""
return dict(zip(range(6), map(float, (_a, _b, _c, _d, _e, _f))))
@staticmethod
def new_transform(
_a: float = 1.0,
_b: float = 0.0,
_c: float = 0.0,
_d: float = 1.0,
_e: float = 0.0,
_f: float = 0.0,
is_text: bool = False,
is_render: bool = False,
) -> TextStateManagerDictType:
"""Standard a/b/c/d/e/f matrix params + 'is_text' and 'is_render' keys"""
result: Any = TextStateManager.raw_transform(_a, _b, _c, _d, _e, _f)
result.update({"is_text": is_text, "is_render": is_render})
return result
def reset_tm(self) -> TextStateManagerChainMapType:
"""Clear all transforms from chainmap having is_text==True or is_render==True"""
while (
self.transform_stack.maps[0]["is_text"]
or self.transform_stack.maps[0]["is_render"]
):
self.transform_stack = self.transform_stack.parents
return self.transform_stack
def reset_trm(self) -> TextStateManagerChainMapType:
"""Clear all transforms from chainmap having is_render==True"""
while self.transform_stack.maps[0]["is_render"]:
self.transform_stack = self.transform_stack.parents
return self.transform_stack
def remove_q(self) -> TextStateManagerChainMapType:
"""Rewind to stack prior state after closing a 'q' with internal 'cm' ops"""
self.font, self.font_size = self.font_stack.pop(-1)
self.transform_stack = self.reset_tm()
self.transform_stack.maps = self.transform_stack.maps[
self.q_queue.pop(self.q_depth.pop(), 0) :
]
return self.transform_stack
def add_q(self) -> None:
"""Add another level to q_queue"""
self.font_stack.append((self.font, self.font_size))
self.q_depth.append(len(self.q_depth))
def add_cm(self, *args: Any) -> TextStateManagerChainMapType:
"""Concatenate an additional transform matrix"""
self.transform_stack = self.reset_tm()
self.q_queue.update(self.q_depth[-1:])
self.transform_stack = self.transform_stack.new_child(self.new_transform(*args))
return self.transform_stack
def _complete_matrix(self, operands: list[float]) -> list[float]:
"""Adds a, b, c, and d to an "e/f only" operand set (e.g Td)"""
if len(operands) == 2: # this is a Td operator or equivalent
operands = [1.0, 0.0, 0.0, 1.0, *operands]
return operands
def add_tm(self, operands: list[float]) -> TextStateManagerChainMapType:
"""Append a text transform matrix"""
self.transform_stack = self.transform_stack.new_child(
self.new_transform( # type: ignore[misc]
*self._complete_matrix(operands), is_text=True # type: ignore[arg-type]
)
)
return self.transform_stack
def add_trm(self, operands: list[float]) -> TextStateManagerChainMapType:
"""Append a text rendering transform matrix"""
self.transform_stack = self.transform_stack.new_child(
self.new_transform( # type: ignore[misc]
*self._complete_matrix(operands), is_text=True, is_render=True # type: ignore[arg-type]
)
)
return self.transform_stack
@property
def effective_transform(self) -> list[float]:
"""Current effective transform accounting for cm, tm, and trm transforms"""
eff_transform = [*self.transform_stack.maps[0].values()]
for transform in self.transform_stack.maps[1:]:
eff_transform = mult(eff_transform, transform) # type: ignore[arg-type] # dict has int keys 0-5
return eff_transform
| TextStateManager |
python | allegroai__clearml | clearml/automation/scheduler.py | {
"start": 12801,
"end": 13291
} | class ____(object):
name = attrib(type=str, default=None)
started = attrib(type=datetime, converter=datetime_from_isoformat, default=None)
finished = attrib(type=datetime, converter=datetime_from_isoformat, default=None)
task_id = attrib(type=str, default=None)
thread_id = attrib(type=str, default=None)
def to_dict(self, full: bool = False) -> Dict[str, Any]:
return {k: v for k, v in self.__dict__.items() if full or not str(k).startswith("_")}
| ExecutedJob |
python | gevent__gevent | src/greentest/3.9/test_subprocess.py | {
"start": 71733,
"end": 136818
} | class ____(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
# to do that
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, user=2**64)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
with self.assertRaises(OverflowError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ, group=2**64)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
cwd=os.curdir, env=os.environ,
extra_groups=[2**64])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so as a unique mode
# for us to test the child's touched file for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals intenum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
    def test_preexec(self):
        """preexec_fn runs in the child between fork() and exec()."""
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn.  This is merely a test.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        with p:
            self.assertEqual(p.stdout.read(), b"apple")
    def test_preexec_exception(self):
        """An exception raised by preexec_fn must surface in the parent."""
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # The C accelerated _posixsubprocess reports preexec failures
            # with a generic SubprocessError; a ValueError here would mean
            # the pure Python path should have been taken instead.
            self.assertTrue(
                    subprocess._posixsubprocess,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # Pure Python implementation: the original message survives.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child.

        Wraps Popen so a TestCase can verify that the stdio pipe fds are
        still open (not double-closed) after _execute_child returns.
        """
        def __init__(self, testcase, *args, **kwargs):
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)

        def _execute_child(self, *args, **kwargs):
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.  If any fd
                # had been closed early, os.open() would reuse its number.
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                            fd, (self.stdin.fileno(), self.stdout.fileno(),
                                 self.stderr.fileno()),
                            msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)
    @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
    def test_preexec_errpipe_does_not_double_close_pipes(self):
        """Issue16140: Don't double close pipes on preexec error."""
        def raise_it():
            raise subprocess.SubprocessError(
                    "force the _execute_child() errpipe_data path.")
        # _TestExecuteChildPopen asserts the stdio fds are still open
        # after the error path in _execute_child runs.
        with self.assertRaises(subprocess.SubprocessError):
            self._TestExecuteChildPopen(
                        self, ZERO_RETURN_CMD,
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        """Popen must tolerate and propagate failures from the gc module.

        This tests the code that disables garbage collection if the child
        process will execute any Python (i.e. when preexec_fn is used).
        """
        def raise_runtime_error():
            raise RuntimeError("this shouldn't escape")
        enabled = gc.isenabled()
        orig_gc_disable = gc.disable
        orig_gc_isenabled = gc.isenabled
        try:
            # Popen must restore the gc state it found, whether gc was
            # disabled or enabled at call time.
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")
            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
            # Errors raised by the gc module itself must escape to the caller.
            gc.disable = raise_runtime_error
            self.assertRaises(RuntimeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
            del gc.isenabled  # force an AttributeError
            self.assertRaises(AttributeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
        finally:
            gc.disable = orig_gc_disable
            gc.isenabled = orig_gc_isenabled
            if not enabled:
                gc.disable()
    @unittest.skipIf(
        sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
    def test_preexec_fork_failure(self):
        """fork() failure with a preexec_fn must not mask the real error.

        The internal code did not preserve the previous exception when
        re-enabling garbage collection.
        """
        try:
            from resource import getrlimit, setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        limits = getrlimit(RLIMIT_NPROC)
        [_, hard] = limits
        # Forbid any new processes so fork() fails with EAGAIN.
        setrlimit(RLIMIT_NPROC, (0, hard))
        self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
        try:
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
        except BlockingIOError:
            # Forking should raise EAGAIN, translated to BlockingIOError
            pass
        else:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
    def test_args_string(self):
        """Popen accepts a single string path to an executable script."""
        # args is a string
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        p = subprocess.Popen(fname)
        p.wait()
        os.remove(fname)
        self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
    def test_shell_sequence(self):
        """shell=True with a sequence args expands env vars via the shell."""
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "apple"
        p = subprocess.Popen(["echo $FRUIT"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
    def test_shell_string(self):
        """shell=True with a string command expands env vars via the shell."""
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "apple"
        p = subprocess.Popen("echo $FRUIT", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
    def test_call_string(self):
        """call() accepts a single string path to an executable script."""
        # call() function with string argument on UNIX
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        rc = subprocess.call(fname)
        os.remove(fname)
        self.assertEqual(rc, 47)
    def test_specific_shell(self):
        """shell=True with executable= passes the right name as argv[0].

        Issue #9265: Incorrect name passed as arg[0].
        """
        shells = []
        for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
            for name in ['bash', 'ksh']:
                sh = os.path.join(prefix, name)
                if os.path.isfile(sh):
                    shells.append(sh)
        if not shells: # Will probably work for any shell but csh.
            self.skipTest("bash or ksh required for this test")
        sh = '/bin/sh'
        if os.path.isfile(sh) and not os.path.islink(sh):
            # Test will fail if /bin/sh is a symlink to csh.
            shells.append(sh)
        for sh in shells:
            # Each shell echoes its own $0, which must match the
            # executable we asked for.
            p = subprocess.Popen("echo $0", executable=sh, shell=True,
                                 stdout=subprocess.PIPE)
            with p:
                self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        """Spawn a sleeping child, invoke Popen.<method>(*args), return it.

        The caller is responsible for communicate()/wait() on the result.
        """
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p
    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        """Invoke Popen.<method>(*args) on a child that has already exited.

        The call must not raise even though the process is dead.
        """
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
    def test_send_signal(self):
        """send_signal(SIGINT) interrupts the child (KeyboardInterrupt)."""
        p = self._kill_process('send_signal', signal.SIGINT)
        _, stderr = p.communicate()
        self.assertIn(b'KeyboardInterrupt', stderr)
        self.assertNotEqual(p.wait(), 0)
    def test_kill(self):
        """kill() terminates the child with SIGKILL and no stderr output."""
        p = self._kill_process('kill')
        _, stderr = p.communicate()
        self.assertEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGKILL)
    def test_terminate(self):
        """terminate() ends the child with SIGTERM and no stderr output."""
        p = self._kill_process('terminate')
        _, stderr = p.communicate()
        self.assertEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGTERM)
    def test_send_signal_dead(self):
        # Sending a signal to a dead process must not raise.
        self._kill_dead_process('send_signal', signal.SIGINT)
    def test_kill_dead(self):
        # Killing a dead process must not raise.
        self._kill_dead_process('kill')
    def test_terminate_dead(self):
        # Terminating a dead process must not raise.
        self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
    def check_close_std_fds(self, fds):
        """Verify subprocess pipes work with the given std fds closed.

        Issue #9905: test that subprocess pipes still work properly with
        some standard fds closed.
        """
        stdin = 0
        saved_fds = self._save_fds(fds)
        # If fd 0 is among the closed fds, hand its saved duplicate to the
        # child as stdin so the child still has a usable standard input.
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            self.assertEqual(out, b'apple')
            self.assertEqual(err, b'orange')
        finally:
            self._restore_fds(saved_fds)
    def test_close_fd_0(self):
        # Pipes must work with stdin (fd 0) closed.
        self.check_close_std_fds([0])
    def test_close_fd_1(self):
        # Pipes must work with stdout (fd 1) closed.
        self.check_close_std_fds([1])
    def test_close_fd_2(self):
        # Pipes must work with stderr (fd 2) closed.
        self.check_close_std_fds([2])
    def test_close_fds_0_1(self):
        # Pipes must work with fds 0 and 1 closed.
        self.check_close_std_fds([0, 1])
    def test_close_fds_0_2(self):
        # Pipes must work with fds 0 and 2 closed.
        self.check_close_std_fds([0, 2])
    def test_close_fds_1_2(self):
        # Pipes must work with fds 1 and 2 closed.
        self.check_close_std_fds([1, 2])
    def test_close_fds_0_1_2(self):
        # Issue #10806: test that subprocess pipes still work properly with
        # all standard fds closed.
        self.check_close_std_fds([0, 1, 2])
    def test_small_errpipe_write_fd(self):
        """Issue #15798: Popen should work when stdio fds are available."""
        new_stdin = os.dup(0)
        new_stdout = os.dup(1)
        try:
            os.close(0)
            os.close(1)
            # Side test: if errpipe_write fails to have its CLOEXEC
            # flag set this should cause the parent to think the exec
            # failed.  Extremely unlikely: everyone supports CLOEXEC.
            subprocess.Popen([
                sys.executable, "-c",
                "print('AssertionError:0:CLOEXEC failure.')"]).wait()
        finally:
            # Restore original stdin and stdout
            os.dup2(new_stdin, 0)
            os.dup2(new_stdout, 1)
            os.close(new_stdin)
            os.close(new_stdout)
    def test_remapping_std_fds(self):
        """The child must correctly rearrange std fds passed out of order."""
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        try:
            temp_fds = [fd for fd, fname in temps]
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # write some data to what will become stdin, and rewind
            os.write(temp_fds[1], b"STDIN")
            os.lseek(temp_fds[1], 0, 0)
            # move the standard file descriptors out of the way
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the file objects over the standard fd's
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # now use those files in the "wrong" order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=temp_fds[1],
                    stdout=temp_fds[2],
                    stderr=temp_fds[0])
                p.wait()
            finally:
                self._restore_fds(saved_fds)
            for fd in temp_fds:
                os.lseek(fd, 0, 0)
            # Check that the child wrote to the files we designated,
            # not to the ones dup'ed over fds 0-2.
            out = os.read(temp_fds[2], 1024)
            err = os.read(temp_fds[0], 1024).strip()
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
        """Run a child with std fds mapped to the given permutation of 0-2.

        Verifies the child reads/writes through the remapped descriptors.
        """
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        temp_fds = [fd for fd, fname in temps]
        try:
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)
            # save a copy of the standard file descriptors
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the temp files over the standard fd's 0, 1, 2
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)
                # write some data to what will become stdin, and rewind
                os.write(stdin_no, b"STDIN")
                os.lseek(stdin_no, 0, 0)
                # now use those files in the given order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=stdin_no,
                    stdout=stdout_no,
                    stderr=stderr_no)
                p.wait()
                for fd in temp_fds:
                    os.lseek(fd, 0, 0)
                out = os.read(stdout_no, 1024)
                err = os.read(stderr_no, 1024).strip()
            finally:
                self._restore_fds(saved_fds)
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")
        finally:
            for fd in temp_fds:
                os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
    def test_swap_fds(self):
        """Exercise every permutation of std fds 0, 1, 2 (see #12607)."""
        self.check_swap_fds(0, 1, 2)
        self.check_swap_fds(0, 2, 1)
        self.check_swap_fds(1, 0, 2)
        self.check_swap_fds(1, 2, 0)
        self.check_swap_fds(2, 0, 1)
        self.check_swap_fds(2, 1, 0)
    def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
        """Remap std fds while one of 0-2 is closed; verify the mapping.

        *from_fds* are the parent descriptors to redirect, *to_fds* the
        child std fd numbers they must land on (see issue #32844).
        """
        saved_fds = self._save_fds(range(3))
        try:
            for from_fd in from_fds:
                with tempfile.TemporaryFile() as f:
                    os.dup2(f.fileno(), from_fd)
            # Close the one std fd that is not being redirected.
            fd_to_close = (set(range(3)) - set(from_fds)).pop()
            os.close(fd_to_close)
            arg_names = ['stdin', 'stdout', 'stderr']
            kwargs = {}
            for from_fd, to_fd in zip(from_fds, to_fds):
                kwargs[arg_names[to_fd]] = from_fd
            code = textwrap.dedent(r'''
                import os, sys
                skipped_fd = int(sys.argv[1])
                for fd in range(3):
                    if fd != skipped_fd:
                        os.write(fd, str(fd).encode('ascii'))
            ''')
            # The child writes each std fd's own number to it, so the
            # parent can read back which child fd each file ended up as.
            skipped_fd = (set(range(3)) - set(to_fds)).pop()
            rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
                                 **kwargs)
            self.assertEqual(rc, 0)
            for from_fd, to_fd in zip(from_fds, to_fds):
                os.lseek(from_fd, 0, os.SEEK_SET)
                read_bytes = os.read(from_fd, 1024)
                read_fds = list(map(int, read_bytes.decode('ascii')))
                msg = textwrap.dedent(f"""
                    When testing {from_fds} to {to_fds} redirection,
                    parent descriptor {from_fd} got redirected
                    to descriptor(s) {read_fds} instead of descriptor {to_fd}.
                    """)
                self.assertEqual([to_fd], read_fds, msg)
        finally:
            self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
    def test_swap_std_fds_with_one_closed(self):
        # All 2-of-3 redirections, with the remaining std fd closed (#32844).
        for from_fds in itertools.combinations(range(3), 2):
            for to_fds in itertools.permutations(range(3), 2):
                self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
    def test_surrogates_error_message(self):
        """preexec_fn errors with surrogate chars must not crash the parent."""
        def prepare():
            raise ValueError("surrogate:\uDCff")
        try:
            subprocess.call(
                ZERO_RETURN_CMD,
                preexec_fn=prepare)
        except ValueError as err:
            # Pure Python implementations keeps the message
            self.assertIsNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "surrogate:\uDCff")
        except subprocess.SubprocessError as err:
            # _posixsubprocess uses a default message
            self.assertIsNotNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "Exception occurred in preexec_fn.")
        else:
            self.fail("Expected ValueError or subprocess.SubprocessError")
    def test_undecodable_env(self):
        """Env vars with surrogate-escaped bytes round-trip to the child."""
        for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
            encoded_value = value.encode("ascii", "surrogateescape")
            # test str with surrogates
            script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = value
            # Use C locale to get ASCII for the locale encoding to force
            # surrogate-escaping of \xFF in the child process
            env['LC_ALL'] = 'C'
            decoded_value = value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
            # test bytes
            key = key.encode("ascii", "surrogateescape")
            script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = encoded_value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
    def test_bytes_program(self):
        """Program paths given as bytes work with str and bytes PATH envs."""
        abs_program = os.fsencode(ZERO_RETURN_CMD[0])
        args = list(ZERO_RETURN_CMD[1:])
        path, program = os.path.split(ZERO_RETURN_CMD[0])
        program = os.fsencode(program)
        # absolute bytes path
        exitcode = subprocess.call([abs_program]+args)
        self.assertEqual(exitcode, 0)
        # absolute bytes path as a string
        cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
        exitcode = subprocess.call(cmd, shell=True)
        self.assertEqual(exitcode, 0)
        # bytes program, unicode PATH
        env = os.environ.copy()
        env["PATH"] = path
        exitcode = subprocess.call([program]+args, env=env)
        self.assertEqual(exitcode, 0)
        # bytes program, bytes PATH
        envb = os.environb.copy()
        envb[b"PATH"] = os.fsencode(path)
        exitcode = subprocess.call([program]+args, env=envb)
        self.assertEqual(exitcode, 0)
    def test_pipe_cloexec(self):
        """Pipe fds created by Popen must be CLOEXEC (not leak to siblings)."""
        sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        p1 = subprocess.Popen([sys.executable, sleeper],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)
        self.addCleanup(p1.communicate, b'')
        # p2 reports every fd it sees open; none of p1's pipe ends may
        # appear there even with close_fds=False.
        p2 = subprocess.Popen([sys.executable, fd_status],
                              stdout=subprocess.PIPE, close_fds=False)
        output, error = p2.communicate()
        result_fds = set(map(int, output.split(b',')))
        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                            p1.stderr.fileno()])
        self.assertFalse(result_fds & unwanted_fds,
                         "Expected no fds from %r to be open in child, "
                         "found %r" %
                              (unwanted_fds, result_fds & unwanted_fds))
    def test_pipe_cloexec_real_tools(self):
        """A two-stage pipeline must not hang because of leaked pipe fds."""
        qcat = support.findfile("qcat.py", subdir="subprocessdata")
        qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
        subdata = b'zxcvbn'
        data = subdata * 4 + b'\n'
        p1 = subprocess.Popen([sys.executable, qcat],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              close_fds=False)
        p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                              stdin=p1.stdout, stdout=subprocess.PIPE,
                              close_fds=False)
        self.addCleanup(p1.wait)
        self.addCleanup(p2.wait)
        def kill_p1():
            try:
                p1.terminate()
            except ProcessLookupError:
                pass
        def kill_p2():
            try:
                p2.terminate()
            except ProcessLookupError:
                pass
        self.addCleanup(kill_p1)
        self.addCleanup(kill_p2)
        p1.stdin.write(data)
        p1.stdin.close()
        # If p2 inherited a stray copy of p1's stdin write end, p1 would
        # never see EOF and the pipeline would hang; the select() timeout
        # turns that hang into a test failure.
        readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
        self.assertTrue(readfiles, "The child hung")
        self.assertEqual(p2.stdout.read(), data)
        p1.stdout.close()
        p2.stdout.close()
    def test_close_fds(self):
        """close_fds=True closes all non-std fds; pass_fds keeps chosen ones."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        fds = os.pipe()
        self.addCleanup(os.close, fds[0])
        self.addCleanup(os.close, fds[1])
        open_fds = set(fds)
        # add a bunch more fds
        for _ in range(9):
            fd = os.open(os.devnull, os.O_RDONLY)
            self.addCleanup(os.close, fd)
            open_fds.add(fd)
        for fd in open_fds:
            os.set_inheritable(fd, True)
        # close_fds=False: all inheritable fds must survive in the child.
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=False)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertEqual(remaining_fds & open_fds, open_fds,
                         "Some fds were closed")
        # close_fds=True: none of them may survive.
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse(remaining_fds & open_fds,
                         "Some fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
        # Keep some of the fd's we opened open in the subprocess.
        # This tests _posixsubprocess.c's proper handling of fds_to_keep.
        fds_to_keep = set(open_fds.pop() for _ in range(8))
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=fds_to_keep)
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
                         "Some fds not in pass_fds were left open")
        self.assertIn(1, remaining_fds, "Subprocess failed")
    @unittest.skipIf(sys.platform.startswith("freebsd") and
                     os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                     "Requires fdescfs mounted on /dev/fd on FreeBSD.")
    def test_close_fds_when_max_fd_is_lowered(self):
        """Confirm that issue21618 is fixed (may fail under valgrind)."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # This launches the meat of the test in a child process to
        # avoid messing with the larger unittest processes maximum
        # number of file descriptors.
        # This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE aftr setting up
        #   a bunch of high open fds above the new lower rlimit.
        #   Those are reported via stdout before launching a new
        #   process with close_fds=False to run the actual test:
        #   +--> The TEST: This one launches a fd_status.py
        #     subprocess with close_fds=True so we can find out if
        #     any of the fds above the lowered rlimit are still open.
        p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
        '''
        import os, resource, subprocess, sys, textwrap
        open_fds = set()
        # Add a bunch more fds to pass down.
        for _ in range(40):
            fd = os.open(os.devnull, os.O_RDONLY)
            open_fds.add(fd)
        # Leave a two pairs of low ones available for use by the
        # internal child error pipe and the stdout pipe.
        # We also leave 10 more open as some Python buildbots run into
        # "too many open files" errors during the test if we do not.
        for fd in sorted(open_fds)[:14]:
            os.close(fd)
            open_fds.remove(fd)
        for fd in open_fds:
            #self.addCleanup(os.close, fd)
            os.set_inheritable(fd, True)
        max_fd_open = max(open_fds)
        # Communicate the open_fds to the parent unittest.TestCase process.
        print(','.join(map(str, sorted(open_fds))))
        sys.stdout.flush()
        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            # 29 is lower than the highest fds we are leaving open.
            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
            # Launch a new Python interpreter with our low fd rlim_cur that
            # inherits open fds above that limit.  It then uses subprocess
            # with close_fds=True to get a report of open fds in the child.
            # An explicit list of fds to check is passed to fd_status.py as
            # letting fd_status rely on its default logic would miss the
            # fds above rlim_cur as it normally only checks up to that limit.
            subprocess.Popen(
                [sys.executable, '-c',
                 textwrap.dedent("""
                     import subprocess, sys
                     subprocess.Popen([sys.executable, %r] +
                                      [str(x) for x in range({max_fd})],
                                      close_fds=True).wait()
                     """.format(max_fd=max_fd_open+1))],
                close_fds=False).wait()
        finally:
            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
        ''' % fd_status)], stdout=subprocess.PIPE)
        output, unused_stderr = p.communicate()
        output_lines = output.splitlines()
        self.assertEqual(len(output_lines), 2,
                         msg="expected exactly two lines of output:\n%r" % output)
        opened_fds = set(map(int, output_lines[0].strip().split(b',')))
        remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
        self.assertFalse(remaining_fds & opened_fds,
                         msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
    @support.requires_mac_ver(10, 5)
    def test_pass_fds(self):
        """pass_fds keeps exactly the listed fds open in the child."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        open_fds = set()
        for x in range(5):
            fds = os.pipe()
            self.addCleanup(os.close, fds[0])
            self.addCleanup(os.close, fds[1])
            os.set_inheritable(fds[0], True)
            os.set_inheritable(fds[1], True)
            open_fds.update(fds)
        for fd in open_fds:
            # Only the single passed fd may survive; all siblings must close.
            p = subprocess.Popen([sys.executable, fd_status],
                                 stdout=subprocess.PIPE, close_fds=True,
                                 pass_fds=(fd, ))
            output, ignored = p.communicate()
            remaining_fds = set(map(int, output.split(b',')))
            to_be_closed = open_fds - {fd}
            self.assertIn(fd, remaining_fds, "fd to be passed not passed")
            self.assertFalse(remaining_fds & to_be_closed,
                             "fd to be closed passed")
            # pass_fds overrides close_fds with a warning.
            with self.assertWarns(RuntimeWarning) as context:
                self.assertFalse(subprocess.call(
                        ZERO_RETURN_CMD,
                        close_fds=False, pass_fds=(fd, )))
            self.assertIn('overriding close_fds', str(context.warning))
    def test_pass_fds_inheritable(self):
        """pass_fds makes fds inheritable in the child, not in the parent."""
        script = support.findfile("fd_status.py", subdir="subprocessdata")
        inheritable, non_inheritable = os.pipe()
        self.addCleanup(os.close, inheritable)
        self.addCleanup(os.close, non_inheritable)
        os.set_inheritable(inheritable, True)
        os.set_inheritable(non_inheritable, False)
        pass_fds = (inheritable, non_inheritable)
        args = [sys.executable, script]
        args += list(map(str, pass_fds))
        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=pass_fds)
        output, ignored = p.communicate()
        fds = set(map(int, output.split(b',')))
        # the inheritable file descriptor must be inherited, so its inheritable
        # flag must be set in the child process after fork() and before exec()
        self.assertEqual(fds, set(pass_fds), "output=%a" % output)
        # inheritable flag must not be changed in the parent process
        self.assertEqual(os.get_inheritable(inheritable), True)
        self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
    def test_pass_fds_redirected(self):
        """Regression test for https://bugs.python.org/issue32270."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        pass_fds = []
        for _ in range(2):
            fd = os.open(os.devnull, os.O_RDWR)
            self.addCleanup(os.close, fd)
            pass_fds.append(fd)
        stdout_r, stdout_w = os.pipe()
        self.addCleanup(os.close, stdout_r)
        self.addCleanup(os.close, stdout_w)
        pass_fds.insert(1, stdout_w)
        # Each passed fd doubles as a stdio redirection target; both the
        # original fd numbers and fds 0-2 must be open in the child.
        with subprocess.Popen([sys.executable, fd_status],
                              stdin=pass_fds[0],
                              stdout=pass_fds[1],
                              stderr=pass_fds[2],
                              close_fds=True,
                              pass_fds=pass_fds):
            output = os.read(stdout_r, 1024)
        fds = {int(num) for num in output.split(b',')}
        self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
    def test_stdout_stdin_are_single_inout_fd(self):
        # The same read/write file object may serve as both stdout and stdin.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen(ZERO_RETURN_CMD,
                                 stdout=inout, stdin=inout)
            p.wait()
    def test_stdout_stderr_are_single_inout_fd(self):
        # The same read/write file object may serve as both stdout and stderr.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen(ZERO_RETURN_CMD,
                                 stdout=inout, stderr=inout)
            p.wait()
    def test_stderr_stdin_are_single_inout_fd(self):
        # The same read/write file object may serve as both stderr and stdin.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen(ZERO_RETURN_CMD,
                                 stderr=inout, stdin=inout)
            p.wait()
    def test_wait_when_sigchild_ignored(self):
        """wait() must work even when the child ignores SIGCHLD."""
        # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
        sigchild_ignore = support.findfile("sigchild_ignore.py",
                                           subdir="subprocessdata")
        p = subprocess.Popen([sys.executable, sigchild_ignore],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                         " non-zero with this error:\n%s" %
                         stderr.decode('utf-8'))
    def test_select_unbuffered(self):
        """bufsize=0 pipes must be readable via select() without buffering.

        Issue #11459: bufsize=0 should really set the pipes as
        unbuffered (and therefore let select() work properly).
        """
        select = support.import_module("select")
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple")'],
                             stdout=subprocess.PIPE,
                             bufsize=0)
        f = p.stdout
        self.addCleanup(f.close)
        try:
            # A buffered pipe would swallow the remaining byte; unbuffered,
            # select() must still report the fd as readable.
            self.assertEqual(f.read(4), b"appl")
            self.assertIn(f, select.select([f], [], [], 0.0)[0])
        finally:
            p.wait()
    def test_zombie_fast_process_del(self):
        """A Popen deleted before exit must land in subprocess._active.

        Issue #12650: on Unix, if Popen.__del__() was called before the
        process exited, it wouldn't be added to subprocess._active, and would
        remain a zombie.
        """
        # spawn a Popen, and delete its reference before it exits
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, time;'
                              'time.sleep(0.2)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with support.check_warnings(('', ResourceWarning)):
            p = None
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
    def test_leak_fast_process_del_killed(self):
        """A killed, deleted Popen must eventually be reaped from _active.

        Issue #12650: on Unix, if Popen.__del__() was called before the
        process exited, and the process got killed by a signal, it would never
        be removed from subprocess._active, which triggered a FD and memory
        leak.
        """
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with support.check_warnings(('', ResourceWarning)):
            p = None
            support.gc_collect()  # For PyPy or other GCs.
        os.kill(pid, signal.SIGKILL)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            # check that p is in the active processes list
            self.assertIn(ident, [id(o) for o in subprocess._active])
        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(OSError):
            with subprocess.Popen(NONEXISTING_CMD,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        if mswindows:
            # subprocess._active is not used on Windows and is set to None.
            self.assertIsNone(subprocess._active)
        else:
            self.assertNotIn(ident, [id(o) for o in subprocess._active])
    def test_close_fds_after_preexec(self):
        """Fds dup'ed to by preexec_fn are still closed by close_fds=True."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
        # this FD is used as dup2() target by preexec_fn, and should be closed
        # in the child process
        fd = os.dup(1)
        self.addCleanup(os.close, fd)
        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             preexec_fn=lambda: os.dup2(1, fd))
        output, ignored = p.communicate()
        remaining_fds = set(map(int, output.split(b',')))
        self.assertNotIn(fd, remaining_fds)
    @support.cpython_only
    def test_fork_exec(self):
        """Bad argument types to fork_exec() must raise, not crash.

        Issue #22290: fork_exec() must not crash on memory allocation failure
        or other errors.
        """
        import _posixsubprocess
        gc_enabled = gc.isenabled()
        try:
            # Use a preexec function and enable the garbage collector
            # to force fork_exec() to re-enable the garbage collector
            # on error.
            func = lambda: None
            gc.enable()
            # Each tuple puts a wrong type (123) in one positional slot.
            for args, exe_list, cwd, env_list in (
                (123,      [b"exe"], None, [b"env"]),
                ([b"arg"], 123,      None, [b"env"]),
                ([b"arg"], [b"exe"], 123,  [b"env"]),
                ([b"arg"], [b"exe"], None, 123),
                ):
                with self.assertRaises(TypeError) as err:
                    _posixsubprocess.fork_exec(
                        args, exe_list,
                        True, (), cwd, env_list,
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True,
                        False, [], 0, -1,
                        func)
                # Attempt to prevent
                # "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from passing the test.  More refactoring to have us start
                # with a valid *args list, confirm a good call with that works
                # before mutating it in various ways to ensure that bad calls
                # with individual arg type errors raise a typeerror would be
                # ideal.  Saving that for a future PR...
                self.assertNotIn('takes exactly', str(err.exception))
        finally:
            if not gc_enabled:
                gc.disable()
    @support.cpython_only
    def test_fork_exec_sorted_fd_sanity_check(self):
        """Issue #23564: sanity check the fork_exec() fds_to_keep sanity check."""
        import _posixsubprocess
        class BadInt:
            # Yields its value once via __int__, then raises, to exercise
            # error handling partway through fds_to_keep conversion.
            first = True
            def __init__(self, value):
                self.value = value
            def __int__(self):
                if self.first:
                    self.first = False
                    return self.value
                raise ValueError
        gc_enabled = gc.isenabled()
        try:
            gc.enable()
            for fds_to_keep in (
                (-1, 2, 3, 4, 5),  # Negative number.
                ('str', 4),  # Not an int.
                (18, 23, 42, 2**63),  # Out of range.
                (5, 4),  # Not sorted.
                (6, 7, 7, 8),  # Duplicate.
                (BadInt(1), BadInt(2)),
            ):
                with self.assertRaises(
                        ValueError,
                        msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                    _posixsubprocess.fork_exec(
                        [b"false"], [b"false"],
                        True, fds_to_keep, None, [b"env"],
                        -1, -1, -1, -1,
                        1, 2, 3, 4,
                        True, True,
                        None, None, None, -1,
                        None)
                self.assertIn('fds_to_keep', str(c.exception))
        finally:
            if not gc_enabled:
                gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
# Don't check the returncode value: the test reads the exit status,
# so Popen failed to read it and uses a default returncode instead.
self.assertIsNotNone(proc.returncode)
def test_send_signal_race2(self):
# bpo-40550: the process might exist between the returncode check and
# the kill operation
p = subprocess.Popen([sys.executable, '-c', 'exit(1)'])
# wait for process to exit
while not p.returncode:
p.poll()
with mock.patch.object(p, 'poll', new=lambda: None):
p.returncode = None
p.send_signal(signal.SIGTERM)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
| POSIXProcessTestCase |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/application/application.py | {
"start": 56075,
"end": 63242
} | class ____(KeyBindingsBase):
"""
The `KeyBindings` of key bindings for a `Application`.
This merges the global key bindings with the one of the current user
control.
"""
def __init__(self, app: Application[_AppResult]) -> None:
self.app = app
self._cache: SimpleCache[
tuple[Window, frozenset[UIControl]], KeyBindingsBase
] = SimpleCache()
@property
def _version(self) -> Hashable:
"""Not needed - this object is not going to be wrapped in another
KeyBindings object."""
raise NotImplementedError
@property
def bindings(self) -> list[Binding]:
"""Not needed - this object is not going to be wrapped in another
KeyBindings object."""
raise NotImplementedError
def _create_key_bindings(
self, current_window: Window, other_controls: list[UIControl]
) -> KeyBindingsBase:
"""
Create a `KeyBindings` object that merges the `KeyBindings` from the
`UIControl` with all the parent controls and the global key bindings.
"""
key_bindings = []
collected_containers = set()
# Collect key bindings from currently focused control and all parent
# controls. Don't include key bindings of container parent controls.
container: Container = current_window
while True:
collected_containers.add(container)
kb = container.get_key_bindings()
if kb is not None:
key_bindings.append(kb)
if container.is_modal():
break
parent = self.app.layout.get_parent(container)
if parent is None:
break
else:
container = parent
# Include global bindings (starting at the top-model container).
for c in walk(container):
if c not in collected_containers:
kb = c.get_key_bindings()
if kb is not None:
key_bindings.append(GlobalOnlyKeyBindings(kb))
# Add App key bindings
if self.app.key_bindings:
key_bindings.append(self.app.key_bindings)
# Add mouse bindings.
key_bindings.append(
ConditionalKeyBindings(
self.app._page_navigation_bindings,
self.app.enable_page_navigation_bindings,
)
)
key_bindings.append(self.app._default_bindings)
# Reverse this list. The current control's key bindings should come
# last. They need priority.
key_bindings = key_bindings[::-1]
return merge_key_bindings(key_bindings)
@property
def _key_bindings(self) -> KeyBindingsBase:
current_window = self.app.layout.current_window
other_controls = list(self.app.layout.find_all_controls())
key = current_window, frozenset(other_controls)
return self._cache.get(
key, lambda: self._create_key_bindings(current_window, other_controls)
)
def get_bindings_for_keys(self, keys: KeysTuple) -> list[Binding]:
return self._key_bindings.get_bindings_for_keys(keys)
def get_bindings_starting_with_keys(self, keys: KeysTuple) -> list[Binding]:
return self._key_bindings.get_bindings_starting_with_keys(keys)
async def _do_wait_for_enter(wait_text: AnyFormattedText) -> None:
"""
Create a sub application to wait for the enter key press.
This has two advantages over using 'input'/'raw_input':
- This will share the same input/output I/O.
- This doesn't block the event loop.
"""
from prompt_toolkit.shortcuts import PromptSession
key_bindings = KeyBindings()
@key_bindings.add("enter")
def _ok(event: E) -> None:
event.app.exit()
@key_bindings.add(Keys.Any)
def _ignore(event: E) -> None:
"Disallow typing."
pass
session: PromptSession[None] = PromptSession(
message=wait_text, key_bindings=key_bindings
)
try:
await session.app.run_async()
except KeyboardInterrupt:
pass # Control-c pressed. Don't propagate this error.
@contextmanager
def attach_winch_signal_handler(
handler: Callable[[], None],
) -> Generator[None, None, None]:
"""
Attach the given callback as a WINCH signal handler within the context
manager. Restore the original signal handler when done.
The `Application.run` method will register SIGWINCH, so that it will
properly repaint when the terminal window resizes. However, using
`run_in_terminal`, we can temporarily send an application to the
background, and run an other app in between, which will then overwrite the
SIGWINCH. This is why it's important to restore the handler when the app
terminates.
"""
# The tricky part here is that signals are registered in the Unix event
# loop with a wakeup fd, but another application could have registered
# signals using signal.signal directly. For now, the implementation is
# hard-coded for the `asyncio.unix_events._UnixSelectorEventLoop`.
# No WINCH? Then don't do anything.
sigwinch = getattr(signal, "SIGWINCH", None)
if sigwinch is None or not in_main_thread():
yield
return
# Keep track of the previous handler.
# (Only UnixSelectorEventloop has `_signal_handlers`.)
loop = get_running_loop()
previous_winch_handler = getattr(loop, "_signal_handlers", {}).get(sigwinch)
try:
loop.add_signal_handler(sigwinch, handler)
yield
finally:
# Restore the previous signal handler.
loop.remove_signal_handler(sigwinch)
if previous_winch_handler is not None:
loop.add_signal_handler(
sigwinch,
previous_winch_handler._callback,
*previous_winch_handler._args,
)
@contextmanager
def _restore_sigint_from_ctypes() -> Generator[None, None, None]:
# The following functions are part of the stable ABI since python 3.2
# See: https://docs.python.org/3/c-api/sys.html#c.PyOS_getsig
# Inline import: these are not available on Pypy.
try:
from ctypes import c_int, c_void_p, pythonapi
except ImportError:
have_ctypes_signal = False
else:
# GraalPy has the functions, but they don't work
have_ctypes_signal = sys.implementation.name != "graalpy"
if have_ctypes_signal:
# PyOS_sighandler_t PyOS_getsig(int i)
pythonapi.PyOS_getsig.restype = c_void_p
pythonapi.PyOS_getsig.argtypes = (c_int,)
# PyOS_sighandler_t PyOS_setsig(int i, PyOS_sighandler_t h)
pythonapi.PyOS_setsig.restype = c_void_p
pythonapi.PyOS_setsig.argtypes = (
c_int,
c_void_p,
)
sigint = signal.getsignal(signal.SIGINT)
if have_ctypes_signal:
sigint_os = pythonapi.PyOS_getsig(signal.SIGINT)
try:
yield
finally:
if sigint is not None:
signal.signal(signal.SIGINT, sigint)
if have_ctypes_signal:
pythonapi.PyOS_setsig(signal.SIGINT, sigint_os)
| _CombinedRegistry |
python | pytorch__pytorch | torch/optim/_muon.py | {
"start": 3110,
"end": 13662
} | class ____(Optimizer):
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
weight_decay: float = 0.1,
momentum: float = 0.95,
nesterov: bool = True,
ns_coefficients: tuple[float, float, float] = (DEFAULT_A, DEFAULT_B, DEFAULT_C),
eps: float = EPS,
ns_steps: int = DEFAULT_NS_STEPS,
adjust_lr_fn: Optional[str] = None,
) -> None:
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Learning rate should be >= 0 but is: {lr}")
if not 0.0 <= momentum:
raise ValueError(f"momentum should be >= 0 but is: {momentum}")
if not 0.0 <= weight_decay:
raise ValueError(f"weight decay should be >= 0 but is: {weight_decay}")
if adjust_lr_fn is not None and adjust_lr_fn not in [
"original",
"match_rms_adamw",
]:
raise ValueError(
f"Adjust learning rate function {adjust_lr_fn} is not supported"
)
defaults = {
"lr": lr,
"weight_decay": weight_decay,
"momentum": momentum,
"nesterov": nesterov,
"ns_coefficients": ns_coefficients,
"eps": eps,
"ns_steps": ns_steps,
"adjust_lr_fn": adjust_lr_fn,
}
super().__init__(params, defaults)
for group in self.param_groups:
for p in group["params"]:
if p.ndim != 2:
raise ValueError(
f"Muon only supports 2D parameters whereas we found a parameter with size: {p.size()}"
)
def _init_group(
self,
group: MutableMapping,
params_with_grad: list[Tensor],
grads: list[Tensor],
muon_momentum_bufs: list[Tensor],
) -> bool:
for p in group["params"]:
if p.grad is None:
continue
if torch.is_complex(p):
raise RuntimeError("Muon does not support complex parameters")
if p.grad.is_sparse:
raise RuntimeError("Muon does not support sparse gradients")
params_with_grad.append(p)
grads.append(p.grad)
state = self.state[p]
if "momentum_buffer" not in state:
state["momentum_buffer"] = torch.zeros_like(
p.grad, memory_format=torch.preserve_format
)
muon_momentum_bufs.append(state["momentum_buffer"])
return False # has_complex
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step."""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
lr = group["lr"]
weight_decay = group["weight_decay"]
momentum = group["momentum"]
params_with_grad: list[Tensor] = []
grads: list[Tensor] = []
muon_momentum_bufs: list[Tensor] = []
has_complex = self._init_group(
group,
params_with_grad,
grads,
muon_momentum_bufs,
)
muon(
params_with_grad,
grads,
muon_momentum_bufs,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
nesterov=group["nesterov"],
ns_coefficients=group["ns_coefficients"],
eps=group["eps"],
ns_steps=group["ns_steps"],
adjust_lr_fn=group["adjust_lr_fn"],
has_complex=has_complex,
)
return loss
Muon.__doc__ = (
r"""Implements Muon algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \gamma \text{ (lr)},\ \lambda \text{ (weight decay)},\
\mu \text{ (momentum)},\ \textit{nesterov}\in\{True,False\},\\
&\hspace{13mm}(a,b,c)\ \text{ (NS coefficients)},\
\varepsilon \text{ (epsilon)},\ k \text{ (NS steps)},\
\theta_0 \text{ (params)},\ f(\theta) \text{ (objective)} \\
&\textbf{initialize} : B_0 \leftarrow 0 \text{ (momentum buffer)} \\[-1.ex]
&\rule{110mm}{0.4pt} \\
&\textbf{for}\ t=1\ \textbf{to}\ \ldots\ \textbf{do} \\[0.25ex]
&\hspace{5mm} g_t \leftarrow \nabla_{\theta} f_t(\theta_{t-1}) \\[0.25ex]
&\hspace{5mm} B_t \leftarrow \mu B_{t-1} + g_t \\[0.25ex]
&\hspace{5mm} \widetilde{B}_t \leftarrow
\begin{cases}
g_t + \mu B_t, & \text{if nesterov}=True \\
B_t, & \text{if nesterov}=False
\end{cases} \\[1.0ex]
&\hspace{5mm} O_t \leftarrow \mathrm{NS}^{(a,b,c)}_{k}\!\big(\widetilde{B}_t;\ \varepsilon\big) \\[0.5ex]
&\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma\,\lambda\,\theta_{t-1}
\quad\text{(decoupled weight decay)} \\[0.25ex]
&\hspace{5mm} \gamma \leftarrow \mathrm{AdjustLR}\!\big(\gamma;\ \mathrm{shape}\!\big(\theta_t \big) \big) \\[0.25ex]
&\hspace{5mm} \theta_t \leftarrow \theta_t - \gamma\, O_t \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\mathbf{return}\ \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt}s
\end{aligned}
Here, :math:`\mathrm{NS}^{(a,b,c)}_{k}(\cdot;\varepsilon)` denotes :math:`k` iterations of the
Newton–Schulz orthogonalization operator parameterized by coefficients :math:`(a,b,c)`
with numerical stabilization :math:`\varepsilon`.
The purpose for :math:`\mathrm{AdjustLR}\!\big(\gamma;\ \mathrm{shape}\!\big(\theta_t \big) \big)`
is to make the orthogonalized update have a consistent :math:`RMS` across rectangular matrices.
Keller's original implementation scales the update by :math:`\sqrt{\max\!\left(1, \frac{A}{B}\right)}`,
where :math:`A` and :math:`B` are dimension of the matrix being optimized.
Moonshot's implementation also focuses on matching :math:`RMS` of AdamW. The adjustment is computed as:
:math:`\gamma \leftarrow {0.2}\gamma\,\sqrt{\max\!\left({A}, {B}\right)}`
The method is adopted from `Muon is Scalable for LLM Training`_. Research
results show that with this adjustment Muon can directly reuse the learning rate
and weight decay tuned for AdamW.
We provide two options for the learning rate adjustment: "original", which follows Keller's
implementation, and "match_rms_adamw", which refers to Moonshot's implementation. This gives users the
flexibility to choose between the two. If `adjust_lr_fn` is not specified, the default is "original".
For further details regarding the algorithm we refer to `Muon: An optimizer for hidden layers in neural networks`_
and `Muon is Scalable for LLM Training`_.
"""
+ rf"""
Args:
{_params_doc}. Note that Muon is an optimizer for 2D parameters of neural network hidden layers. Other
parameters, such as bias, and embedding, should be optimized by a standard method such as AdamW.
lr (float, Tensor, optional): learning rate (default: 1e-3).
weight_decay (float, optional): weight decay (L2 penalty). (default: 0.1)
momentum (float, optional): momentum factor (default: 0.95)
nesterov (bool, optional): enables Nesterov momentum. Only applicable
when momentum is non-zero
ns_coefficients (tuple of three floats, optional): coefficients \(a,b,c\) for the
Newton–Schulz orthogonalization polynomial (default: ({DEFAULT_A}, {DEFAULT_B}, {DEFAULT_C}))
eps (float, optional): term added to the denominator for numerical stability. (default: {EPS})
ns_steps (int, optional): number of Newton–Schulz iteration steps. (default: {DEFAULT_NS_STEPS})
adjust_lr_fn (str, optional): function to adjust learning rate. One of "original" and "match_rms_adamw".
If not specified, we will default to use "original". (default: None)
.. _Muon\: An optimizer for hidden layers in neural networks:
https://kellerjordan.github.io/posts/muon/
.. _Muon is Scalable for LLM Training:
https://arxiv.org/pdf/2502.16982
"""
)
def _single_tensor_muon(
params: list[Tensor],
grads: list[Tensor],
muon_momentum_bufs: list[Tensor],
*,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
ns_coefficients: tuple[float, float, float],
ns_steps: int,
eps: float,
adjust_lr_fn: Optional[str],
has_complex: bool,
) -> None:
lr = _to_scalar(lr)
if has_complex:
raise ValueError("Complex parameters are not supported")
for i, param in enumerate(params):
grad = grads[i]
if grad.ndim != 2:
raise ValueError("Param gradient must be a 2D matrix")
buf = muon_momentum_bufs[i]
buf.lerp_(grad, 1 - momentum)
update = grad.lerp(buf, momentum) if nesterov else buf
update = _zeropower_via_newtonschulz(update, ns_coefficients, ns_steps, eps)
adjusted_lr = _adjust_lr(lr, adjust_lr_fn, param.shape)
param.mul_(1 - lr * weight_decay)
param.add_(update, alpha=-adjusted_lr)
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_muon)
def muon(
params: list[Tensor],
grads: list[Tensor],
muon_momentum_bufs: list[Tensor],
*,
foreach: Optional[bool] = None,
lr: float,
weight_decay: float,
momentum: float,
nesterov: bool,
ns_coefficients: tuple[float, float, float],
ns_steps: int,
eps: float,
adjust_lr_fn: Optional[str],
has_complex: bool,
) -> None:
r"""Functional API that performs Muon algorithm computation.
See :class:`~torch.optim.Muon` for details.
"""
if foreach is not None and foreach:
raise RuntimeError("Foreach is not supported for Muon yet")
func = _single_tensor_muon
func(
params,
grads,
muon_momentum_bufs,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
nesterov=nesterov,
ns_coefficients=ns_coefficients,
ns_steps=ns_steps,
eps=eps,
adjust_lr_fn=adjust_lr_fn,
has_complex=has_complex,
)
| Muon |
python | ansible__ansible | lib/ansible/plugins/action/add_host.py | {
"start": 1102,
"end": 3516
} | class ____(ActionBase):
""" Create inventory hosts and groups in the memory inventory"""
# We need to be able to modify the inventory
BYPASS_HOST_LOOP = True
_requires_connection = False
_supports_check_mode = True
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
args = self._task.args
raw = args.pop('_raw_params', {})
if isinstance(raw, Mapping):
# TODO: create 'conflict' detection in base class to deal with repeats and aliases and warn user
args = combine_vars(raw, args)
else:
raise AnsibleActionFail('Invalid raw parameters passed, requires a dictionary/mapping got a %s' % type(raw))
# Parse out any hostname:port patterns
new_name = args.get('name', args.get('hostname', args.get('host', None)))
if new_name is None:
raise AnsibleActionFail('name, host or hostname needs to be provided')
display.vv("creating host via 'add_host': hostname=%s" % new_name)
try:
name, port = parse_address(new_name, allow_ranges=False)
except Exception:
# not a parsable hostname, but might still be usable
name = new_name
port = None
if port:
args['ansible_ssh_port'] = port
groups = args.get('groupname', args.get('groups', args.get('group', '')))
# add it to the group if that was specified
new_groups = []
if groups:
if isinstance(groups, list):
group_list = groups
elif isinstance(groups, str):
group_list = groups.split(",")
else:
raise AnsibleActionFail("Groups must be specified as a list.", obj=groups)
for group_name in group_list:
if group_name not in new_groups:
new_groups.append(group_name.strip())
# Add any variables to the new_host
host_vars = dict()
special_args = frozenset(('name', 'hostname', 'groupname', 'groups'))
for k in args.keys():
if k not in special_args:
host_vars[k] = args[k]
result['changed'] = False
result['add_host'] = dict(host_name=name, groups=new_groups, host_vars=host_vars)
return result
| ActionModule |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 32511,
"end": 33503
} | class ____(Benchmark):
r"""
Sodp objective function.
This class defines the Sum Of Different Powers [1]_ global optimization
problem. This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Sodp}}(x) = \sum_{i=1}^{n} \lvert{x_{i}}\rvert^{i + 1}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
i = arange(1, self.N + 1)
return sum(abs(x) ** (i + 1))
| Sodp |
python | redis__redis-py | redis/commands/policies.py | {
"start": 9931,
"end": 10668
} | class ____(AsyncBasePolicyResolver):
"""
Async version of DynamicPolicyResolver.
"""
def __init__(
self,
policy_records: PolicyRecords,
fallback: Optional[AsyncPolicyResolver] = None,
) -> None:
"""
Parameters:
policy_records (PolicyRecords): Policy records.
fallback (Optional[AsyncPolicyResolver]): An optional resolver to be used when the
primary policies cannot handle a specific request.
"""
super().__init__(policy_records, fallback)
def with_fallback(self, fallback: "AsyncPolicyResolver") -> "AsyncPolicyResolver":
return AsyncDynamicPolicyResolver(self._policies, fallback)
| AsyncDynamicPolicyResolver |
python | allegroai__clearml | clearml/utilities/pyhocon/config_tree.py | {
"start": 21705,
"end": 22106
} | class ____(object):
def __init__(self, variable, optional, ws, instring, loc):
self.variable = variable
self.optional = optional
self.ws = ws
self.index = None
self.parent = None
self.instring = instring
self.loc = loc
def __repr__(self): # pragma: no cover
return '[ConfigSubstitution: ' + self.variable + ']'
| ConfigSubstitution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 814155,
"end": 815731
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"enterprise",
"external_identities",
"provider_type",
"tenant_id",
)
enterprise = sgqlc.types.Field(Enterprise, graphql_name="enterprise")
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
(
"members_only",
sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None),
),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
(
"user_name",
sgqlc.types.Arg(String, graphql_name="userName", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
provider_type = sgqlc.types.Field(
sgqlc.types.non_null(OIDCProviderType), graphql_name="providerType"
)
tenant_id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="tenantId")
| OIDCProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 71085,
"end": 71846
} | class ____(UuidTest):
__requires__ = ("uuid_data_type",)
datatype = UUID
__all__ = (
"ArrayTest",
"BinaryTest",
"UnicodeVarcharTest",
"UnicodeTextTest",
"JSONTest",
"JSONLegacyStringCastIndexTest",
"DateTest",
"DateTimeTest",
"DateTimeTZTest",
"TextTest",
"NumericTest",
"IntegerTest",
"IntervalTest",
"PrecisionIntervalTest",
"CastTypeDecoratorTest",
"DateTimeHistoricTest",
"DateTimeCoercedToDateTimeTest",
"TimeMicrosecondsTest",
"TimestampMicrosecondsTest",
"TimeTest",
"TimeTZTest",
"TrueDivTest",
"DateTimeMicrosecondsTest",
"DateHistoricTest",
"StringTest",
"BooleanTest",
"EnumTest",
"UuidTest",
"NativeUUIDTest",
)
| NativeUUIDTest |
python | scipy__scipy | scipy/ndimage/tests/test_fourier.py | {
"start": 302,
"end": 8080
} | class ____:
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
@pytest.mark.parametrize('dtype, dec', [("float32", 6), ("float64", 14)])
@make_xp_test_case(ndimage.fourier_gaussian)
def test_fourier_gaussian_real01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.rfft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_gaussian(a, [5.0, 2.5], shape[0], 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.irfft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(a), xp.asarray(1), decimal=dec,
check_0d=False)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
@pytest.mark.parametrize('dtype, dec', [("complex64", 6), ("complex128", 14)])
@make_xp_test_case(ndimage.fourier_gaussian)
def test_fourier_gaussian_complex01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.fft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_gaussian(a, [5.0, 2.5], -1, 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.ifft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(xp.real(a)), xp.asarray(1.0), decimal=dec,
check_0d=False)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
@pytest.mark.parametrize('dtype, dec', [("float32", 6), ("float64", 14)])
@make_xp_test_case(ndimage.fourier_uniform)
def test_fourier_uniform_real01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.rfft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_uniform(a, [5.0, 2.5], shape[0], 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.irfft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(a), xp.asarray(1.0), decimal=dec,
check_0d=False)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
@pytest.mark.parametrize('dtype, dec', [("complex64", 6), ("complex128", 14)])
@make_xp_test_case(ndimage.fourier_uniform)
def test_fourier_uniform_complex01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.fft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_uniform(a, [5.0, 2.5], -1, 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.ifft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(xp.real(a)), xp.asarray(1.0), decimal=dec,
check_0d=False)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
@pytest.mark.parametrize('dtype, dec', [("float32", 4), ("float64", 11)])
@make_xp_test_case(ndimage.fourier_shift)
def test_fourier_shift_real01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
expected = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
expected = xp.asarray(expected)
a = fft.rfft(expected, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_shift(a, [1, 1], shape[0], 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.irfft(a, n=shape[0], axis=0)
assert_array_almost_equal(a[1:, 1:], expected[:-1, :-1], decimal=dec)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
@pytest.mark.parametrize('dtype, dec', [("complex64", 4), ("complex128", 11)])
@make_xp_test_case(ndimage.fourier_shift)
def test_fourier_shift_complex01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
expected = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
expected = xp.asarray(expected)
a = fft.fft(expected, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_shift(a, [1, 1], -1, 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.ifft(a, n=shape[0], axis=0)
assert_array_almost_equal(xp.real(a)[1:, 1:], expected[:-1, :-1], decimal=dec)
assert_array_almost_equal(xp.imag(a), xp.zeros(shape), decimal=dec)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15), (1, 10)])
@pytest.mark.parametrize('dtype, dec', [("float32", 5), ("float64", 14)])
@make_xp_test_case(ndimage.fourier_ellipsoid)
def test_fourier_ellipsoid_real01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.rfft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], shape[0], 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.irfft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(a), xp.asarray(1.0), decimal=dec,
check_0d=False)
@pytest.mark.parametrize('shape', [(32, 16), (31, 15)])
@pytest.mark.parametrize('dtype, dec', [("complex64", 5), ("complex128", 14)])
@make_xp_test_case(ndimage.fourier_ellipsoid)
def test_fourier_ellipsoid_complex01(self, shape, dtype, dec, xp):
fft = getattr(xp, 'fft')
a = np.zeros(shape, dtype=dtype)
a[0, 0] = 1.0
a = xp.asarray(a)
a = fft.fft(a, n=shape[0], axis=0)
a = fft.fft(a, n=shape[1], axis=1)
a = ndimage.fourier_ellipsoid(a, [5.0, 2.5], -1, 0)
a = fft.ifft(a, n=shape[1], axis=1)
a = fft.ifft(a, n=shape[0], axis=0)
assert_almost_equal(ndimage.sum(xp.real(a)), xp.asarray(1.0), decimal=dec,
check_0d=False)
@make_xp_test_case(ndimage.fourier_ellipsoid)
def test_fourier_ellipsoid_unimplemented_ndim(self, xp):
# arrays with ndim > 3 raise NotImplementedError
x = xp.ones((4, 6, 8, 10), dtype=xp.complex128)
with pytest.raises(NotImplementedError):
ndimage.fourier_ellipsoid(x, 3)
@make_xp_test_case(ndimage.fourier_ellipsoid)
def test_fourier_ellipsoid_1d_complex(self, xp):
# expected result of 1d ellipsoid is the same as for fourier_uniform
for shape in [(32, ), (31, )]:
for type_, dec in zip([xp.complex64, xp.complex128], [5, 14]):
x = xp.ones(shape, dtype=type_)
a = ndimage.fourier_ellipsoid(x, 5, -1, 0)
b = ndimage.fourier_uniform(x, 5, -1, 0)
assert_array_almost_equal(a, b, decimal=dec)
@pytest.mark.parametrize('shape', [(0, ), (0, 10), (10, 0)])
@pytest.mark.parametrize('dtype', ["float32", "float64",
"complex64", "complex128"])
@pytest.mark.parametrize('test_func',
[make_xp_pytest_param(ndimage.fourier_ellipsoid),
make_xp_pytest_param(ndimage.fourier_gaussian),
make_xp_pytest_param(ndimage.fourier_uniform)])
def test_fourier_zero_length_dims(self, shape, dtype, test_func, xp):
if (
is_cupy(xp)
and test_func.__name__ == "fourier_ellipsoid"
and math.prod(shape) == 0
):
pytest.xfail("CuPy's fourier_ellipsoid does not accept size==0 arrays")
dtype = getattr(xp, dtype)
a = xp.ones(shape, dtype=dtype)
b = test_func(a, 3)
xp_assert_equal(a, b)
| TestNdimageFourier |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 143,
"end": 219
} | class ____(BaseModel):
type: Literal["text"]
text: str
| TextContentPart |
python | mlflow__mlflow | mlflow/deployments/mlflow/__init__.py | {
"start": 966,
"end": 10910
} | class ____(BaseDeploymentClient):
"""
Client for interacting with the MLflow AI Gateway.
Example:
First, start the MLflow AI Gateway:
.. code-block:: bash
mlflow gateway start --config-path path/to/config.yaml
Then, create a client and use it to interact with the server:
.. code-block:: python
from mlflow.deployments import get_deploy_client
client = get_deploy_client("http://localhost:5000")
endpoints = client.list_endpoints()
assert [e.dict() for e in endpoints] == [
{
"name": "chat",
"endpoint_type": "llm/v1/chat",
"model": {"name": "gpt-4o-mini", "provider": "openai"},
"endpoint_url": "http://localhost:5000/gateway/chat/invocations",
},
]
"""
def create_deployment(self, name, model_uri, flavor=None, config=None, endpoint=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def update_deployment(self, name, model_uri=None, flavor=None, config=None, endpoint=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def delete_deployment(self, name, config=None, endpoint=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def list_deployments(self, endpoint=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def get_deployment(self, name, endpoint=None):
"""
.. warning::
This method is not implemented for `MLflowDeploymentClient`.
"""
raise NotImplementedError
def create_endpoint(self, name, config=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def update_endpoint(self, endpoint, config=None):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def delete_endpoint(self, endpoint):
"""
.. warning::
This method is not implemented for `MlflowDeploymentClient`.
"""
raise NotImplementedError
def _call_endpoint(
self,
method: str,
route: str,
json_body: str | None = None,
timeout: int | None = None,
):
call_kwargs = {}
if method.lower() == "get":
call_kwargs["params"] = json_body
else:
call_kwargs["json"] = json_body
response = http_request(
host_creds=get_default_host_creds(self.target_uri),
endpoint=route,
method=method,
timeout=MLFLOW_DEPLOYMENT_CLIENT_HTTP_REQUEST_TIMEOUT.get()
if timeout is None
else timeout,
retry_codes=MLFLOW_DEPLOYMENT_CLIENT_REQUEST_RETRY_CODES,
raise_on_status=False,
**call_kwargs,
)
augmented_raise_for_status(response)
return response.json()
def get_endpoint(self, endpoint) -> "Endpoint":
"""
Gets a specified endpoint configured for the MLflow AI Gateway.
Args:
endpoint: The name of the endpoint to retrieve.
Returns:
An `Endpoint` object representing the endpoint.
Example:
.. code-block:: python
from mlflow.deployments import get_deploy_client
client = get_deploy_client("http://localhost:5000")
endpoint = client.get_endpoint(endpoint="chat")
assert endpoint.dict() == {
"name": "chat",
"endpoint_type": "llm/v1/chat",
"model": {"name": "gpt-4o-mini", "provider": "openai"},
"endpoint_url": "http://localhost:5000/gateway/chat/invocations",
}
"""
# Delayed import to avoid importing mlflow.gateway in the module scope
from mlflow.deployments.server.config import Endpoint
route = join_paths(MLFLOW_DEPLOYMENTS_CRUD_ENDPOINT_BASE, endpoint)
response = self._call_endpoint("GET", route)
return Endpoint(
**{
**response,
"endpoint_url": resolve_endpoint_url(self.target_uri, response["endpoint_url"]),
}
)
def _list_endpoints(self, page_token=None) -> "PagedList[Endpoint]":
# Delayed import to avoid importing mlflow.gateway in the module scope
from mlflow.deployments.server.config import Endpoint
params = None if page_token is None else {"page_token": page_token}
response_json = self._call_endpoint(
"GET", MLFLOW_DEPLOYMENTS_CRUD_ENDPOINT_BASE, json_body=params
)
routes = [
Endpoint(
**{
**resp,
"endpoint_url": resolve_endpoint_url(
self.target_uri,
resp["endpoint_url"],
),
}
)
for resp in response_json.get("endpoints", [])
]
next_page_token = response_json.get("next_page_token")
return PagedList(routes, next_page_token)
def list_endpoints(self) -> "list[Endpoint]":
"""
List endpoints configured for the MLflow AI Gateway.
Returns:
A list of ``Endpoint`` objects.
Example:
.. code-block:: python
from mlflow.deployments import get_deploy_client
client = get_deploy_client("http://localhost:5000")
endpoints = client.list_endpoints()
assert [e.dict() for e in endpoints] == [
{
"name": "chat",
"endpoint_type": "llm/v1/chat",
"model": {"name": "gpt-4o-mini", "provider": "openai"},
"endpoint_url": "http://localhost:5000/gateway/chat/invocations",
},
]
"""
endpoints = []
next_page_token = None
while True:
page = self._list_endpoints(next_page_token)
endpoints.extend(page)
next_page_token = page.token
if next_page_token is None:
break
return endpoints
def predict(self, deployment_name=None, inputs=None, endpoint=None) -> dict[str, Any]:
"""
Submit a query to a configured provider endpoint.
Args:
deployment_name: Unused.
inputs: The inputs to the query, as a dictionary.
endpoint: The name of the endpoint to query.
Returns:
A dictionary containing the response from the endpoint.
Example:
.. code-block:: python
from mlflow.deployments import get_deploy_client
client = get_deploy_client("http://localhost:5000")
response = client.predict(
endpoint="chat",
inputs={"messages": [{"role": "user", "content": "Hello"}]},
)
assert response == {
"id": "chatcmpl-8OLoQuaeJSLybq3NBoe0w5eyqjGb9",
"object": "chat.completion",
"created": 1700814410,
"model": "gpt-4o-mini",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! How can I assist you today?",
},
"finish_reason": "stop",
}
],
"usage": {
"prompt_tokens": 9,
"completion_tokens": 9,
"total_tokens": 18,
},
}
Additional parameters that are valid for a given provider and endpoint configuration can be
included with the request as shown below, using an openai completions endpoint request as
an example:
.. code-block:: python
from mlflow.deployments import get_deploy_client
client = get_deploy_client("http://localhost:5000")
client.predict(
endpoint="completions",
inputs={
"prompt": "Hello!",
"temperature": 0.3,
"max_tokens": 500,
},
)
"""
query_route = join_paths(
MLFLOW_DEPLOYMENTS_ENDPOINTS_BASE, endpoint, MLFLOW_DEPLOYMENTS_QUERY_SUFFIX
)
try:
return self._call_endpoint(
"POST", query_route, inputs, MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT.get()
)
except MlflowException as e:
if isinstance(e.__cause__, requests.exceptions.Timeout):
raise MlflowException(
message=(
"The provider has timed out while generating a response to your "
"query. Please evaluate the available parameters for the query "
"that you are submitting. Some parameter values and inputs can "
"increase the computation time beyond the allowable route "
f"timeout of {MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT} "
"seconds."
),
error_code=BAD_REQUEST,
)
raise e
def run_local(name, model_uri, flavor=None, config=None):
pass
def target_help():
pass
| MlflowDeploymentClient |
python | keras-team__keras | keras/src/ops/ops_test.py | {
"start": 2187,
"end": 11799
} | class ____(testing.TestCase):
@parameterized.named_parameters(named_product(module_name=OPS_MODULES))
def test_class_function_consistency(self, module_name):
ops_module = getattr(ops, module_name)
if module_name in ("core", "math"):
# `core` and `math` are not exported as their own module.
api_ops_module = None
else:
api_ops_module = getattr(api_ops_root, module_name)
for op_function, op_class in op_functions_and_classes(ops_module):
name = op_function.__name__
# ==== Check exports ====
# - op should be exported as e.g. `keras.ops.numpy.sum`
# - op should also be exported as e.g. `keras.ops.sum`
if module_name != "image":
# `image` ops are not exported at the top-level.
self.assertIsNotNone(
getattr(api_ops_root, name, None),
f"Not exported as `keras.ops.{name}`",
)
if api_ops_module is not None:
# `core` and `math` are not exported as their own module.
self.assertIsNotNone(
getattr(api_ops_module, name, None),
f"Not exported as `keras.ops.{module_name}.{name}`",
)
# ==== Check handling of name in __init__ ====
# - op class `__init__` should have a `name` parameter at the end,
# which should be keyword only and with a default value of `None`
# - op class `__init__` should call `super().__init__(name=name)`
if op_class.__init__ is Operation.__init__:
# `name` is not keyword only in `Operation`, use this instead.
class_init_signature = inspect.Signature(
[SELF_PARAMETER, NAME_PARAMETER]
)
else:
class_init_signature = inspect.signature(op_class.__init__)
# Check call to super.
self.assertContainsSubsequence(
inspect.getsource(op_class.__init__),
"super().__init__(name=name)",
f"`{op_class.__name__}.__init__` is not calling "
"`super().__init__(name=name)`",
)
static_parameters = list(class_init_signature.parameters.values())
# Remove `self`.
static_parameters = static_parameters[1:]
name_index = -1
if static_parameters[-1].kind == inspect.Parameter.VAR_KEYWORD:
# When there is a `**kwargs`, `name` appears before.
name_index = -2
# Verify `name` parameter is as expected.
self.assertEqual(
static_parameters[name_index],
NAME_PARAMETER,
f"The last parameter of `{op_class.__name__}.__init__` "
"should be `name`, should be a keyword only, and should "
"have a default value of `None`",
)
# Remove `name`, it's not part of the op signature.
static_parameters.pop(name_index)
# ==== Check static parameters ====
# Static parameters are declared in the class' `__init__`.
# Dynamic parameters are declared in the class' `call` method.
# - they should all appear in the op signature with the same name
# - they should have the same default value
# - they should appear in the same order and usually with the
# dynamic parameters first, and the static parameters last.
dynamic_parameters = list(
inspect.signature(op_class.call).parameters.values()
)[1:] # Remove self
op_signature = inspect.signature(op_function)
for p in dynamic_parameters + static_parameters:
# Check the same name appears in the op signature
self.assertIn(
p.name,
op_signature.parameters,
f"Op function `{name}` is missing a parameter that is in "
f"op class `{op_class.__name__}`",
)
# Check default values are the same
self.assertEqual(
p.default,
op_signature.parameters[p.name].default,
f"Default mismatch for parameter `{p.name}` between op "
f"function `{name}` and op class `{op_class.__name__}`",
)
dynamic_parameter_names = [p.name for p in dynamic_parameters]
static_parameter_names = [p.name for p in static_parameters]
# Check for obvious mistakes in parameters that were made dynamic
# but should be static.
for p in dynamic_parameters:
self.assertNotIn(
p.name,
STATIC_PARAMETER_NAMES,
f"`{p.name}` should not be a dynamic parameter in op class "
f"`{op_class.__name__}` based on its name.",
)
self.assertNotIsInstance(
p.default,
(bool, str),
f"`{p.name}` should not be a dynamic parameter in op class "
f"`{op_class.__name__}` based on default `{p.default}`.",
)
# Check order of parameters.
if name in (
"fori_loop",
"vectorized_map",
"while_loop",
"batch_normalization",
"dot_product_attention",
"average",
"einsum",
"full",
"pad",
):
# Loose case:
# order of of parameters is preserved but they are interspersed.
op_dynamic_parameter_names = [
name
for name in op_signature.parameters.keys()
if name in dynamic_parameter_names
]
self.assertEqual(
op_dynamic_parameter_names,
dynamic_parameter_names,
"Inconsistent dynamic parameter order for op "
f"function `{name}` and op class `{op_class.__name__}`",
)
op_static_parameter_names = [
name
for name in op_signature.parameters.keys()
if name in static_parameter_names
]
self.assertEqual(
op_static_parameter_names,
static_parameter_names,
"Inconsistent static parameter order for op "
f"function `{name}` and op class `{op_class.__name__}`",
)
else:
# Strict case:
# dynamic parameters first and static parameters at the end.
self.assertEqual(
list(op_signature.parameters.keys()),
dynamic_parameter_names + static_parameter_names,
"Inconsistent static parameter position for op "
f"function `{name}` and op class `{op_class.__name__}`",
)
# ==== Check compute_output_spec is implement ====
# - op class should override Operation's `compute_output_spec`
self.assertTrue(
hasattr(op_class, "compute_output_spec")
and op_class.compute_output_spec
is not Operation.compute_output_spec,
f"Op class `{op_class.__name__}` should override "
"`compute_output_spec`",
)
@parameterized.named_parameters(named_product(module_name=OPS_MODULES))
def test_backend_consistency(self, module_name):
ops_module = getattr(ops, module_name)
backend_ops_module = getattr(backend, module_name)
for op_function, _ in op_functions_and_classes(ops_module):
name = op_function.__name__
if hasattr(ops_module, f"_{name}"):
# For an op function `foo`, if there is a function named `_foo`,
# that means we have a backend independent implementation.
continue
if name in ("view_as_complex", "view_as_real", "get_item"):
# These ops have an inlined backend independent implementation.
continue
# ==== Check backend implementation ====
# - op should have an implementation in every backend
# - op implementation should have the same signature (same
# parameters, same order, same defaults)
backend_op_function = getattr(backend_ops_module, name, None)
if backend.backend() == "openvino" and backend_op_function is None:
# Openvino is still missing a number of ops.
continue
self.assertIsNotNone(backend_op_function, f"Missing op `{name}`")
if name == "multi_hot":
# multi_hot has code to massage the input parameters before
# calling the backend implementation, so the signature is
# different on purpose.
continue
# Signature should match in every way.
self.assertEqual(
inspect.signature(backend_op_function),
inspect.signature(op_function),
f"Signature mismatch for `{name}`",
)
| OperationTest |
python | numpy__numpy | numpy/polynomial/tests/test_printing.py | {
"start": 7442,
"end": 12308
} | class ____:
@pytest.fixture(scope='class', autouse=True)
def use_ascii(self):
poly.set_default_printstyle('ascii')
def test_single_line_one_less(self):
# With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74)
p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123])
assert_equal(len(str(p)), 74)
assert_equal(str(p), (
'12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
'12345678.0 x**3 + 123.0 x**4'
))
def test_num_chars_is_linewidth(self):
# len(str(p)) == default linewidth == 75
p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234])
assert_equal(len(str(p)), 75)
assert_equal(str(p), (
'12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
'12345678.0 x**3 +\n1234.0 x**4'
))
def test_first_linebreak_multiline_one_less_than_linewidth(self):
# Multiline str where len(first_line) + len(next_term) == lw - 1 == 74
p = poly.Polynomial(
[12345678, 12345678, 12345678, 12345678, 1, 12345678]
)
assert_equal(len(str(p).split('\n')[0]), 74)
assert_equal(str(p), (
'12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
'12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5'
))
def test_first_linebreak_multiline_on_linewidth(self):
# First line is one character longer than previous test
p = poly.Polynomial(
[12345678, 12345678, 12345678, 12345678.12, 1, 12345678]
)
assert_equal(str(p), (
'12345678.0 + 12345678.0 x + 12345678.0 x**2 + '
'12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5'
))
@pytest.mark.parametrize(('lw', 'tgt'), (
(75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + '
'500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + '
'900.0 x**9')),
(45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + '
'500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n'
'900.0 x**9')),
(132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + '
'500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + '
'900.0 x**9')),
))
def test_linewidth_printoption(self, lw, tgt):
p = poly.Polynomial(
[0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900]
)
with printoptions(linewidth=lw):
assert_equal(str(p), tgt)
for line in str(p).split('\n'):
assert_(len(line) < lw)
@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state")
def test_set_default_printoptions():
p = poly.Polynomial([1, 2, 3])
c = poly.Chebyshev([1, 2, 3])
poly.set_default_printstyle('ascii')
assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2")
assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)")
poly.set_default_printstyle('unicode')
assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²")
assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)")
with pytest.raises(ValueError):
poly.set_default_printstyle('invalid_input')
@pytest.mark.thread_unsafe(reason="set_default_printstyle() is global state")
def test_complex_coefficients():
"""Test both numpy and built-in complex."""
coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j]
# numpy complex
p1 = poly.Polynomial(coefs)
# Python complex
p2 = poly.Polynomial(array(coefs, dtype=object))
poly.set_default_printstyle('unicode')
assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³")
assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³")
poly.set_default_printstyle('ascii')
assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3")
assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3")
@pytest.mark.parametrize(('coefs', 'tgt'), (
(array([Fraction(1, 2), Fraction(3, 4)], dtype=object), (
"1/2 + 3/4·x"
)),
(array([1, 2, Fraction(5, 7)], dtype=object), (
"1 + 2·x + 5/7·x²"
)),
(array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), (
"1.00 + 2.2·x + 3·x²"
)),
))
def test_numeric_object_coefficients(coefs, tgt):
p = poly.Polynomial(coefs)
poly.set_default_printstyle('unicode')
assert_equal(str(p), tgt)
@pytest.mark.parametrize(('coefs', 'tgt'), (
(array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'),
(array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'),
))
def test_nonnumeric_object_coefficients(coefs, tgt):
"""
Test coef fallback for object arrays of non-numeric coefficients.
"""
p = poly.Polynomial(coefs)
poly.set_default_printstyle('unicode')
assert_equal(str(p), tgt)
| TestLinebreaking |
python | coleifer__peewee | tests/reflection.py | {
"start": 406,
"end": 894
} | class ____(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
| ColTypes |
python | django__django | tests/migrations/test_multidb.py | {
"start": 698,
"end": 893
} | class ____:
"""
A router that allows migrating depending on a hint.
"""
def allow_migrate(self, db, app_label, **hints):
return hints.get("foo", False)
| MigrateWhenFooRouter |
python | tensorflow__tensorflow | tensorflow/python/eager/forwardprop_test.py | {
"start": 37719,
"end": 38816
} | class ____(test.TestCase, parameterized.TestCase):
def testHessian1D(self):
# Note: stolen from ops/gradients_test.py
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
mat = variables.Variable(mat_value)
def _f(x):
return math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hessian_eager, = _forward_over_back_hessian(
_f, [constant_op.constant(x_value)],
use_pfor=False,
dtype=[dtypes.float32])
self.assertAllClose(hess_value, hessian_eager)
hessian_function, = def_function.function(_forward_over_back_hessian)(
_f, [constant_op.constant(x_value)],
use_pfor=False,
dtype=[dtypes.float32])
self.assertAllClose(hess_value, hessian_function)
hessian_pfor, = def_function.function(_forward_over_back_hessian)(
_f, [constant_op.constant(x_value)],
use_pfor=True,
dtype=[dtypes.float32])
self.assertAllClose(hess_value, hessian_pfor)
| HessianTests |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 185340,
"end": 186156
} | class ____(TemplateBuffer):
def __init__(
self,
layout: Layout,
inputs: Sequence[IRNode],
make_kernel_render: Callable[_P, _T],
workspace_size: int,
template: CUDATemplate,
supports_epilogue_fusion: bool,
) -> None:
super().__init__(layout, inputs, make_kernel_render)
# Global memory (in bytes) needed for this template.
self.workspace_size = workspace_size
self.template = template
self.supports_epilogue_fusion = supports_epilogue_fusion
def get_workspace_size(self) -> int:
return self.workspace_size if self.workspace_size is not None else 0
def emulate_store_fn(self) -> None:
for output in self.get_outputs():
ops.store(output.get_name(), None, None)
| CUDATemplateBuffer |
python | tensorflow__tensorflow | tensorflow/compiler/tests/data_format_ops_test.py | {
"start": 3211,
"end": 5843
} | class ____(xla_test.XLATestCase):
def _runPermuteAndCompare(self, x, src_format, dst_format, expected):
with self.session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_vec_permute(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def testNHWCToNCHW(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW", [7, 3, 4, 9])
def testNHWCToNCHW_Size2(self):
for dtype in {np.int32, np.int64}:
x = np.array([4, 9], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW", [4, 9])
def testNCHWToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC", [7, 9, 3, 4])
def testNCHWToNHWC_Size2(self):
for dtype in {np.int32, np.int64}:
x = np.array([9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC", [9, 3])
def testNHWCToHWNC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC", [4, 9, 7, 3])
def testHWNCToNHWC(self):
for dtype in {np.int32, np.int64}:
x = np.array([7, 4, 9, 3], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC", [9, 7, 4, 3])
def testNHWCToNCHW2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "NCHW",
[[7, 4], [5, 1], [9, 3], [4, 5]])
def testNHWCToHWNC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NHWC", "HWNC",
[[9, 3], [4, 5], [7, 4], [5, 1]])
def testHWNCToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "HWNC", "NHWC",
[[4, 5], [7, 4], [9, 3], [5, 1]])
def testNCHWToNHWC2D(self):
for dtype in {np.int32, np.int64}:
x = np.array([[7, 4], [9, 3], [4, 5], [5, 1]], dtype=dtype)
self._runPermuteAndCompare(x, "NCHW", "NHWC",
[[7, 4], [4, 5], [5, 1], [9, 3]])
if __name__ == "__main__":
test.main()
| XlaPermuteOpTest |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 34883,
"end": 35460
} | class ____(ColorProperty):
"""A descriptor to set scrollbar color(s)."""
def __set__(self, obj: StylesBase, color: Color | str | None) -> None:
super().__set__(obj, color)
if obj.node is None:
return
from textual.widget import Widget
if isinstance(obj.node, Widget):
widget = obj.node
if widget.show_horizontal_scrollbar:
widget.horizontal_scrollbar.refresh()
if widget.show_vertical_scrollbar:
widget.vertical_scrollbar.refresh()
| ScrollbarColorProperty |
python | pypa__warehouse | warehouse/rate_limiting/__init__.py | {
"start": 3481,
"end": 3731
} | class ____:
def test(self, *identifiers):
return True
def hit(self, *identifiers):
return True
def clear(self, *identifiers):
return None
def resets_in(self, *identifiers):
return None
| DummyRateLimiter |
python | getsentry__sentry | src/sentry/workflow_engine/types.py | {
"start": 6255,
"end": 8676
} | class ____:
"""
This is the result of `process_workflows`, and is used to
encapsulate different stages of completion for the method.
The `tainted` flag is used to indicate whether or not actions
have been triggered during the workflows evaluation.
The `msg` field is used for debug information during the evaluation.
The `data` attribute will include all the data used to evaluate the
workflows, and determine if an action should be triggered.
"""
tainted: bool
data: WorkflowEvaluationData
msg: str | None = None
def to_log(self, logger: Logger) -> None:
"""
Determines how far in the process the evaluation got to
and creates a structured log string to quickly find.
Then this will return the that log string, and the
relevant processing data to be logged.
"""
log_str = "workflow_engine.process_workflows.evaluation"
if self.tainted:
if self.data.triggered_workflows is None:
log_str = f"{log_str}.workflows.not_triggered"
else:
log_str = f"{log_str}.workflows.triggered"
else:
log_str = f"{log_str}.actions.triggered"
data_snapshot = self.data.get_snapshot()
detection_type = (
data_snapshot["associated_detector"]["type"]
if data_snapshot["associated_detector"]
else None
)
group_id = data_snapshot["group"].id if data_snapshot["group"] else None
triggered_workflows = data_snapshot["triggered_workflows"] or []
action_filter_conditions = data_snapshot["action_filter_conditions"] or []
triggered_actions = data_snapshot["triggered_actions"] or []
logger.info(
log_str,
extra={
"event_id": data_snapshot["event_id"],
"group_id": group_id,
"detection_type": detection_type,
"workflow_ids": data_snapshot["workflow_ids"],
"triggered_workflow_ids": [w["id"] for w in triggered_workflows],
"delayed_conditions": data_snapshot["delayed_conditions"],
"action_filter_group_ids": [afg["id"] for afg in action_filter_conditions],
"triggered_action_ids": [a["id"] for a in triggered_actions],
"debug_msg": self.msg,
},
)
| WorkflowEvaluation |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 8593,
"end": 8688
} | class ____(UUIDPlainB):
field3 = models.CharField(max_length=30)
# base -> proxy
| UUIDPlainC |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 8180,
"end": 11064
} | class ____(Protocol):
def _generate(self) -> Self: ...
def _generative(fn: _Fn) -> _Fn:
"""non-caching _generative() decorator.
This is basically the legacy decorator that copies the object and
runs a method on the new copy.
"""
@util.decorator
def _generative(
fn: _Fn, self: _SelfGenerativeType, *args: Any, **kw: Any
) -> _SelfGenerativeType:
"""Mark a method as generative."""
self = self._generate()
x = fn(self, *args, **kw)
assert x is self, "generative methods must return self"
return self
decorated = _generative(fn)
decorated.non_generative = fn # type: ignore
return decorated
def _exclusive_against(*names: str, **kw: Any) -> Callable[[_Fn], _Fn]:
msgs: Dict[str, str] = kw.pop("msgs", {})
defaults: Dict[str, str] = kw.pop("defaults", {})
getters: List[Tuple[str, operator.attrgetter[Any], Optional[str]]] = [
(name, operator.attrgetter(name), defaults.get(name, None))
for name in names
]
@util.decorator
def check(fn: _Fn, *args: Any, **kw: Any) -> Any:
# make pylance happy by not including "self" in the argument
# list
self = args[0]
args = args[1:]
for name, getter, default_ in getters:
if getter(self) is not default_:
msg = msgs.get(
name,
"Method %s() has already been invoked on this %s construct"
% (fn.__name__, self.__class__),
)
raise exc.InvalidRequestError(msg)
return fn(self, *args, **kw)
return check
def _clone(element, **kw):
return element._clone(**kw)
def _expand_cloned(
elements: Iterable[_CLE],
) -> Iterable[_CLE]:
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
# TODO: cython candidate
return itertools.chain(*[x._cloned_set for x in elements])
def _de_clone(
elements: Iterable[_CLE],
) -> Iterable[_CLE]:
for x in elements:
while x._is_clone_of is not None:
x = x._is_clone_of
yield x
def _cloned_intersection(a: Iterable[_CLE], b: Iterable[_CLE]) -> Set[_CLE]:
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap: Set[_CLE] = set(_expand_cloned(a)).intersection(
_expand_cloned(b)
)
return {elem for elem in a if all_overlap.intersection(elem._cloned_set)}
def _cloned_difference(a: Iterable[_CLE], b: Iterable[_CLE]) -> Set[_CLE]:
all_overlap: Set[_CLE] = set(_expand_cloned(a)).intersection(
_expand_cloned(b)
)
return {
elem for elem in a if not all_overlap.intersection(elem._cloned_set)
}
| _GenerativeType |
python | ray-project__ray | python/ray/train/_internal/storage.py | {
"start": 12014,
"end": 27646
} | class ____:
"""Shared context that holds the source of truth for all paths and
storage utilities, passed along from the driver to workers.
This object defines a few types of paths:
1. *_fs_path: A path on the `storage_filesystem`. This is a regular path
which has been prefix-stripped by pyarrow.fs.FileSystem.from_uri and
can be joined with `Path(...).as_posix()`.
2. *_driver_staging_path: The temporary staging directory on the local filesystem
where driver artifacts are saved to before persisting them to storage.
3. trial_working_directory: The local filesystem path that the remote
actors' working directories are moved to by default.
This is separated from the driver staging path so that driver syncing
does not implicitly upload the trial working directory, for trials on the
driver node.
Example with storage_path="mock:///bucket/path?param=1":
>>> import ray
>>> from ray.train._internal.storage import StorageContext
>>> import os
>>> _ = ray.init()
>>> storage = StorageContext(
... storage_path="mock://netloc/bucket/path?param=1",
... experiment_dir_name="exp_name",
... )
>>> storage.storage_filesystem # Auto-resolved # doctest: +ELLIPSIS
<pyarrow._fs._MockFileSystem object...
>>> storage.experiment_fs_path
'bucket/path/exp_name'
>>> storage.experiment_driver_staging_path # doctest: +ELLIPSIS
'/tmp/ray/session_.../artifacts/.../exp_name/driver_artifacts'
>>> storage.trial_dir_name = "trial_dir"
>>> storage.trial_fs_path
'bucket/path/exp_name/trial_dir'
>>> storage.trial_driver_staging_path # doctest: +ELLIPSIS
'/tmp/ray/session_.../artifacts/.../exp_name/driver_artifacts/trial_dir'
>>> storage.trial_working_directory # doctest: +ELLIPSIS
'/tmp/ray/session_.../artifacts/.../exp_name/working_dirs/trial_dir'
>>> storage.current_checkpoint_index = 1
>>> storage.checkpoint_fs_path
'bucket/path/exp_name/trial_dir/checkpoint_000001'
>>> ray.shutdown()
Example with storage_path="/tmp/ray_results":
>>> from ray.train._internal.storage import StorageContext
>>> storage = StorageContext(
... storage_path="/tmp/ray_results",
... experiment_dir_name="exp_name",
... )
>>> storage.storage_fs_path
'/tmp/ray_results'
>>> storage.experiment_fs_path
'/tmp/ray_results/exp_name'
>>> storage.storage_filesystem # Auto-resolved # doctest: +ELLIPSIS
<pyarrow._fs.LocalFileSystem object...
Internal Usage Examples:
- To copy files to the trial directory on the storage filesystem:
pyarrow.fs.copy_files(
local_dir,
Path(storage.trial_fs_path, "subdir").as_posix(),
destination_filesystem=storage.filesystem
)
.. warning::
This is an experimental developer API and is subject to change
without notice between versions.
"""
def __init__(
self,
storage_path: Union[str, os.PathLike],
experiment_dir_name: str,
sync_config: Optional[SyncConfig] = None,
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
trial_dir_name: Optional[str] = None,
current_checkpoint_index: int = -1,
):
from ray.tune.utils import date_str
self.custom_fs_provided = storage_filesystem is not None
# Invariant: (`storage_filesystem`, `storage_path`) is the location where
# *all* results can be accessed.
self.experiment_dir_name = experiment_dir_name
self.trial_dir_name = trial_dir_name
self.current_checkpoint_index = current_checkpoint_index
self.sync_config = sync_config or SyncConfig()
self.storage_filesystem, self.storage_fs_path = get_fs_and_path(
storage_path, storage_filesystem
)
self.storage_fs_path = Path(self.storage_fs_path).as_posix()
self.syncer: Syncer = _FilesystemSyncer(
storage_filesystem=self.storage_filesystem,
sync_period=self.sync_config.sync_period,
sync_timeout=self.sync_config.sync_timeout,
)
self._create_validation_file()
self._check_validation_file()
# Timestamp is used to create a unique session directory for the current
# training job. This is used to avoid conflicts when multiple training jobs
# run with the same name in the same cluster.
# This is set ONCE at the creation of the storage context, on the driver.
self._timestamp = date_str()
def __str__(self):
return (
"StorageContext<\n"
f" storage_filesystem='{self.storage_filesystem.type_name}',\n"
f" storage_fs_path='{self.storage_fs_path}',\n"
f" experiment_dir_name='{self.experiment_dir_name}',\n"
f" trial_dir_name='{self.trial_dir_name}',\n"
f" current_checkpoint_index={self.current_checkpoint_index},\n"
">"
)
def _create_validation_file(self):
"""On the creation of a storage context, create a validation file at the
storage path to verify that the storage path can be written to.
This validation file is also used to check whether the storage path is
accessible by all nodes in the cluster."""
valid_file = Path(
self.experiment_fs_path, _VALIDATE_STORAGE_MARKER_FILENAME
).as_posix()
self.storage_filesystem.create_dir(self.experiment_fs_path)
with self.storage_filesystem.open_output_stream(valid_file):
pass
def _check_validation_file(self):
"""Checks that the validation file exists at the storage path."""
valid_file = Path(
self.experiment_fs_path, _VALIDATE_STORAGE_MARKER_FILENAME
).as_posix()
if not _exists_at_fs_path(fs=self.storage_filesystem, fs_path=valid_file):
raise RuntimeError(
f"Unable to set up cluster storage with the following settings:\n{self}"
"\nCheck that all nodes in the cluster have read/write access "
"to the configured storage path. `RunConfig(storage_path)` should be "
"set to a cloud storage URI or a shared filesystem path accessible "
"by all nodes in your cluster ('s3://bucket' or '/mnt/nfs'). "
"A local path on the head node is not accessible by worker nodes. "
"See: https://docs.ray.io/en/latest/train/user-guides/persistent-storage.html" # noqa: E501
)
def _update_checkpoint_index(self, metrics: Dict):
# Per default, increase by 1. This can be overwritten to customize checkpoint
# directories.
self.current_checkpoint_index += 1
def persist_current_checkpoint(self, checkpoint: "Checkpoint") -> "Checkpoint":
"""Persists a given checkpoint to the current checkpoint path on the filesystem.
"Current" is defined by the `current_checkpoint_index` attribute of the
storage context.
This method copies the checkpoint files to the storage location.
It's up to the user to delete the original checkpoint files if desired.
For example, the original directory is typically a local temp directory.
Args:
checkpoint: The checkpoint to persist to (fs, checkpoint_fs_path).
Returns:
Checkpoint: A Checkpoint pointing to the persisted checkpoint location.
"""
# TODO(justinvyu): Fix this cyclical import.
logger.debug(
"Copying checkpoint files to storage path:\n"
"({source_fs}, {source}) -> ({dest_fs}, {destination})".format(
source=checkpoint.path,
destination=self.checkpoint_fs_path,
source_fs=checkpoint.filesystem,
dest_fs=self.storage_filesystem,
)
)
# Raise an error if the storage path is not accessible when
# attempting to upload a checkpoint from a remote worker.
# Ex: If storage_path is a local path, then a validation marker
# will only exist on the head node but not the worker nodes.
self._check_validation_file()
self.storage_filesystem.create_dir(self.checkpoint_fs_path)
_pyarrow_fs_copy_files(
source=checkpoint.path,
destination=self.checkpoint_fs_path,
source_filesystem=checkpoint.filesystem,
destination_filesystem=self.storage_filesystem,
)
persisted_checkpoint = checkpoint.__class__(
filesystem=self.storage_filesystem,
path=self.checkpoint_fs_path,
)
logger.info(f"Checkpoint successfully created at: {persisted_checkpoint}")
return persisted_checkpoint
def persist_artifacts(self, force: bool = False) -> None:
"""Persists all artifacts within `trial_local_dir` to storage.
This method possibly launches a background task to sync the trial dir,
depending on the `sync_period` + `sync_artifacts_on_checkpoint`
settings of `SyncConfig`.
`(local_fs, trial_working_dir) -> (storage_filesystem, trial_fs_path)`
Args:
force: If True, wait for a previous sync to finish, launch a new one,
and wait for that one to finish. By the end of a `force=True` call, the
latest version of the trial artifacts will be persisted.
"""
if not self.sync_config.sync_artifacts:
return
# Skip if there are no artifacts to sync
is_empty = not any(os.scandir(self.trial_working_directory))
if is_empty:
return
if force:
self.syncer.wait()
self.syncer.sync_up(
local_dir=self.trial_working_directory, remote_dir=self.trial_fs_path
)
self.syncer.wait()
else:
self.syncer.sync_up_if_needed(
local_dir=self.trial_working_directory, remote_dir=self.trial_fs_path
)
@property
def experiment_fs_path(self) -> str:
"""The path on the `storage_filesystem` to the experiment directory.
NOTE: This does not have a URI prefix anymore, since it has been stripped
by pyarrow.fs.FileSystem.from_uri already. The URI scheme information is
kept in `storage_filesystem` instead.
"""
return Path(self.storage_fs_path, self.experiment_dir_name).as_posix()
def _get_session_path(self) -> str:
"""The Ray Train/Tune session local directory used to stage files
before persisting to the storage filesystem."""
return Path(
_get_ray_train_session_dir(), self._timestamp, self.experiment_dir_name
).as_posix()
@property
def experiment_driver_staging_path(self) -> str:
"""The local filesystem path of the experiment directory on the driver node.
The driver is the node where `Trainer.fit`/`Tuner.fit` is being called.
This path is of the form:
`/tmp/ray/session_<session_id>/artifacts/<ray-train-job-timestamp>/
<experiment_dir_name>/driver_artifacts`
This should be used as the temporary staging location for files *on the driver*
before syncing them to `experiment_fs_path`.
For example, the search algorithm should dump its state to this directory.
See `trial_driver_staging_path` for writing trial-specific artifacts.
The directory is synced to
`{storage_path}/{experiment_dir_name}` periodically.
See `_ExperimentCheckpointManager.checkpoint` for where that happens.
"""
return Path(self._get_session_path(), "driver_artifacts").as_posix()
@property
def trial_fs_path(self) -> str:
"""The trial directory path on the `storage_filesystem`.
Raises a ValueError if `trial_dir_name` is not set beforehand.
"""
if self.trial_dir_name is None:
raise RuntimeError(
"Should not access `trial_fs_path` without setting `trial_dir_name`"
)
return Path(self.experiment_fs_path, self.trial_dir_name).as_posix()
@property
def trial_driver_staging_path(self) -> str:
"""The local filesystem path of the trial directory on the driver.
The driver is the node where `Trainer.fit`/`Tuner.fit` is being called.
This path is of the form:
`/tmp/ray/session_<session_id>/artifacts/<ray-train-job-timestamp>/
<experiment_dir_name>/driver_artifacts/<trial_dir_name>`
This should be used as the temporary location for files on the driver
before persisting them to `trial_fs_path`.
For example, callbacks (e.g., JsonLoggerCallback) should write trial-specific
logfiles within this directory.
"""
if self.trial_dir_name is None:
raise RuntimeError(
"Should not access `trial_driver_staging_path` "
"without setting `trial_dir_name`"
)
return Path(self.experiment_driver_staging_path, self.trial_dir_name).as_posix()
@property
def trial_working_directory(self) -> str:
"""The local filesystem path to trial working directory.
This path is of the form:
`/tmp/ray/session_<session_id>/artifacts/<ray-train-job-timestamp>/
<experiment_dir_name>/working_dirs/<trial_dir_name>`
Ray Train/Tune moves the remote actor's working directory to this path
by default, unless disabled by `RAY_CHDIR_TO_TRIAL_DIR` environment variable.
Writing files to this directory allows users to persist training artifacts
if `SyncConfig(sync_artifacts=True)` is set.
"""
if self.trial_dir_name is None:
raise RuntimeError(
"Cannot access `trial_working_directory` without "
"setting `trial_dir_name`"
)
return Path(
self._get_session_path(), "working_dirs", self.trial_dir_name
).as_posix()
@property
def checkpoint_fs_path(self) -> str:
"""The current checkpoint directory path on the `storage_filesystem`.
"Current" refers to the checkpoint that is currently being created/persisted.
The user of this class is responsible for setting the `current_checkpoint_index`
(e.g., incrementing when needed).
"""
return Path(self.trial_fs_path, self.checkpoint_dir_name).as_posix()
@property
def checkpoint_dir_name(self) -> str:
"""The current checkpoint directory name, based on the checkpoint index."""
return StorageContext._make_checkpoint_dir_name(self.current_checkpoint_index)
@staticmethod
def get_experiment_dir_name(run_obj: Union[str, Callable, Type]) -> str:
from ray.tune.experiment import Experiment
from ray.tune.utils import date_str
run_identifier = Experiment.get_trainable_name(run_obj)
if bool(int(os.environ.get("TUNE_DISABLE_DATED_SUBDIR", 0))):
dir_name = run_identifier
else:
dir_name = "{}_{}".format(run_identifier, date_str())
return dir_name
@staticmethod
def _make_checkpoint_dir_name(index: int):
"""Get the name of the checkpoint directory, given an index."""
return f"checkpoint_{index:06d}"
| StorageContext |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 7741,
"end": 9019
} | class ____(RuleBasedStateMachine):
a = Bundle("a")
def __init__(self):
super().__init__()
self.calls = set()
@rule(target=a, a=st.integers())
def add_a(self, a):
self.calls.add("add")
return a
@rule(v=a)
def f(self, v):
self.calls.add("f")
@precondition(lambda self: "add" in self.calls)
@rule(value=st.integers())
def unrelated(self, value):
self.calls.add("unrelated")
@rule()
def invariant(self):
# force all three calls to be made in a particular order (with the
# `unrelated` precondition) so we always shrink to a particular counterexample.
assert len(self.calls) != 3
def test_unrelated_rule_does_not_use_var_reference_repr():
# we are specifically looking for state.unrelated(value=0) not being replaced
# with state.unrelated(value=a_0). The `unrelated` rule is drawing from
# st.integers, not a bundle, so the values should not be conflated even if
# they're both 0.
assert_runs_to_output(
UnrelatedCall,
"""
state = UnrelatedCall()
a_0 = state.add_a(a=0)
state.f(v=a_0)
state.unrelated(value=0)
state.invariant()
state.teardown()
""",
)
| UnrelatedCall |
python | openai__openai-python | tests/api_resources/test_videos.py | {
"start": 10519,
"end": 21673
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.create(
prompt="x",
)
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.create(
prompt="x",
input_reference=b"raw file contents",
model="sora-2",
seconds="4",
size="720x1280",
)
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.videos.with_raw_response.create(
prompt="x",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.videos.with_streaming_response.create(
prompt="x",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(Video, video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.retrieve(
"video_123",
)
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.videos.with_raw_response.retrieve(
"video_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.videos.with_streaming_response.retrieve(
"video_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(Video, video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
await async_client.videos.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.list()
assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.list(
after="after",
limit=0,
order="asc",
)
assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.videos.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.videos.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(AsyncConversationCursorPage[Video], video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.delete(
"video_123",
)
assert_matches_type(VideoDeleteResponse, video, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
response = await async_client.videos.with_raw_response.delete(
"video_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(VideoDeleteResponse, video, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
async with async_client.videos.with_streaming_response.delete(
"video_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(VideoDeleteResponse, video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
await async_client.videos.with_raw_response.delete(
"",
)
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
video = await async_client.videos.download_content(
video_id="video_123",
)
assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
assert video.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_download_content_with_all_params(
self, async_client: AsyncOpenAI, respx_mock: MockRouter
) -> None:
respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
video = await async_client.videos.download_content(
video_id="video_123",
variant="video",
)
assert isinstance(video, _legacy_response.HttpxBinaryResponseContent)
assert video.json() == {"foo": "bar"}
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await async_client.videos.with_raw_response.download_content(
video_id="video_123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(_legacy_response.HttpxBinaryResponseContent, video, path=["response"])
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_streaming_response_download_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
respx_mock.get("/videos/video_123/content").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
async with async_client.videos.with_streaming_response.download_content(
video_id="video_123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(bytes, video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_path_params_download_content(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
await async_client.videos.with_raw_response.download_content(
video_id="",
)
@parametrize
async def test_method_remix(self, async_client: AsyncOpenAI) -> None:
video = await async_client.videos.remix(
video_id="video_123",
prompt="x",
)
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_raw_response_remix(self, async_client: AsyncOpenAI) -> None:
response = await async_client.videos.with_raw_response.remix(
video_id="video_123",
prompt="x",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = response.parse()
assert_matches_type(Video, video, path=["response"])
@parametrize
async def test_streaming_response_remix(self, async_client: AsyncOpenAI) -> None:
async with async_client.videos.with_streaming_response.remix(
video_id="video_123",
prompt="x",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
video = await response.parse()
assert_matches_type(Video, video, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_remix(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `video_id` but received ''"):
await async_client.videos.with_raw_response.remix(
video_id="",
prompt="x",
)
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
checking_client: OpenAI | AsyncOpenAI = client if sync else async_client
assert_signatures_in_sync(
checking_client.videos.create,
checking_client.videos.create_and_poll,
exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"},
)
| TestAsyncVideos |
python | getsentry__sentry | src/sentry/models/artifactbundle.py | {
"start": 1617,
"end": 4627
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
# We use 00000000-00000000-00000000-00000000 in place of NULL because the uniqueness constraint doesn't play well
# with nullable fields, since NULL != NULL.
bundle_id = models.UUIDField(default=NULL_UUID, db_index=True)
file = FlexibleForeignKey("sentry.File")
artifact_count = BoundedPositiveIntegerField()
indexing_state = models.IntegerField(
default=None, null=True, choices=ArtifactBundleIndexingState.choices()
)
# This field represents the date in which the bundle was renewed, since we have a renewal mechanism in place. The
# name is the same across entities connected to this bundle named *ArtifactBundle.
date_added = models.DateTimeField(default=timezone.now, db_index=True)
# This field represents the date of upload of this bundle, and it's not mutated afterward.
date_uploaded = models.DateTimeField(default=timezone.now)
# This field represents the date in which this bundle was last modified, where modification means that an
# association has been added or any of its fields have been modified.
date_last_modified = models.DateTimeField(null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_artifactbundle"
@classmethod
def get_release_associations(
cls, organization_id: int, artifact_bundle: ArtifactBundle
) -> list[Mapping[str, str | None]]:
# We sort by id, since it's the best (already existing) field to define total order of
# release associations that is somehow consistent with upload sequence.
release_artifact_bundles = ReleaseArtifactBundle.objects.filter(
organization_id=organization_id, artifact_bundle=artifact_bundle
).order_by("-id")
return [
{
"release": release_artifact_bundle.release_name,
"dist": release_artifact_bundle.dist_name or None,
}
for release_artifact_bundle in release_artifact_bundles
]
@classmethod
def get_ident(cls, url, dist=None):
if dist is not None:
return sha1_text(url + "\x00\x00" + dist).hexdigest()
return sha1_text(url).hexdigest()
def delete_file_for_artifact_bundle(instance, **kwargs):
from sentry.models.files import File
from sentry.tasks.assemble import AssembleTask, delete_assemble_status
checksum = None
try:
checksum = instance.file.checksum
except File.DoesNotExist:
pass
else:
if instance.organization_id is not None and checksum is not None:
delete_assemble_status(
AssembleTask.ARTIFACT_BUNDLE,
instance.organization_id,
checksum,
)
finally:
instance.file.delete()
post_delete.connect(delete_file_for_artifact_bundle, sender=ArtifactBundle)
@region_silo_model
| ArtifactBundle |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/pysqlite.py | {
"start": 23750,
"end": 27153
} | class ____(SQLiteDialect_pysqlite):
"""numeric dialect for testing only
internal use only. This dialect is **NOT** supported by SQLAlchemy
and may change at any time.
"""
supports_statement_cache = True
default_paramstyle = "numeric"
driver = "pysqlite_numeric"
_first_bind = ":1"
_not_in_statement_regexp: Optional[Pattern[str]] = None
def __init__(self, *arg: Any, **kw: Any) -> None:
kw.setdefault("paramstyle", "numeric")
super().__init__(*arg, **kw)
def create_connect_args(self, url: URL) -> ConnectArgsType:
arg, opts = super().create_connect_args(url)
opts["factory"] = self._fix_sqlite_issue_99953()
return arg, opts
def _fix_sqlite_issue_99953(self) -> Any:
import sqlite3
first_bind = self._first_bind
if self._not_in_statement_regexp:
nis = self._not_in_statement_regexp
def _test_sql(sql: str) -> None:
m = nis.search(sql)
assert not m, f"Found {nis.pattern!r} in {sql!r}"
else:
def _test_sql(sql: str) -> None:
pass
def _numeric_param_as_dict(
parameters: Any,
) -> Union[dict[str, Any], tuple[Any, ...]]:
if parameters:
assert isinstance(parameters, tuple)
return {
str(idx): value for idx, value in enumerate(parameters, 1)
}
else:
return ()
class SQLiteFix99953Cursor(sqlite3.Cursor):
def execute(self, sql: str, parameters: Any = ()) -> Self:
_test_sql(sql)
if first_bind in sql:
parameters = _numeric_param_as_dict(parameters)
return super().execute(sql, parameters)
def executemany(self, sql: str, parameters: Any) -> Self:
_test_sql(sql)
if first_bind in sql:
parameters = [
_numeric_param_as_dict(p) for p in parameters
]
return super().executemany(sql, parameters)
class SQLiteFix99953Connection(sqlite3.Connection):
_CursorT = TypeVar("_CursorT", bound=sqlite3.Cursor)
def cursor(
self,
factory: Optional[
Callable[[sqlite3.Connection], _CursorT]
] = None,
) -> _CursorT:
if factory is None:
factory = SQLiteFix99953Cursor # type: ignore[assignment]
return super().cursor(factory=factory) # type: ignore[return-value] # noqa[E501]
def execute(
self, sql: str, parameters: Any = ()
) -> sqlite3.Cursor:
_test_sql(sql)
if first_bind in sql:
parameters = _numeric_param_as_dict(parameters)
return super().execute(sql, parameters)
def executemany(self, sql: str, parameters: Any) -> sqlite3.Cursor:
_test_sql(sql)
if first_bind in sql:
parameters = [
_numeric_param_as_dict(p) for p in parameters
]
return super().executemany(sql, parameters)
return SQLiteFix99953Connection
| _SQLiteDialect_pysqlite_numeric |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 95567,
"end": 110521
} | class ____:
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
# Stores CUDA memory data provided by PyTorch's caching allocator and
# the CUDA driver.
#
# NOTE: The undocumented torch.cuda.mem_get_info() returns
# (#free bytes, #total bytes available) on the GPU
def __enter__(self):
self.caching_allocator_befores = []
self.driver_befores = []
# Performs a gc if required (required if any CUDA memory is held)
num_devices = torch.cuda.device_count()
for i in range(num_devices):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
# NOTE: gc is based exclusively on caching allocator memory
# because the driver will always have some bytes in use (context size?)
if caching_allocator_mem_allocated > 0:
gc.collect()
torch._C._cuda_clearCublasWorkspaces()
torch.cuda.empty_cache()
break
# Acquires caching allocator and driver statistics before the test is run
for i in range(num_devices):
self.caching_allocator_befores.append(torch.cuda.memory_allocated(i))
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
self.driver_befores.append(driver_mem_allocated)
def __exit__(self, exc_type, exc_value, traceback):
# Don't check for leaks if an exception was thrown
if exc_type is not None:
return
# Compares caching allocator before/after statistics
# An increase in allocated memory is a discrepancy indicating a possible
# memory leak
discrepancy_detected = False
num_devices = torch.cuda.device_count()
for i in range(num_devices):
# avoid counting cublasWorkspace allocations
torch._C._cuda_clearCublasWorkspaces()
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
discrepancy_detected = True
break
# Short-circuits if no discrepancy detected
if not discrepancy_detected:
return
# Validates the discrepancy persists after garbage collection and
# is confirmed by the driver API
# NOTE: driver API iscrepancies alone are ignored because with the jiterator
# some tests may permanently increase the CUDA context size and
# that will appear as a driver memory leak but is the expected behavior.
# GCs and clears the cache
gc.collect()
torch.cuda.empty_cache()
for i in range(num_devices):
discrepancy_detected = True
# Query memory multiple items to ensure leak was not transient
for _ in range(3):
caching_allocator_mem_allocated = torch.cuda.memory_allocated(i)
bytes_free, bytes_total = torch.cuda.mem_get_info(i)
driver_mem_allocated = bytes_total - bytes_free
caching_allocator_discrepancy = False
driver_discrepancy = False
if caching_allocator_mem_allocated > self.caching_allocator_befores[i]:
caching_allocator_discrepancy = True
if driver_mem_allocated > self.driver_befores[i]:
driver_discrepancy = True
if not (caching_allocator_discrepancy or driver_discrepancy):
# Leak was false positive, exit loop
discrepancy_detected = False
break
if not discrepancy_detected:
continue
if caching_allocator_discrepancy and not driver_discrepancy: # type: ignore[possibly-undefined]
# Just raises a warning if the leak is not validated by the
# driver API
# NOTE: this may be a problem with how the caching allocator collects its
# statistics or a leak too small to trigger the allocation of an
# additional block of memory by the CUDA driver
msg = ("CUDA caching allocator reports a memory leak not " # type: ignore[possibly-undefined]
f"verified by the driver API in {self.name}! "
f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} "
f"and is now reported as {caching_allocator_mem_allocated} " # type: ignore[possibly-undefined]
f"on device {i}. "
f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.") # type: ignore[possibly-undefined]
warnings.warn(msg, stacklevel=2)
elif caching_allocator_discrepancy and driver_discrepancy: # type: ignore[possibly-undefined]
# A caching allocator discrepancy validated by the driver API is a
# failure (except on ROCm, see below)
msg = (f"CUDA driver API confirmed a leak in {self.name}! " # type: ignore[possibly-undefined]
f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} "
f"and is now reported as {caching_allocator_mem_allocated} " # type: ignore[possibly-undefined]
f"on device {i}. "
f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.") # type: ignore[possibly-undefined]
raise RuntimeError(msg)
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
@contextmanager
def print_repro_on_failure(repro_parts):
try:
yield
except unittest.SkipTest:
raise
except Exception as e:
# Get the index of the sample input that failed the test if possible.
sample_isolation_prefix = ""
tracked_input = getattr(e, "_tracked_input", None)
if tracked_input is not None:
sample_isolation_prefix = f"PYTORCH_OPINFO_SAMPLE_INPUT_INDEX={tracked_input.index}"
repro_str = " ".join(filter(None, (sample_isolation_prefix, *repro_parts)))
open_source_signpost(
subsystem="test_repros",
name="test_failure",
parameters=json.dumps(
{
"repro": " ".join(filter(None, (sample_isolation_prefix, *repro_parts))),
}
),
)
repro_msg = f"""
To execute this test, run the following from the base repo dir:
{repro_str}
This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0"""
# NB: Hacking the exception args is the cleanest way I've found to append
# failure reproduction info without poisoning the stack trace.
if len(e.args) >= 1:
e.args = (f"{e.args[0]}\n{repro_msg}", *e.args[1:])
raise
# "min_satisfying_examples" setting has been deprecated in hypothesis
# 3.56.0 and removed in hypothesis 4.x
try:
    import hypothesis

    # Thin wrapper around hypothesis.settings that silently drops the
    # deprecated "min_satisfying_examples" kwarg on hypothesis >= 3.56.0 so
    # the profile definitions below work across hypothesis versions.
    def settings(*args, **kwargs):
        if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
            kwargs.pop('min_satisfying_examples')
        return hypothesis.settings(*args, **kwargs)

    # CI profile: derandomized for reproducibility, moderate example count,
    # no example database (CI machines are ephemeral).
    hypothesis.settings.register_profile(
        "pytorch_ci",
        settings(
            derandomize=True,
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=50,
            verbosity=hypothesis.Verbosity.normal))
    # Dev profile: few examples for fast local iteration.
    hypothesis.settings.register_profile(
        "dev",
        settings(
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=10,
            verbosity=hypothesis.Verbosity.normal))
    # Debug profile: many examples and verbose output for chasing failures.
    hypothesis.settings.register_profile(
        "debug",
        settings(
            suppress_health_check=[hypothesis.HealthCheck.too_slow],
            database=None,
            max_examples=1000,
            verbosity=hypothesis.Verbosity.verbose))
    # Pick the CI profile on CI; otherwise honor PYTORCH_HYPOTHESIS_PROFILE
    # (defaulting to "dev").
    hypothesis.settings.load_profile(
        "pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
    )
except ImportError:
    warnings.warn('Fail to import hypothesis in common_utils, tests are not derandomized', ImportWarning, stacklevel=2)
# Used in check_if_enable to see if a test method should be disabled by an issue,
# sanitizes a test method name from appended suffixes by @dtypes parametrization.
# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should
# disabled ALL parametrized test_bitwise_ops tests, such test_bitwise_ops_cuda_int32
def remove_device_and_dtype_suffixes(test_name: str) -> str:
    """Strip a trailing ``_<device>_<dtype>`` (or bare ``_<dtype>``) suffix
    appended by parametrization from *test_name*; return it unchanged otherwise."""
    # import statement is localized to avoid circular dependency issues with common_device_type.py
    from torch.testing._internal.common_device_type import get_device_type_test_bases
    device_suffixes = {base.device_type for base in get_device_type_test_bases()}
    dtype_suffixes = {str(dt)[len("torch."):] for dt in get_all_dtypes()}
    chunks = test_name.split("_")
    if not chunks or chunks[-1] not in dtype_suffixes:
        return test_name
    # Drop the dtype chunk, plus the device chunk if one immediately precedes it.
    keep = -2 if len(chunks) > 1 and chunks[-2] in device_suffixes else -1
    return "_".join(chunks[:keep])
def check_if_enable(test: unittest.TestCase):
    """Raise ``unittest.SkipTest`` if *test* should not run in this configuration.

    Consults:
      * ``slow_tests_dict`` — slow tests only run with PYTORCH_TEST_WITH_SLOW;
      * ``disabled_tests_dict`` — tests disabled via GitHub issues, optionally
        limited to specific platforms;
      * ``RERUN_DISABLED_TESTS`` — verification mode that runs ONLY disabled tests;
      * ``TEST_SKIP_FAST`` — skips non-slow tests when set.
    """
    classname = str(test.__class__).split("'")[1].split(".")[-1]
    sanitized_testname = remove_device_and_dtype_suffixes(test._testMethodName)

    def matches_test(target: str):
        # Targets look like "test_name (__main__.TestSuite)".
        target_test_parts = target.split()
        if len(target_test_parts) < 2:
            # poorly formed target test name
            return False
        target_testname = target_test_parts[0]
        target_classname = target_test_parts[1][1:-1].split(".")[-1]
        # if test method name or its sanitized version exactly matches the disabled
        # test method name AND allow non-parametrized suite names to disable
        # parametrized ones (TestSuite disables TestSuiteCPU)
        return classname.startswith(target_classname) and (target_testname in (test._testMethodName, sanitized_testname))

    if any(matches_test(x) for x in slow_tests_dict):
        getattr(test, test._testMethodName).__dict__['slow_test'] = True
        if not TEST_WITH_SLOW:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")

    if not IS_SANDCASTLE:
        should_skip = False
        skip_msg = ""
        for disabled_test, (issue_url, platforms) in disabled_tests_dict.items():
            if matches_test(disabled_test):
                platform_to_conditional: dict = {
                    "mac": IS_MACOS,
                    "macos": IS_MACOS,
                    "win": IS_WINDOWS,
                    "windows": IS_WINDOWS,
                    "linux": IS_LINUX,
                    "rocm": TEST_WITH_ROCM,
                    "xpu": TEST_XPU,
                    "asan": TEST_WITH_ASAN,
                    "dynamo": TEST_WITH_TORCHDYNAMO,
                    "dynamo_wrapped": TEST_WITH_TORCHDYNAMO,
                    "inductor": TEST_WITH_TORCHINDUCTOR,
                    "slow": TEST_WITH_SLOW,
                }
                invalid_platforms = list(filter(lambda p: p not in platform_to_conditional, platforms))
                if len(invalid_platforms) > 0:
                    invalid_plats_str = ", ".join(invalid_platforms)
                    valid_plats = ", ".join(platform_to_conditional.keys())
                    print(f"Test {disabled_test} is disabled for some unrecognized ",
                          f"platforms: [{invalid_plats_str}]. Please edit issue {issue_url} to fix the platforms ",
                          'assigned to this flaky test, changing "Platforms: ..." to a comma separated ',
                          f"subset of the following (or leave it blank to match all platforms): {valid_plats}")
                    # Sanitize the platforms list so that we continue to disable the test for any valid platforms given
                    platforms = list(filter(lambda p: p in platform_to_conditional, platforms))
                # An empty platform list means the test is disabled everywhere.
                if platforms == [] or any(platform_to_conditional[platform] for platform in platforms):
                    should_skip = True
                    # NOTE: 'all ' (with trailing space) fixes the previously
                    # garbled message "for allplatform(s)".
                    skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \
                        f" for {'all ' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \
                        "If you're seeing this on your local machine and would like to enable this test, " \
                        "please make sure CI is not set and you are not using the flag --import-disabled-tests."
                    break

        if should_skip and not RERUN_DISABLED_TESTS:
            # Skip the disabled test when not running under --rerun-disabled-tests verification mode
            raise unittest.SkipTest(skip_msg)

        if not should_skip and RERUN_DISABLED_TESTS:
            # Probably test has disable issue but not for this platform
            skip_msg = "Test is enabled but --rerun-disabled-tests verification mode is set, so only" \
                " disabled tests are run"
            raise unittest.SkipTest(skip_msg)

    if TEST_SKIP_FAST:
        if hasattr(test, test._testMethodName) and not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
            raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# `TestCase.assertEqual` is very permissive and coerced the inputs into a format that could be compared. This is very
# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of
# `torch.testing._comparison.are_equal`, used for example by the public testing function
# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence
# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only
# change the supported inputs, but the comparison logic is the same.
# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation.
| CudaMemoryLeakCheck |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/url_only_override/package.py | {
"start": 217,
"end": 728
} | class ____(Package):
homepage = "http://www.example.com"
version(
"1.0.0",
md5="0123456789abcdef0123456789abcdef",
url="http://a.example.com/url_override-1.0.0.tar.gz",
)
version(
"0.9.0",
md5="fedcba9876543210fedcba9876543210",
url="http://b.example.com/url_override-0.9.0.tar.gz",
)
version(
"0.8.1",
md5="0123456789abcdef0123456789abcdef",
url="http://c.example.com/url_override-0.8.1.tar.gz",
)
| UrlOnlyOverride |
python | django__django | tests/delete_regress/models.py | {
"start": 2332,
"end": 2651
} | class ____(models.Model):
version = models.ForeignKey(Version, models.CASCADE)
location = models.ForeignKey(Location, models.SET_NULL, blank=True, null=True)
location_value = models.ForeignKey(
Location, models.SET(42), default=1, db_constraint=False, related_name="+"
)
# Models for #16128
| Item |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 254974,
"end": 256550
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[2, 4, 3]"):
l_x_ = L_x_
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(2, 'error'); _vmap_increment_nesting = None
_add_batch_dim: "f32[4, 3]" = torch._functorch.predispatch._add_batch_dim(l_x_, 0, 1); l_x_ = None
child: "f32[3]" = _add_batch_dim.sum(0)
child_1: "f32[4]" = _add_batch_dim.sum(1); _add_batch_dim = None
_remove_batch_dim: "f32[3, 2]" = torch._functorch.predispatch._remove_batch_dim(child, 1, 2, 1); child = None
_remove_batch_dim_1: "f32[2, 4]" = torch._functorch.predispatch._remove_batch_dim(child_1, 1, 2, 0); child_1 = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
return (_remove_batch_dim, _remove_batch_dim_1)
""",
)
def test_vmap_multiple_outputs_out_dims_tuple(self):
x = torch.ones(2, 4, 3)
out_dims = (1, 0)
def fn(x):
return torch.vmap(lambda x: (x.sum(0), x.sum(1)), out_dims=out_dims)(x)
wrapped_gm = self._compile_check(fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | doocs__leetcode | lcci/16.14.Best Line/Solution2.py | {
"start": 0,
"end": 688
} | class ____:
def bestLine(self, points: List[List[int]]) -> List[int]:
def gcd(a, b):
return a if b == 0 else gcd(b, a % b)
n = len(points)
mx = 0
for i in range(n):
x1, y1 = points[i]
cnt = defaultdict(list)
for j in range(i + 1, n):
x2, y2 = points[j]
dx, dy = x2 - x1, y2 - y1
g = gcd(dx, dy)
k = (dx // g, dy // g)
cnt[k].append((i, j))
if mx < len(cnt[k]) or (mx == len(cnt[k]) and (x, y) > cnt[k][0]):
mx = len(cnt[k])
x, y = cnt[k][0]
return [x, y]
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1404758,
"end": 1405012
} | class ____(sgqlc.types.Type, Node, AuditEntry, RepositoryAuditEntryData, OrganizationAuditEntryData, TopicAuditEntryData):
"""Audit log entry for a repo.add_topic event."""
__schema__ = github_schema
__field_names__ = ()
| RepoAddTopicAuditEntry |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_concurrency_limits/client.py | {
"start": 14748,
"end": 29108
} | class ____(BaseAsyncClient):
async def create_concurrency_limit(
self,
tag: str,
concurrency_limit: int,
) -> "UUID":
"""
Create a tag concurrency limit in the Prefect API. These limits govern concurrently
running tasks.
Args:
tag: a tag the concurrency limit is applied to
concurrency_limit: the maximum number of concurrent task runs for a given tag
Raises:
httpx.RequestError: if the concurrency limit was not created for any reason
Returns:
the ID of the concurrency limit in the backend
"""
from prefect.client.schemas.actions import ConcurrencyLimitCreate
concurrency_limit_create = ConcurrencyLimitCreate(
tag=tag,
concurrency_limit=concurrency_limit,
)
response = await self.request(
"POST",
"/concurrency_limits/",
json=concurrency_limit_create.model_dump(mode="json"),
)
concurrency_limit_id = response.json().get("id")
if not concurrency_limit_id:
raise RequestError(f"Malformed response: {response}")
from uuid import UUID
return UUID(concurrency_limit_id)
async def read_concurrency_limit_by_tag(
self,
tag: str,
) -> "ConcurrencyLimit":
"""
Read the concurrency limit set on a specific tag.
Args:
tag: a tag the concurrency limit is applied to
Raises:
ObjectNotFound: If request returns 404
httpx.RequestError: if the concurrency limit was not created for any reason
Returns:
the concurrency limit set on a specific tag
"""
try:
response = await self.request(
"GET",
"/concurrency_limits/tag/{tag}",
path_params={"tag": tag},
)
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
concurrency_limit_id = response.json().get("id")
if not concurrency_limit_id:
raise RequestError(f"Malformed response: {response}")
from prefect.client.schemas.objects import ConcurrencyLimit
return ConcurrencyLimit.model_validate(response.json())
async def read_concurrency_limits(
self,
limit: int,
offset: int,
) -> list["ConcurrencyLimit"]:
"""
Lists concurrency limits set on task run tags.
Args:
limit: the maximum number of concurrency limits returned
offset: the concurrency limit query offset
Returns:
a list of concurrency limits
"""
body = {
"limit": limit,
"offset": offset,
}
response = await self.request("POST", "/concurrency_limits/filter", json=body)
from prefect.client.schemas.objects import ConcurrencyLimit
return ConcurrencyLimit.model_validate_list(response.json())
async def reset_concurrency_limit_by_tag(
self,
tag: str,
slot_override: list["UUID | str"] | None = None,
) -> None:
"""
Resets the concurrency limit slots set on a specific tag.
Args:
tag: a tag the concurrency limit is applied to
slot_override: a list of task run IDs that are currently using a
concurrency slot, please check that any task run IDs included in
`slot_override` are currently running, otherwise those concurrency
slots will never be released.
Raises:
ObjectNotFound: If request returns 404
httpx.RequestError: If request fails
"""
if slot_override is not None:
slot_override = [str(slot) for slot in slot_override]
try:
await self.request(
"POST",
"/concurrency_limits/tag/{tag}/reset",
path_params={"tag": tag},
json=dict(slot_override=slot_override),
)
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
async def delete_concurrency_limit_by_tag(
self,
tag: str,
) -> None:
"""
Delete the concurrency limit set on a specific tag.
Args:
tag: a tag the concurrency limit is applied to
Raises:
ObjectNotFound: If request returns 404
httpx.RequestError: If request fails
"""
try:
await self.request(
"DELETE",
"/concurrency_limits/tag/{tag}",
path_params={"tag": tag},
)
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
async def increment_v1_concurrency_slots(
self,
names: list[str],
task_run_id: "UUID",
) -> "Response":
"""
Increment concurrency limit slots for the specified limits.
Args:
names: A list of limit names for which to increment limits.
task_run_id: The task run ID incrementing the limits.
"""
data: dict[str, Any] = {
"names": names,
"task_run_id": str(task_run_id),
}
return await self.request(
"POST",
"/concurrency_limits/increment",
json=data,
)
async def decrement_v1_concurrency_slots(
self,
names: list[str],
task_run_id: "UUID",
occupancy_seconds: float,
) -> "Response":
"""
Decrement concurrency limit slots for the specified limits.
Args:
names: A list of limit names to decrement.
task_run_id: The task run ID that incremented the limits.
occupancy_seconds (float): The duration in seconds that the limits
were held.
Returns:
"Response": The HTTP response from the server.
"""
data: dict[str, Any] = {
"names": names,
"task_run_id": str(task_run_id),
"occupancy_seconds": occupancy_seconds,
}
return await self.request(
"POST",
"/concurrency_limits/decrement",
json=data,
)
async def increment_concurrency_slots(
self,
names: list[str],
slots: int,
mode: Literal["concurrency", "rate_limit"],
) -> "Response":
"""
Increment concurrency slots for the specified limits.
Args:
names: A list of limit names for which to occupy slots.
slots: The number of concurrency slots to occupy.
mode: The mode of the concurrency limits.
"""
return await self.request(
"POST",
"/v2/concurrency_limits/increment",
json={
"names": names,
"slots": slots,
"mode": mode,
},
)
async def increment_concurrency_slots_with_lease(
self,
names: list[str],
slots: int,
mode: Literal["concurrency", "rate_limit"],
lease_duration: float,
holder: "ConcurrencyLeaseHolder | None" = None,
) -> "Response":
"""
Increment concurrency slots for the specified limits with a lease.
Args:
names: A list of limit names for which to occupy slots.
slots: The number of concurrency slots to occupy.
mode: The mode of the concurrency limits.
lease_duration: The duration of the lease in seconds.
holder: Optional holder information for tracking who holds the slots.
"""
body: dict[str, Any] = {
"names": names,
"slots": slots,
"mode": mode,
"lease_duration": lease_duration,
}
if holder is not None:
body["holder"] = holder.model_dump(mode="json")
return await self.request(
"POST",
"/v2/concurrency_limits/increment-with-lease",
json=body,
)
async def renew_concurrency_lease(
self,
lease_id: "UUID",
lease_duration: float,
) -> "Response":
"""
Renew a concurrency lease.
Args:
lease_id: The ID of the lease to renew.
lease_duration: The new lease duration in seconds.
"""
return await self.request(
"POST",
"/v2/concurrency_limits/leases/{lease_id}/renew",
path_params={"lease_id": lease_id},
json={"lease_duration": lease_duration},
)
async def release_concurrency_slots(
self, names: list[str], slots: int, occupancy_seconds: float
) -> "Response":
"""
Release concurrency slots for the specified limits.
Args:
names: A list of limit names for which to release slots.
slots: The number of concurrency slots to release.
occupancy_seconds (float): The duration in seconds that the slots
were occupied.
Returns:
"Response": The HTTP response from the server.
"""
return await self.request(
"POST",
"/v2/concurrency_limits/decrement",
json={
"names": names,
"slots": slots,
"occupancy_seconds": occupancy_seconds,
},
)
async def release_concurrency_slots_with_lease(
self,
lease_id: "UUID",
) -> "Response":
"""
Release concurrency slots for the specified lease.
Args:
lease_id: The ID of the lease corresponding to the concurrency limits to release.
"""
return await self.request(
"POST",
"/v2/concurrency_limits/decrement-with-lease",
json={
"lease_id": str(lease_id),
},
)
async def create_global_concurrency_limit(
self, concurrency_limit: "GlobalConcurrencyLimitCreate"
) -> "UUID":
try:
response = await self.request(
"POST",
"/v2/concurrency_limits/",
json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
)
except HTTPStatusError as e:
if e.response.status_code == 409:
raise ObjectAlreadyExists(http_exc=e) from e
else:
raise
from uuid import UUID
return UUID(response.json()["id"])
async def update_global_concurrency_limit(
self, name: str, concurrency_limit: "GlobalConcurrencyLimitUpdate"
) -> "Response":
try:
response = await self.request(
"PATCH",
"/v2/concurrency_limits/{id_or_name}",
path_params={"id_or_name": name},
json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
)
return response
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
async def delete_global_concurrency_limit_by_name(self, name: str) -> "Response":
try:
response = await self.request(
"DELETE",
"/v2/concurrency_limits/{id_or_name}",
path_params={"id_or_name": name},
)
return response
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
async def read_global_concurrency_limit_by_name(
self, name: str
) -> "GlobalConcurrencyLimitResponse":
try:
response = await self.request(
"GET",
"/v2/concurrency_limits/{id_or_name}",
path_params={"id_or_name": name},
)
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
return GlobalConcurrencyLimitResponse.model_validate(response.json())
except HTTPStatusError as e:
if e.response.status_code == 404:
raise ObjectNotFound(http_exc=e) from e
else:
raise
async def upsert_global_concurrency_limit_by_name(
self, name: str, limit: int
) -> None:
"""Creates a global concurrency limit with the given name and limit if one does not already exist.
If one does already exist matching the name then update it's limit if it is different.
Note: This is not done atomically.
"""
from prefect.client.schemas.actions import (
GlobalConcurrencyLimitCreate,
GlobalConcurrencyLimitUpdate,
)
try:
existing_limit = await self.read_global_concurrency_limit_by_name(name)
except ObjectNotFound:
existing_limit = None
if not existing_limit:
await self.create_global_concurrency_limit(
GlobalConcurrencyLimitCreate(
name=name,
limit=limit,
)
)
elif existing_limit.limit != limit:
await self.update_global_concurrency_limit(
name, GlobalConcurrencyLimitUpdate(limit=limit)
)
async def read_global_concurrency_limits(
self, limit: int = 10, offset: int = 0
) -> list["GlobalConcurrencyLimitResponse"]:
response = await self.request(
"POST",
"/v2/concurrency_limits/filter",
json={
"limit": limit,
"offset": offset,
},
)
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
return GlobalConcurrencyLimitResponse.model_validate_list(response.json())
| ConcurrencyLimitAsyncClient |
python | apache__airflow | providers/standard/src/airflow/providers/standard/sensors/bash.py | {
"start": 1179,
"end": 4792
} | class ____(BaseSensorOperator):
"""
Executes a bash command/script.
Return True if and only if the return code is 0.
:param bash_command: The command, set of commands or reference to a
bash script (must be '.sh') to be executed.
:param env: If env is not None, it must be a mapping that defines the
environment variables for the new process; these are used instead
of inheriting the current process environment, which is the default
behavior. (templated)
:param output_encoding: output encoding of bash command.
:param retry_exit_code: If task exits with this code, treat the sensor
as not-yet-complete and retry the check later according to the
usual retry/timeout settings. Any other non-zero return code will
be treated as an error, and cause the sensor to fail. If set to
``None`` (the default), any non-zero exit code will cause a retry
and the task will never raise an error except on time-out.
.. seealso::
For more information on how to use this sensor,take a look at the guide:
:ref:`howto/operator:BashSensor`
"""
template_fields: Sequence[str] = ("bash_command", "env")
def __init__(
self, *, bash_command, env=None, output_encoding="utf-8", retry_exit_code: int | None = None, **kwargs
):
super().__init__(**kwargs)
self.bash_command = bash_command
self.env = env
self.output_encoding = output_encoding
self.retry_exit_code = retry_exit_code
def poke(self, context: Context):
"""Execute the bash command in a temporary directory."""
bash_command = self.bash_command
self.log.info("Tmp dir root location: %s", gettempdir())
with (
TemporaryDirectory(prefix="airflowtmp") as tmp_dir,
NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f,
):
f.write(bytes(bash_command, "utf_8"))
f.flush()
fname = f.name
script_location = tmp_dir + "/" + fname
self.log.info("Temporary script location: %s", script_location)
self.log.info("Running command: %s", bash_command)
with Popen(
["bash", fname],
stdout=PIPE,
stderr=STDOUT,
close_fds=True,
cwd=tmp_dir,
env=self.env,
start_new_session=True,
) as resp:
if resp.stdout:
self.log.info("Output:")
for line in iter(resp.stdout.readline, b""):
self.log.info(line.decode(self.output_encoding).strip())
resp.wait()
self.log.info("Command exited with return code %s", resp.returncode)
# zero code means success, the sensor can go green
if resp.returncode == 0:
return True
# we have a retry exit code, sensor retries if return code matches, otherwise error
if self.retry_exit_code is not None:
if resp.returncode == self.retry_exit_code:
self.log.info("Return code matches retry code, will retry later")
return False
raise AirflowFailException(f"Command exited with return code {resp.returncode}")
# backwards compatibility: sensor retries no matter the error code
self.log.info("Non-zero return code and no retry code set, will retry later")
return False
| BashSensor |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/version_test_dependency_preferred/package.py | {
"start": 227,
"end": 860
} | class ____(AutotoolsPackage):
"""Dependency of version-test-pkg, which has a multi-valued
variant with two default values (a very low priority optimization
criterion for clingo is to maximize their number)
"""
homepage = "http://www.spack.org"
url = "http://www.spack.org/downloads/xz-1.0.tar.gz"
version("5.2.5", sha256="5117f930900b341493827d63aa910ff5e011e0b994197c3b71c08a20228a42df")
variant(
"libs",
default="shared,static",
values=("shared", "static"),
multi=True,
description="Build shared libs, static libs or both",
)
| VersionTestDependencyPreferred |
python | neetcode-gh__leetcode | python/0119-pascal-triangle-ii.py | {
"start": 0,
"end": 456
} | class ____:
Memo = {}
def getRow(self, rowIndex: int) -> List[int]:
if rowIndex in self.Memo:
return self.Memo[rowIndex]
if rowIndex == 0:
return [1]
ListPrec = self.getRow(rowIndex - 1)
Result = [1]
for i in range(0, len(ListPrec) - 1):
Result.append(ListPrec[i] + ListPrec[i + 1])
Result.append(1)
self.Memo[rowIndex] = Result
return Result
| Solution |
python | celery__celery | t/unit/worker/test_consumer.py | {
"start": 1580,
"end": 28639
} | class ____(ConsumerTestCase):
def setup_method(self):
@self.app.task(shared=False)
def add(x, y):
return x + y
self.add = add
def test_repr(self):
assert repr(self.get_consumer())
def test_taskbuckets_defaultdict(self):
c = self.get_consumer()
assert c.task_buckets['fooxasdwx.wewe'] is None
def test_sets_heartbeat(self):
c = self.get_consumer(amqheartbeat=10)
assert c.amqheartbeat == 10
self.app.conf.broker_heartbeat = 20
c = self.get_consumer(amqheartbeat=None)
assert c.amqheartbeat == 20
def test_gevent_bug_disables_connection_timeout(self):
with patch('celery.worker.consumer.consumer._detect_environment') as d:
d.return_value = 'gevent'
self.app.conf.broker_connection_timeout = 33.33
self.get_consumer()
assert self.app.conf.broker_connection_timeout is None
def test_limit_moved_to_pool(self):
with patch('celery.worker.consumer.consumer.task_reserved') as task_reserved:
c = self.get_consumer()
c.on_task_request = Mock(name='on_task_request')
request = Mock(name='request')
c._limit_move_to_pool(request)
task_reserved.assert_called_with(request)
c.on_task_request.assert_called_with(request)
def test_update_prefetch_count(self):
c = self.get_consumer()
c._update_qos_eventually = Mock(name='update_qos')
c.initial_prefetch_count = None
c.pool.num_processes = None
c.prefetch_multiplier = 10
assert c._update_prefetch_count(1) is None
c.initial_prefetch_count = 10
c.pool.num_processes = 10
c._update_prefetch_count(8)
c._update_qos_eventually.assert_called_with(8)
assert c.initial_prefetch_count == 10 * 10
@pytest.mark.parametrize(
'active_requests_count,expected_initial,expected_maximum,enabled',
[
[0, 2, True, True],
[1, 1, False, True],
[2, 1, False, True],
[0, 2, True, False],
[1, 2, True, False],
[2, 2, True, False],
]
)
@patch('celery.worker.consumer.consumer.active_requests', new_callable=set)
def test_restore_prefetch_count_on_restart(self, active_requests_mock, active_requests_count,
expected_initial, expected_maximum, enabled, subtests):
self.app.conf.worker_enable_prefetch_count_reduction = enabled
reqs = {Mock() for _ in range(active_requests_count)}
active_requests_mock.update(reqs)
c = self.get_consumer()
c.qos = Mock()
c.blueprint = Mock()
def bp_start(*_, **__):
if c.restart_count > 1:
c.blueprint.state = CLOSE
else:
raise ConnectionError
c.blueprint.start.side_effect = bp_start
c.start()
with subtests.test("initial prefetch count is never 0"):
assert c.initial_prefetch_count != 0
with subtests.test(f"initial prefetch count is equal to {expected_initial}"):
assert c.initial_prefetch_count == expected_initial
with subtests.test("maximum prefetch is reached"):
assert c._maximum_prefetch_restored is expected_maximum
def test_restore_prefetch_count_after_connection_restart_negative(self):
self.app.conf.worker_enable_prefetch_count_reduction = False
c = self.get_consumer()
c.qos = Mock()
# Overcome TypeError: 'Mock' object does not support the context manager protocol
class MutexMock:
def __enter__(self):
pass
def __exit__(self, *args):
pass
c.qos._mutex = MutexMock()
assert c._restore_prefetch_count_after_connection_restart(None) is None
def test_create_task_handler(self, subtests):
c = self.get_consumer()
c.qos = MagicMock()
c.qos.value = 1
c._maximum_prefetch_restored = False
sig = self.add.s(2, 2)
message = self.task_message_from_sig(self.app, sig)
def raise_exception():
raise KeyError('Foo')
def strategy(_, __, ack_log_error_promise, ___, ____):
ack_log_error_promise()
c.strategies[sig.task] = strategy
c.call_soon = raise_exception
on_task_received = c.create_task_handler()
on_task_received(message)
with subtests.test("initial prefetch count is never 0"):
assert c.initial_prefetch_count != 0
with subtests.test("initial prefetch count is 2"):
assert c.initial_prefetch_count == 2
with subtests.test("maximum prefetch is reached"):
assert c._maximum_prefetch_restored is True
def test_flush_events(self):
c = self.get_consumer()
c.event_dispatcher = None
c._flush_events()
c.event_dispatcher = Mock(name='evd')
c._flush_events()
c.event_dispatcher.flush.assert_called_with()
def test_on_send_event_buffered(self):
c = self.get_consumer()
c.hub = None
c.on_send_event_buffered()
c.hub = Mock(name='hub')
c.on_send_event_buffered()
c.hub._ready.add.assert_called_with(c._flush_events)
def test_schedule_bucket_request(self):
c = self.get_consumer()
c.timer = Mock()
bucket = Mock()
request = Mock()
bucket.pop = lambda: bucket.contents.popleft()
bucket.can_consume.return_value = True
bucket.contents = deque()
with patch(
'celery.worker.consumer.consumer.Consumer._limit_move_to_pool'
) as task_reserved:
bucket.contents.append((request, 3))
c._schedule_bucket_request(bucket)
bucket.can_consume.assert_called_with(3)
task_reserved.assert_called_with(request)
bucket.can_consume.return_value = False
bucket.contents = deque()
bucket.expected_time.return_value = 3.33
bucket.contents.append((request, 4))
limit_order = c._limit_order
c._schedule_bucket_request(bucket)
assert c._limit_order == limit_order + 1
bucket.can_consume.assert_called_with(4)
c.timer.call_after.assert_called_with(
3.33, c._schedule_bucket_request, (bucket,),
priority=c._limit_order,
)
bucket.expected_time.assert_called_with(4)
assert bucket.pop() == (request, 4)
bucket.contents = deque()
bucket.can_consume.reset_mock()
c._schedule_bucket_request(bucket)
bucket.can_consume.assert_not_called()
def test_limit_task(self):
c = self.get_consumer()
bucket = Mock()
request = Mock()
with patch(
'celery.worker.consumer.consumer.Consumer._schedule_bucket_request'
) as task_reserved:
c._limit_task(request, bucket, 1)
bucket.add.assert_called_with((request, 1))
task_reserved.assert_called_with(bucket)
def test_post_eta(self):
c = self.get_consumer()
c.qos = Mock()
bucket = Mock()
request = Mock()
with patch(
'celery.worker.consumer.consumer.Consumer._schedule_bucket_request'
) as task_reserved:
c._limit_post_eta(request, bucket, 1)
c.qos.decrement_eventually.assert_called_with()
bucket.add.assert_called_with((request, 1))
task_reserved.assert_called_with(bucket)
def test_max_restarts_exceeded(self):
c = self.get_consumer()
def se(*args, **kwargs):
c.blueprint.state = CLOSE
raise RestartFreqExceeded()
c._restart_state.step.side_effect = se
c.blueprint.start.side_effect = socket.error()
with patch('celery.worker.consumer.consumer.sleep') as sleep:
c.start()
sleep.assert_called_with(1)
def test_do_not_restart_when_closed(self):
c = self.get_consumer()
c.blueprint.state = None
def bp_start(*args, **kwargs):
c.blueprint.state = CLOSE
c.blueprint.start.side_effect = bp_start
with patch('celery.worker.consumer.consumer.sleep'):
c.start()
c.blueprint.start.assert_called_once_with(c)
def test_do_not_restart_when_terminated(self):
c = self.get_consumer()
c.blueprint.state = None
def bp_start(*args, **kwargs):
c.blueprint.state = TERMINATE
c.blueprint.start.side_effect = bp_start
with patch('celery.worker.consumer.consumer.sleep'):
c.start()
c.blueprint.start.assert_called_once_with(c)
def test_too_many_open_files_raises_error(self):
c = self.get_consumer()
err = OSError()
err.errno = errno.EMFILE
c.blueprint.start.side_effect = err
with pytest.raises(WorkerTerminate):
c.start()
def _closer(self, c):
def se(*args, **kwargs):
c.blueprint.state = CLOSE
return se
@pytest.mark.parametrize("broker_connection_retry", [True, False])
def test_blueprint_restart_when_state_not_in_stop_conditions(self, broker_connection_retry):
c = self.get_consumer()
# ensure that WorkerShutdown is not raised
c.app.conf['broker_connection_retry'] = broker_connection_retry
c.app.conf['broker_connection_retry_on_startup'] = True
c.restart_count = -1
# ensure that blueprint state is not in stop conditions
c.blueprint.state = bootsteps.RUN
c.blueprint.start.side_effect = ConnectionError()
# stops test from running indefinitely in the while loop
c.blueprint.restart.side_effect = self._closer(c)
c.start()
c.blueprint.restart.assert_called_once()
@pytest.mark.parametrize("broker_channel_error_retry", [True, False])
def test_blueprint_restart_for_channel_errors(self, broker_channel_error_retry):
c = self.get_consumer()
# ensure that WorkerShutdown is not raised
c.app.conf['broker_connection_retry'] = True
c.app.conf['broker_connection_retry_on_startup'] = True
c.app.conf['broker_channel_error_retry'] = broker_channel_error_retry
c.restart_count = -1
# ensure that blueprint state is not in stop conditions
c.blueprint.state = bootsteps.RUN
c.blueprint.start.side_effect = ChannelError()
# stops test from running indefinitely in the while loop
c.blueprint.restart.side_effect = self._closer(c)
# restarted only when broker_channel_error_retry is True
if broker_channel_error_retry:
c.start()
c.blueprint.restart.assert_called_once()
else:
with pytest.raises(ChannelError):
c.start()
def test_collects_at_restart(self):
c = self.get_consumer()
c.connection.collect.side_effect = MemoryError()
c.blueprint.start.side_effect = socket.error()
c.blueprint.restart.side_effect = self._closer(c)
c.start()
c.connection.collect.assert_called_with()
def test_register_with_event_loop(self):
c = self.get_consumer()
c.register_with_event_loop(Mock(name='loop'))
def test_on_close_clears_semaphore_timer_and_reqs(self):
with patch('celery.worker.consumer.consumer.reserved_requests') as res:
c = self.get_consumer()
c.on_close()
c.controller.semaphore.clear.assert_called_with()
c.timer.clear.assert_called_with()
res.clear.assert_called_with()
c.pool.flush.assert_called_with()
c.controller = None
c.timer = None
c.pool = None
c.on_close()
def test_connect_error_handler(self):
self.app._connection = _amqp_connection()
conn = self.app._connection.return_value
c = self.get_consumer()
assert c.connect()
conn.ensure_connection.assert_called()
errback = conn.ensure_connection.call_args[0][0]
errback(Mock(), 0)
@patch('celery.worker.consumer.consumer.error')
def test_connect_error_handler_progress(self, error):
self.app.conf.broker_connection_retry = True
self.app.conf.broker_connection_max_retries = 3
self.app._connection = _amqp_connection()
conn = self.app._connection.return_value
# Placeholder alt connection to satisfy failover condition
conn.alt = [conn]
c = self.get_consumer()
assert c.connect()
errback = conn.ensure_connection.call_args[0][0]
errback(Mock(), 2)
assert error.call_args[0][3] == 'Trying again in 2.00 seconds... (1/3)'
errback(Mock(), 4)
assert error.call_args[0][3] == 'Trying again in 4.00 seconds... (2/3)'
errback(Mock(), 12)
assert error.call_args[0][3] == 'Trying again in 12.00 seconds... (3/3)'
errback(Mock(), 0)
assert getattr(c, 'broker_connection_retry_attempt', 0) == 3
def test_cancel_long_running_tasks_on_connection_loss(self):
c = self.get_consumer()
c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = True
mock_request_acks_late_not_acknowledged = Mock()
mock_request_acks_late_not_acknowledged.task.acks_late = True
mock_request_acks_late_not_acknowledged.acknowledged = False
mock_request_acks_late_acknowledged = Mock()
mock_request_acks_late_acknowledged.task.acks_late = True
mock_request_acks_late_acknowledged.acknowledged = True
mock_request_acks_early = Mock()
mock_request_acks_early.task.acks_late = False
mock_request_acks_early.acknowledged = False
active_requests.add(mock_request_acks_late_not_acknowledged)
active_requests.add(mock_request_acks_late_acknowledged)
active_requests.add(mock_request_acks_early)
c.on_connection_error_after_connected(Mock())
mock_request_acks_late_not_acknowledged.cancel.assert_called_once_with(c.pool)
mock_request_acks_late_acknowledged.cancel.assert_not_called()
mock_request_acks_early.cancel.assert_not_called()
active_requests.clear()
def test_cancel_long_running_tasks_on_connection_loss__warning(self):
c = self.get_consumer()
c.app.conf.worker_cancel_long_running_tasks_on_connection_loss = False
with pytest.deprecated_call(match=CANCEL_TASKS_BY_DEFAULT):
c.on_connection_error_after_connected(Mock())
@pytest.mark.usefixtures('depends_on_current_app')
def test_cancel_active_requests(self):
c = self.get_consumer()
mock_request_acks_late_not_acknowledged = Mock(id='1')
mock_request_acks_late_not_acknowledged.task.acks_late = True
mock_request_acks_late_not_acknowledged.acknowledged = False
mock_request_acks_late_acknowledged = Mock(id='2')
mock_request_acks_late_acknowledged.task.acks_late = True
mock_request_acks_late_acknowledged.acknowledged = True
mock_request_acks_early = Mock(id='3')
mock_request_acks_early.task.acks_late = False
active_requests.add(mock_request_acks_late_not_acknowledged)
active_requests.add(mock_request_acks_late_acknowledged)
active_requests.add(mock_request_acks_early)
c.cancel_active_requests()
# acks_late unacknowledged tasks should be cancelled without RETRY
mock_request_acks_late_not_acknowledged.cancel.assert_called_once_with(c.pool, emit_retry=False)
# acks_late acknowledged tasks should NOT be cancelled
mock_request_acks_late_acknowledged.cancel.assert_not_called()
# Non-acks_late tasks should be cancelled normally (with RETRY)
mock_request_acks_early.cancel.assert_called_once_with(c.pool, emit_retry=True)
active_requests.clear()
@pytest.mark.usefixtures('depends_on_current_app')
def test_cancel_active_requests_preserves_successful_tasks(self):
c = self.get_consumer()
mock_successful_request = Mock(id='successful-task')
mock_successful_request.task.acks_late = True
mock_successful_request.acknowledged = False
active_requests.add(mock_successful_request)
successful_requests.add('successful-task')
try:
c.cancel_active_requests()
mock_successful_request.cancel.assert_not_called()
finally:
active_requests.clear()
successful_requests.clear()
@pytest.mark.parametrize("broker_connection_retry", [True, False])
@pytest.mark.parametrize("broker_connection_retry_on_startup", [None, False])
@pytest.mark.parametrize("first_connection_attempt", [True, False])
def test_ensure_connected(self, subtests, broker_connection_retry, broker_connection_retry_on_startup,
first_connection_attempt):
c = self.get_consumer()
c.first_connection_attempt = first_connection_attempt
c.app.conf.broker_connection_retry_on_startup = broker_connection_retry_on_startup
c.app.conf.broker_connection_retry = broker_connection_retry
if broker_connection_retry is False:
if broker_connection_retry_on_startup is None:
with subtests.test("Deprecation warning when startup is None"):
with pytest.deprecated_call():
c.ensure_connected(Mock())
with subtests.test("Does not retry when connect throws an error and retry is set to false"):
conn = Mock()
conn.connect.side_effect = ConnectionError()
with pytest.raises(ConnectionError):
c.ensure_connected(conn)
def test_disable_prefetch_not_enabled(self):
"""Test that disable_prefetch doesn't affect behavior when disabled"""
self.app.conf.worker_disable_prefetch = False
# Test the core logic by creating a mock consumer and Tasks instance
from celery.worker.consumer.tasks import Tasks
consumer = Mock()
consumer.app = self.app
consumer.pool = Mock()
consumer.pool.num_processes = 4
consumer.controller = Mock()
consumer.controller.max_concurrency = None
consumer.initial_prefetch_count = 16
consumer.connection = Mock()
consumer.connection.default_channel = Mock()
consumer.connection.transport = Mock()
consumer.connection.transport.driver_type = 'redis'
consumer.update_strategies = Mock()
consumer.on_decode_error = Mock()
# Mock task consumer
consumer.task_consumer = Mock()
consumer.task_consumer.channel = Mock()
consumer.task_consumer.channel.qos = Mock()
original_can_consume = Mock(return_value=True)
consumer.task_consumer.channel.qos.can_consume = original_can_consume
consumer.task_consumer.qos = Mock()
consumer.app.amqp = Mock()
consumer.app.amqp.TaskConsumer = Mock(return_value=consumer.task_consumer)
tasks_instance = Tasks(consumer)
tasks_instance.start(consumer)
# Should not modify can_consume method when disabled
assert consumer.task_consumer.channel.qos.can_consume == original_can_consume
def test_disable_prefetch_enabled_basic(self):
"""Test that disable_prefetch modifies can_consume when enabled"""
self.app.conf.worker_disable_prefetch = True
# Test the core logic by creating a mock consumer and Tasks instance
from celery.worker.consumer.tasks import Tasks
consumer = Mock()
consumer.app = self.app
consumer.pool = Mock()
consumer.pool.num_processes = 4
consumer.controller = Mock()
consumer.controller.max_concurrency = None
consumer.initial_prefetch_count = 16
consumer.connection = Mock()
consumer.connection.default_channel = Mock()
consumer.connection.transport = Mock()
consumer.connection.transport.driver_type = 'redis'
consumer.update_strategies = Mock()
consumer.on_decode_error = Mock()
# Mock task consumer
consumer.task_consumer = Mock()
consumer.task_consumer.channel = Mock()
consumer.task_consumer.channel.qos = Mock()
original_can_consume = Mock(return_value=True)
consumer.task_consumer.channel.qos.can_consume = original_can_consume
consumer.task_consumer.qos = Mock()
consumer.app.amqp = Mock()
consumer.app.amqp.TaskConsumer = Mock(return_value=consumer.task_consumer)
tasks_instance = Tasks(consumer)
with patch('celery.worker.state.reserved_requests', []):
tasks_instance.start(consumer)
# Should modify can_consume method when enabled
assert callable(consumer.task_consumer.channel.qos.can_consume)
assert consumer.task_consumer.channel.qos.can_consume != original_can_consume
def test_disable_prefetch_respects_reserved_requests_limit(self):
"""Test that disable_prefetch respects reserved requests limit"""
self.app.conf.worker_disable_prefetch = True
# Test the core logic by creating a mock consumer and Tasks instance
from celery.worker.consumer.tasks import Tasks
consumer = Mock()
consumer.app = self.app
consumer.pool = Mock()
consumer.pool.num_processes = 4
consumer.controller = Mock()
consumer.controller.max_concurrency = None
consumer.initial_prefetch_count = 16
consumer.connection = Mock()
consumer.connection.default_channel = Mock()
consumer.connection.transport = Mock()
consumer.connection.transport.driver_type = 'redis'
consumer.update_strategies = Mock()
consumer.on_decode_error = Mock()
# Mock task consumer
consumer.task_consumer = Mock()
consumer.task_consumer.channel = Mock()
consumer.task_consumer.channel.qos = Mock()
consumer.task_consumer.channel.qos.can_consume = Mock(return_value=True)
consumer.task_consumer.qos = Mock()
consumer.app.amqp = Mock()
consumer.app.amqp.TaskConsumer = Mock(return_value=consumer.task_consumer)
tasks_instance = Tasks(consumer)
# Mock 4 reserved requests (at limit of 4)
mock_requests = [Mock(), Mock(), Mock(), Mock()]
with patch('celery.worker.state.reserved_requests', mock_requests):
tasks_instance.start(consumer)
# Should not be able to consume when at limit
assert consumer.task_consumer.channel.qos.can_consume() is False
def test_disable_prefetch_respects_autoscale_max_concurrency(self):
"""Test that disable_prefetch respects autoscale max_concurrency limit"""
self.app.conf.worker_disable_prefetch = True
# Test the core logic by creating a mock consumer and Tasks instance
from celery.worker.consumer.tasks import Tasks
consumer = Mock()
consumer.app = self.app
consumer.pool = Mock()
consumer.pool.num_processes = 4
consumer.controller = Mock()
consumer.controller.max_concurrency = 2 # Lower than pool processes
consumer.initial_prefetch_count = 16
consumer.connection = Mock()
consumer.connection.default_channel = Mock()
consumer.connection.transport = Mock()
consumer.connection.transport.driver_type = 'redis'
consumer.update_strategies = Mock()
consumer.on_decode_error = Mock()
# Mock task consumer
consumer.task_consumer = Mock()
consumer.task_consumer.channel = Mock()
consumer.task_consumer.channel.qos = Mock()
consumer.task_consumer.channel.qos.can_consume = Mock(return_value=True)
consumer.task_consumer.qos = Mock()
consumer.app.amqp = Mock()
consumer.app.amqp.TaskConsumer = Mock(return_value=consumer.task_consumer)
tasks_instance = Tasks(consumer)
# Mock 2 reserved requests (at autoscale limit of 2)
mock_requests = [Mock(), Mock()]
with patch('celery.worker.state.reserved_requests', mock_requests):
tasks_instance.start(consumer)
# Should not be able to consume when at autoscale limit
assert consumer.task_consumer.channel.qos.can_consume() is False
def test_disable_prefetch_ignored_for_non_redis_brokers(self):
"""Test that disable_prefetch is ignored for non-Redis brokers."""
self.app.conf.worker_disable_prefetch = True
# Test the core logic by creating a mock consumer and Tasks instance
from celery.worker.consumer.tasks import Tasks
consumer = Mock()
consumer.app = self.app
consumer.pool = Mock()
consumer.pool.num_processes = 4
consumer.controller = Mock()
consumer.controller.max_concurrency = None
consumer.initial_prefetch_count = 16
consumer.connection = Mock()
consumer.connection.default_channel = Mock()
consumer.connection.transport = Mock()
consumer.connection.transport.driver_type = 'amqp' # RabbitMQ
consumer.connection.qos_semantics_matches_spec = True
consumer.update_strategies = Mock()
consumer.on_decode_error = Mock()
# Mock task consumer
consumer.task_consumer = Mock()
consumer.task_consumer.channel = Mock()
consumer.task_consumer.channel.qos = Mock()
original_can_consume = Mock(return_value=True)
consumer.task_consumer.channel.qos.can_consume = original_can_consume
consumer.task_consumer.qos = Mock()
consumer.app.amqp = Mock()
consumer.app.amqp.TaskConsumer = Mock(return_value=consumer.task_consumer)
consumer.app.amqp.queues = {} # Empty dict for quorum queue detection
tasks_instance = Tasks(consumer)
tasks_instance.start(consumer)
# Should not modify can_consume method for non-Redis brokers
assert consumer.task_consumer.channel.qos.can_consume == original_can_consume
@pytest.mark.parametrize(
"broker_connection_retry_on_startup,is_connection_loss_on_startup",
[
pytest.param(False, True, id='shutdown on connection loss on startup'),
pytest.param(None, True, id='shutdown on connection loss on startup when retry on startup is undefined'),
pytest.param(False, False, id='shutdown on connection loss not on startup but startup is defined as false'),
pytest.param(None, False, id='shutdown on connection loss not on startup and startup is not defined'),
pytest.param(True, False, id='shutdown on connection loss not on startup but startup is defined as true'),
]
)
| test_Consumer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance21.py | {
"start": 177,
"end": 851
} | class ____:
pass
def is_sentinel(value: object) -> TypeIs[type[Sentinel]]: ...
def _(a: dict[str, int] | type[Sentinel]):
if is_sentinel(a):
reveal_type(a, expected_text="type[Sentinel]")
else:
reveal_type(a, expected_text="dict[str, int]")
def is_str_type(typ: object) -> TypeIs[type[str]]:
return typ is str
def test_typevar[T](typ: type[T], val: T) -> None:
if is_str_type(typ):
reveal_type(typ, expected_text="type[str]*")
def func1(v: Sentinel | type[Sentinel]):
if isinstance(v, Sentinel):
reveal_type(v, expected_text="Sentinel")
else:
reveal_type(v, expected_text="type[Sentinel]")
| Sentinel |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-core/dagster_dg_core/config.py | {
"start": 17821,
"end": 21406
} | class ____(TypedDict):
directory_type: Required[Literal["project"]]
project: Required[DgRawProjectConfig]
cli: NotRequired[DgRawCliConfig]
def is_project_file_config(config: "DgFileConfig") -> TypeGuard[DgProjectFileConfig]:
return config["directory_type"] == "project"
DgFileConfig: TypeAlias = Union[DgWorkspaceFileConfig, DgProjectFileConfig]
@contextmanager
def modify_dg_toml_config(
path: Path,
) -> Iterator[Union["tomlkit.TOMLDocument", "tomlkit.items.Table"]]:
"""Modify a TOML file as a tomlkit.TOMLDocument, preserving comments and formatting."""
import tomlkit
import tomlkit.items
with modify_toml(path) as toml:
if detect_dg_config_file_format(path) == "root":
yield toml
elif not has_toml_node(toml, ("tool", "dg")):
raise KeyError(
"TOML file does not have a tool.dg section. This is required for pyproject.toml files."
)
else:
yield get_toml_node(toml, ("tool", "dg"), tomlkit.items.Table)
def load_dg_user_file_config(path: Optional[Path] = None) -> DgRawCliConfig:
path = path or get_dg_config_path()
contents = load_config(path).get("cli", {})
return DgRawCliConfig(**{k: v for k, v in contents.items() if k != "plus"})
_OLD_USER_FILE_CONFIG_LOCATION = Path.home() / ".dg.toml"
def has_dg_user_file_config() -> bool:
# Remove when we remove other deprecated stuff.
if (Path.home() / ".dg.toml").exists():
# We can't suppress this warning because we haven't loaded the config with the
# suppress_warnings list yet.
emit_warning(
"deprecated_user_config_location",
f"""
Found config file ~/.dg.toml. This location for user config is no longer being read.
Please move your configuration file to `{get_dg_config_path()}`.
""",
None,
include_suppression_instruction=False,
)
return does_dg_config_file_exist()
def load_dg_root_file_config(
path: Path, config_format: Optional[DgConfigFileFormat] = None
) -> DgFileConfig:
return _load_dg_file_config(path, config_format)
def load_dg_workspace_file_config(path: Path) -> "DgWorkspaceFileConfig":
config = _load_dg_file_config(path, None)
if is_workspace_file_config(config):
return config
else:
raise_file_config_validation_error("Expected a workspace configuration.", path)
def _load_dg_file_config(path: Path, config_format: Optional[DgConfigFileFormat]) -> DgFileConfig:
validation_result = validate_dg_file_config(path, config_format)
if validation_result.has_errors:
raise_file_config_validation_error(validation_result.message, path)
return validation_result.config
def validate_dg_file_config(
path: Path, config_format: Optional[DgConfigFileFormat] = None
) -> "DgConfigValidationResult":
"""Validate a Dg config file at the given path."""
import tomlkit
import tomlkit.items
toml = tomlkit.parse(path.read_text())
config_format = config_format or detect_dg_config_file_format(path)
if config_format == "root":
raw_dict = toml.unwrap()
path_prefix = None
else:
raw_dict = get_toml_node(toml, ("tool", "dg"), tomlkit.items.Table).unwrap()
path_prefix = "tool.dg"
return _DgConfigValidator(path_prefix).validate({k: v for k, v in raw_dict.items()})
_DgConfigErrorType: TypeAlias = Literal[
"unrecognized_field",
"missing_required_field",
"invalid_value",
]
@record
| DgProjectFileConfig |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/overrides.py | {
"start": 2426,
"end": 2638
} | class ____(D):
@overload
def methodA(self, arg: int) -> int:
...
@overload
def methodA(self, arg: str) -> str:
...
def methodA(self, arg):
return arg
| OverloadedOverride |
python | getsentry__sentry | tests/sentry/mail/test_adapter.py | {
"start": 7180,
"end": 40844
} | class ____(BaseMailAdapterTest):
@mock.patch("sentry.analytics.record")
def test_simple_notification(self, mock_record: MagicMock) -> None:
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule: Rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert msg.subject == "[Sentry] BAR-1 - Hello world"
assert isinstance(msg.alternatives[0][0], str)
assert "my rule" in msg.alternatives[0][0]
assert "notification_uuid" in msg.body
assert_any_analytics_event(
mock_record,
EmailNotificationSent(
organization_id=self.organization.id,
project_id=self.project.id,
category="issue_alert",
group_id=event.group_id,
user_id=0,
id=0,
actor_type="User",
notification_uuid="",
alert_id=rule.id,
),
exclude_fields=[
"id",
"project_id",
"actor_id",
"user_id",
"notification_uuid",
"alert_id",
],
)
assert_last_analytics_event(
mock_record,
AlertSentEvent(
organization_id=self.organization.id,
project_id=self.project.id,
provider="email",
alert_id=rule.id,
alert_type="issue_alert",
external_id="ANY",
notification_uuid="ANY",
),
exclude_fields=["external_id", "notification_uuid"],
)
@mock.patch("sentry.mail.notifications.get_context")
@mock.patch("sentry.analytics.record")
def test_email_with_reply_to(self, mock_record: MagicMock, mock_context: MagicMock) -> None:
mock_context.return_value = {"reply_to": "reply@example.com"}
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert msg.message()["Reply-To"] == "reply@example.com"
def test_notification_with_environment(self) -> None:
environment = self.create_environment(self.project, name="production")
event = self.store_event(
data={"message": "Hello world", "level": "error", "environment": environment.name},
project_id=self.project.id,
)
rule = Rule.objects.create(
project=self.project, label="my rule", environment_id=environment.id
)
ProjectOwnership.objects.create(project_id=self.project.id)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert msg.subject == "[Sentry] BAR-1 - Hello world"
assert isinstance(msg.alternatives[0][0], str)
assert "my rule" in msg.alternatives[0][0]
assert f"&environment={environment.name}" in msg.body
assert "notification_uuid" in msg.body
def test_simple_snooze(self) -> None:
"""Test that notification for alert snoozed by user is not send to that user."""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule = self.create_project_rule(project=self.project)
self.snooze_rule(user_id=self.user.id, owner_id=self.user.id, rule=rule)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.ISSUE_OWNERS)
assert len(mail.outbox) == 0
def test_snooze_for_all(self) -> None:
"""Test that notification for alert snoozed for everyone is not send to user."""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule = self.create_project_rule(project=self.project)
self.snooze_rule(owner_id=self.user.id, rule=rule)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.ISSUE_OWNERS)
assert len(mail.outbox) == 0
def test_someone_else_snoozes_themself(self) -> None:
"""Test that notification for alert snoozed by user2 for themself is sent to user"""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule = self.create_project_rule(project=self.project)
user2 = self.create_user(email="otheruser@example.com")
self.snooze_rule(user_id=user2.id, owner_id=user2.id, rule=rule)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.subject == "[Sentry] BAR-1 - Hello world"
def test_someone_else_snoozes_everyone(self) -> None:
"""Test that notification for alert snoozed by user2 for everyone is not sent to user"""
event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
rule = self.create_project_rule(project=self.project)
user2 = self.create_user(email="otheruser@example.com")
self.snooze_rule(owner_id=user2.id, rule=rule)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(notification, ActionTargetType.ISSUE_OWNERS)
assert len(mail.outbox) == 0
def test_simple_notification_generic(self) -> None:
"""Test that an issue that is neither error nor performance type renders a generic email template"""
orig_event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
event = orig_event.for_group(orig_event.groups[0])
occurrence = IssueOccurrence(
uuid.uuid4().hex,
self.project.id,
uuid.uuid4().hex,
["some-fingerprint"],
"something bad happened",
"it was bad",
"1234",
{"Test": 123},
[
IssueEvidence("Evidence 1", "Value 1", True),
IssueEvidence("Evidence 2", "Value 2", False),
IssueEvidence("Evidence 3", "Value 3", False),
],
MonitorIncidentType,
timezone.now(),
"info",
"/api/123",
)
occurrence.save()
event.occurrence = occurrence
event.group.type = MonitorIncidentType.type_id
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert msg.subject == f"[Sentry] BAR-1 - {occurrence.issue_title}"
checked_values = [
"Issue Data",
"Evidence 1",
"Value 1",
"Evidence 2",
"Value 2",
"Evidence 3",
"Value 3",
]
for checked_value in checked_values:
assert isinstance(msg.alternatives[0][0], str)
assert (
checked_value in msg.alternatives[0][0]
), f"{checked_value} not present in message"
def test_simple_notification_generic_no_evidence(self) -> None:
"""Test that an issue with no evidence that is neither error nor performance type renders a generic email template"""
orig_event = self.store_event(
data={"message": "Hello world", "level": "error"}, project_id=self.project.id
)
event = orig_event.for_group(orig_event.groups[0])
occurrence = IssueOccurrence(
uuid.uuid4().hex,
self.project.id,
uuid.uuid4().hex,
["some-fingerprint"],
"something bad happened",
"it was bad",
"1234",
{"Test": 123},
[], # no evidence
MonitorIncidentType,
timezone.now(),
"info",
"/api/123",
)
occurrence.save()
event.occurrence = occurrence
event.group.type = MonitorIncidentType.type_id
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert msg.subject == "[Sentry] BAR-1 - something bad happened"
assert isinstance(msg.alternatives[0][0], str)
assert "Issue Data" not in msg.alternatives[0][0]
def test_simple_notification_perf(self) -> None:
event = self.create_performance_issue()
rule = Rule.objects.create(project=self.project, label="my rule")
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert msg.subject == "[Sentry] BAR-1 - N+1 Query"
checked_values = [
"Transaction Name",
# TODO: Not sure if this is right
"db - SELECT `books_author`.`id`, `books_author`.`",
"Parent Span",
"django.view - index",
"Repeating Spans (10)",
"db - SELECT `books_author`.`id`, `books_author`.`name` FROM `books_autho...",
]
for checked_value in checked_values:
assert isinstance(msg.alternatives[0][0], str)
assert (
checked_value in msg.alternatives[0][0]
), f"{checked_value} not present in message"
assert "notification_uuid" in msg.body
@mock.patch("sentry.interfaces.stacktrace.Stacktrace.get_title")
@mock.patch("sentry.interfaces.stacktrace.Stacktrace.to_email_html")
def test_notify_users_renders_interfaces_with_utf8(
self, _to_email_html: MagicMock, _get_title: MagicMock
) -> None:
_to_email_html.return_value = "רונית מגן"
_get_title.return_value = "Stacktrace"
event = self.store_event(
data={"message": "Soubor ji\xc5\xbe existuje", "stacktrace": {"frames": [{}]}},
project_id=self.project.id,
)
notification = Notification(event=event)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with self.options({"system.url-prefix": "http://example.com"}):
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
_get_title.assert_called_once_with()
_to_email_html.assert_called_once_with(event)
@mock_notify
@mock.patch("sentry.notifications.notifications.rules.logger")
def test_notify_users_does_email(self, mock_logger, mock_func) -> None:
self.create_user_option(user=self.user, key="timezone", value="Europe/Vienna")
event_manager = EventManager({"message": "hello world", "level": "error"})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = get_event_type(event_data)
event_data["type"] = event_type.key
event_data["metadata"] = event_type.get_metadata(event_data)
event = event_manager.save(self.project.id)
group = event.group
self.create_notification_settings_provider(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
provider="slack",
type="alerts",
value="never",
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
rule = self.create_project_rule(project=self.project)
with self.tasks():
AlertRuleNotification(
Notification(event=event, rules=[rule]),
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
).send()
assert mock_func.call_count == 1
args, kwargs = mock_func.call_args
notification = args[1]
recipient_context = notification.get_recipient_context(Actor.from_orm_user(self.user), {})
assert recipient_context["timezone"] == zoneinfo.ZoneInfo("Europe/Vienna")
self.assertEqual(notification.project, self.project)
self.assertEqual(notification.reference, group)
assert notification.get_subject() == "BAR-1 - hello world"
assert notification.get_context()["snooze_alert"] is True
assert group
mock_logger.info.assert_called_with(
"mail.adapter.notify",
extra={
"target_type": "IssueOwners",
"target_identifier": None,
"group": group.id,
"project_id": group.project.id,
"organization": group.organization.id,
"fallthrough_choice": "ActiveMembers",
"notification_uuid": mock.ANY,
},
)
@mock_notify
@mock.patch("sentry.notifications.notifications.rules.logger")
@with_feature("organizations:workflow-engine-ui-links")
def test_notify_users_does_email_workflow_engine_ui_links(self, mock_logger, mock_func) -> None:
self.create_user_option(user=self.user, key="timezone", value="Europe/Vienna")
event_manager = EventManager({"message": "hello world", "level": "error"})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = get_event_type(event_data)
event_data["type"] = event_type.key
event_data["metadata"] = event_type.get_metadata(event_data)
event = event_manager.save(self.project.id)
group = event.group
self.create_notification_settings_provider(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
provider="slack",
type="alerts",
value="never",
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
rule = self.create_project_rule(
project=self.project, action_data=[{"workflow_id": "1234567890"}]
)
with self.tasks():
AlertRuleNotification(
Notification(event=event, rules=[rule]),
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
).send()
assert mock_func.call_count == 1
args, kwargs = mock_func.call_args
notification = args[1]
recipient_context = notification.get_recipient_context(Actor.from_orm_user(self.user), {})
assert recipient_context["timezone"] == zoneinfo.ZoneInfo("Europe/Vienna")
self.assertEqual(notification.project, self.project)
self.assertEqual(notification.reference, group)
assert notification.get_subject() == "BAR-1 - hello world"
# Because we are using the workflow engine, the snooze_alert context should be False
# This is because a user cannot snooze a workflow for themselves
assert notification.get_context()["snooze_alert"] is False
assert group
mock_logger.info.assert_called_with(
"mail.adapter.notify",
extra={
"target_type": "IssueOwners",
"target_identifier": None,
"group": group.id,
"project_id": group.project.id,
"organization": group.organization.id,
"fallthrough_choice": "ActiveMembers",
"notification_uuid": mock.ANY,
},
)
@mock_notify
def test_email_notification_is_not_sent_to_deleted_email(self, mock_func) -> None:
"""
Test that ensures if we still have some stale emails in UserOption, then upon attempting
to send an email notification to those emails, these stale `UserOption` instances are
deleted
"""
# Initial Creation
self.organization = self.create_organization()
self.team = self.create_team(organization=self.organization)
user = self.create_user(email="foo@bar.dodo", is_active=True)
self.create_member(user=user, organization=self.organization, teams=[self.team])
with assume_test_silo_mode(SiloMode.CONTROL):
UserOption.objects.create(
user=user, key="mail:email", value="foo@bar.dodo", project_id=self.project.id
)
# disable slack
NotificationSettingProvider.objects.create(
user_id=user.id,
scope_type="user",
scope_identifier=user.id,
provider="slack",
type="alerts",
value="never",
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with assume_test_silo_mode(SiloMode.CONTROL):
# New secondary email is created
useremail = UserEmail.objects.create(
user=user, email="ahmed@ahmed.io", is_verified=True
)
# Set secondary email to be primary
user.email = useremail.email
user.save()
# Delete first email
old_useremail = UserEmail.objects.get(email="foo@bar.dodo")
old_useremail.delete()
event_manager = EventManager({"message": "hello world", "level": "error"})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = get_event_type(event_data)
event_data["type"] = event_type.key
event_data["metadata"] = event_type.get_metadata(event_data)
event = event_manager.save(self.project.id)
with self.tasks():
AlertRuleNotification(
Notification(event=event),
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
).send()
assert mock_func.call_count == 1
args, kwargs = mock_func.call_args
notification = args[1]
user_ids = []
for user in list(notification.get_participants().values())[0]:
user_ids.append(user.id)
assert "ahmed@ahmed.io" in get_email_addresses(user_ids, self.project).values()
with assume_test_silo_mode(SiloMode.CONTROL):
assert not len(UserOption.objects.filter(key="mail:email", value="foo@bar.dodo"))
@mock_notify
def test_multiline_error(self, mock_func) -> None:
event_manager = EventManager({"message": "hello world\nfoo bar", "level": "error"})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = get_event_type(event_data)
event_data["type"] = event_type.key
event_data["metadata"] = event_type.get_metadata(event_data)
# disable slack
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingProvider.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
provider="slack",
type="alerts",
value="never",
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = event_manager.save(self.project.id)
with self.tasks():
AlertRuleNotification(
Notification(event=event),
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
).send()
assert mock_func.call_count == 1
args, kwargs = mock_func.call_args
notification = args[1]
assert notification.get_subject() == "BAR-1 - hello world"
def test_notify_users_with_utf8_subject(self) -> None:
event = self.store_event(
data={"message": "רונית מגן", "level": "error"}, project_id=self.project.id
)
notification = Notification(event=event)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.subject == "[Sentry] BAR-1 - רונית מגן"
def test_notify_users_with_their_timezones(self) -> None:
"""
Test that ensures that datetime in issue alert email is in the user's timezone
"""
from django.template.defaultfilters import date
timestamp = timezone.now()
local_timestamp_s = timezone.localtime(timestamp, zoneinfo.ZoneInfo("Europe/Vienna"))
local_timestamp = date(local_timestamp_s, "N j, Y, g:i:s a e")
with assume_test_silo_mode(SiloMode.CONTROL):
UserOption.objects.create(user=self.user, key="timezone", value="Europe/Vienna")
event = self.store_event(
data={"message": "foobar", "level": "error", "timestamp": timestamp.isoformat()},
project_id=self.project.id,
)
notification = Notification(event=event)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with self.options({"system.url-prefix": "http://example.com"}), self.tasks():
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert isinstance(msg, EmailMultiAlternatives)
assert local_timestamp in str(msg.alternatives)
def _test_invalid_timezone(self, s: str) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
UserOption.objects.create(user=self.user, key="timezone", value=s)
event = self.store_event(
data={"message": "foobar", "level": "error"},
project_id=self.project.id,
)
notification = AlertRuleNotification(
Notification(event=event), ActionTargetType.ISSUE_OWNERS
)
recipient_context = notification.get_recipient_context(Actor.from_orm_user(self.user), {})
assert recipient_context["timezone"] == UTC
def test_context_invalid_timezone_empty_string(self) -> None:
self._test_invalid_timezone("")
def test_context_invalid_timezone_garbage_value(self) -> None:
self._test_invalid_timezone("not/a/real/timezone")
def test_notify_with_suspect_commits(self) -> None:
repo = Repository.objects.create(
organization_id=self.organization.id, name=self.organization.id
)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
event = self.store_event(
data={
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=self.project.id,
)
commit = Commit.objects.create(
organization_id=self.organization.id,
repository_id=repo.id,
key=uuid.uuid4().hex,
author=CommitAuthor.objects.create(
organization_id=self.organization.id,
name=self.user.name,
email=self.user.email,
),
)
# create the suspect commit record
assert event.group is not None
GroupOwner.objects.create(
group_id=event.group.id,
project=self.project,
organization_id=self.organization.id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=self.user.id,
context={
"commitId": commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
},
)
with self.tasks():
notification = Notification(event=event)
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert "Suspect Commits" in msg.body
assert self.user.email in msg.body
assert commit.key[:7] in msg.body
def test_notify_with_replay_id(self) -> None:
project = self.project
organization = project.organization
event = self.store_event(
data={
"contexts": {"replay": {"replay_id": "46eb3948be25448abd53fe36b5891ff2"}},
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=1).isoformat(),
"tags": [("level", "error")],
"request": {"url": "example.com"},
},
project_id=project.id,
)
assert event.group is not None
event.group.substatus = GroupSubStatus.REGRESSED
event.group.save()
features = ["organizations:session-replay", "organizations:session-replay-issue-emails"]
with self.feature(features):
with self.tasks():
notification = Notification(event=event)
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
expected_url = f"/organizations/{organization.slug}/issues/{event.group.id}/replays/?referrer=issue_alert-email"
assert isinstance(msg, EmailMultiAlternatives)
assert isinstance(msg.alternatives[0][0], str)
assert expected_url in msg.alternatives[0][0]
def test_slack_link(self) -> None:
project = self.project
organization = project.organization
event = self.store_event(data=make_event_data("foo.jx"), project_id=project.id)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with self.tasks():
notification = Notification(event=event)
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert isinstance(msg, EmailMultiAlternatives)
assert isinstance(msg.alternatives[0][0], str)
assert (
f"/settings/{organization.slug}/integrations/slack/?referrer=alert_email"
in msg.alternatives[0][0]
)
assert "notification_uuid" in msg.body
def test_slack_link_with_integration(self) -> None:
project = self.project
organization = project.organization
event = self.store_event(data=make_event_data("foo.jx"), project_id=project.id)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="msteams")
integration.add_organization(organization)
with self.tasks():
notification = Notification(event=event)
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert isinstance(msg, EmailMultiAlternatives)
assert isinstance(msg.alternatives[0][0], str)
assert (
f"/settings/{organization.slug}/integrations/slack/?referrer=alert_email"
not in msg.alternatives[0][0]
)
assert "notification_uuid" in msg.body
def test_slack_link_with_plugin(self) -> None:
project = self.project
organization = project.organization
event = self.store_event(data=make_event_data("foo.jx"), project_id=project.id)
ProjectOwnership.objects.create(project_id=self.project.id, fallthrough=True)
OpsGeniePlugin().enable(project)
with self.tasks():
notification = Notification(event=event)
self.adapter.notify(
notification,
ActionTargetType.ISSUE_OWNERS,
fallthrough_choice=FallthroughChoiceType.ACTIVE_MEMBERS,
)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert isinstance(msg, EmailMultiAlternatives)
assert isinstance(msg.alternatives[0][0], str)
assert (
f"/settings/{organization.slug}/integrations/slack/?referrer=alert_email"
not in msg.alternatives[0][0]
)
def test_notify_team_members(self) -> None:
"""Test that each member of a team is notified"""
user = self.create_user(email="foo@example.com", is_active=True)
user2 = self.create_user(email="baz@example.com", is_active=True)
team = self.create_team(organization=self.organization, members=[user, user2])
project = self.create_project(teams=[team])
event = self.store_event(data=make_event_data("foo.py"), project_id=project.id)
self.assert_notify(event, [user.email, user2.email], ActionTargetType.TEAM, str(team.id))
def test_notify_user(self) -> None:
user = self.create_user(email="foo@example.com", is_active=True)
self.create_team(organization=self.organization, members=[user])
event = self.store_event(data=make_event_data("foo.py"), project_id=self.project.id)
self.assert_notify(event, [user.email], ActionTargetType.MEMBER, str(user.id))
| MailAdapterNotifyTest |
python | celery__celery | celery/exceptions.py | {
"start": 8253,
"end": 8520
} | class ____(BackendError):
"""An issue reading from the backend."""
def __init__(self, *args, **kwargs):
self.task_id = kwargs.get('task_id', "")
def __repr__(self):
return super().__repr__() + " task_id:" + self.task_id
| BackendGetMetaError |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/test_server.py | {
"start": 625,
"end": 4961
} | class ____(TestCase):
def setUp(self):
self.mock_validator = mock.MagicMock()
self.mock_validator.get_code_challenge.return_value = None
self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
auth_code = AuthorizationCodeGrant(
request_validator=self.mock_validator)
auth_code.save_authorization_code = mock.MagicMock()
implicit = ImplicitGrant(
request_validator=self.mock_validator)
implicit.save_token = mock.MagicMock()
response_types = {
'code': auth_code,
'token': implicit,
'none': auth_code
}
self.expires_in = 1800
token = tokens.BearerToken(
self.mock_validator,
expires_in=self.expires_in
)
self.endpoint = AuthorizationEndpoint(
default_response_type='code',
default_token_type=token,
response_types=response_types
)
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_authorization_grant(self):
uri = 'http://i.b/l?response_type=code&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?code=abc&state=xyz')
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_implicit_grant(self):
uri = 'http://i.b/l?response_type=token&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me#access_token=abc&expires_in=' + str(self.expires_in) + '&token_type=Bearer&state=xyz&scope=all+of+them', parse_fragment=True)
def test_none_grant(self):
uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them&state=xyz'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?state=xyz', parse_fragment=True)
self.assertIsNone(body)
self.assertEqual(status_code, 302)
# and without the state parameter
uri = 'http://i.b/l?response_type=none&client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me', parse_fragment=True)
self.assertIsNone(body)
self.assertEqual(status_code, 302)
def test_missing_type(self):
uri = 'http://i.b/l?client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
self.mock_validator.validate_request = mock.MagicMock(
side_effect=errors.InvalidRequestError())
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?error=invalid_request&error_description=Missing+response_type+parameter.')
def test_invalid_type(self):
uri = 'http://i.b/l?response_type=invalid&client_id=me&scope=all+of+them'
uri += '&redirect_uri=http%3A%2F%2Fback.to%2Fme'
self.mock_validator.validate_request = mock.MagicMock(
side_effect=errors.UnsupportedResponseTypeError())
headers, body, status_code = self.endpoint.create_authorization_response(
uri, scopes=['all', 'of', 'them'])
self.assertIn('Location', headers)
self.assertURLEqual(headers['Location'], 'http://back.to/me?error=unsupported_response_type')
| AuthorizationEndpointTest |
python | pytorch__pytorch | tools/experimental/torchfuzz/checks.py | {
"start": 111,
"end": 298
} | class ____(ABC):
"""Base class for execution checks."""
@abstractmethod
def codegen(self, args_tuple: str) -> list[str]:
"""Generate code lines for this check."""
| Check |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classGetItem1.py | {
"start": 132,
"end": 456
} | class ____:
# This should generate a warning because __class_getitem__
# is implicitly a classmethod and should use cls rather than
# self.
def __class_getitem__(self, args: tuple[int, ...]) -> None: ...
reveal_type(ClassA[10, 63], expected_text="type[ClassA]")
_T = TypeVar("_T")
_S = TypeVar("_S")
| ClassA |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.