language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/2769. Find the Maximum Achievable Number/2769.py | {
"start": 0,
"end": 99
} | class ____:
def theMaximumAchievableX(self, num: int, t: int) -> int:
return num + 2 * t
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 58935,
"end": 64095
} | class ____(Patch):
"""
An elliptical annulus.
"""
@_docstring.interpd
def __init__(self, xy, r, width, angle=0.0, **kwargs):
"""
Parameters
----------
xy : (float, float)
xy coordinates of annulus centre.
r : float or (float, float)
The radius, or semi-axes:
- If float: radius of the outer circle.
- If two floats: semi-major and -minor axes of outer ellipse.
width : float
Width (thickness) of the annular ring. The width is measured inward
from the outer ellipse so that for the inner ellipse the semi-axes
are given by ``r - width``. *width* must be less than or equal to
the semi-minor axis.
angle : float, default: 0
Rotation angle in degrees (anti-clockwise from the positive
x-axis). Ignored for circular annuli (i.e., if *r* is a scalar).
**kwargs
Keyword arguments control the `Patch` properties:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self.set_radii(r)
self.center = xy
self.width = width
self.angle = angle
self._path = None
def __str__(self):
if self.a == self.b:
r = self.a
else:
r = (self.a, self.b)
return "Annulus(xy=(%s, %s), r=%s, width=%s, angle=%s)" % \
(*self.center, r, self.width, self.angle)
def set_center(self, xy):
"""
Set the center of the annulus.
Parameters
----------
xy : (float, float)
"""
self._center = xy
self._path = None
self.stale = True
def get_center(self):
"""Return the center of the annulus."""
return self._center
center = property(get_center, set_center)
def set_width(self, width):
"""
Set the width (thickness) of the annulus ring.
The width is measured inwards from the outer ellipse.
Parameters
----------
width : float
"""
if width > min(self.a, self.b):
raise ValueError(
'Width of annulus must be less than or equal to semi-minor axis')
self._width = width
self._path = None
self.stale = True
def get_width(self):
"""Return the width (thickness) of the annulus ring."""
return self._width
width = property(get_width, set_width)
def set_angle(self, angle):
"""
Set the tilt angle of the annulus.
Parameters
----------
angle : float
"""
self._angle = angle
self._path = None
self.stale = True
def get_angle(self):
"""Return the angle of the annulus."""
return self._angle
angle = property(get_angle, set_angle)
def set_semimajor(self, a):
"""
Set the semi-major axis *a* of the annulus.
Parameters
----------
a : float
"""
self.a = float(a)
self._path = None
self.stale = True
def set_semiminor(self, b):
"""
Set the semi-minor axis *b* of the annulus.
Parameters
----------
b : float
"""
self.b = float(b)
self._path = None
self.stale = True
def set_radii(self, r):
"""
Set the semi-major (*a*) and semi-minor radii (*b*) of the annulus.
Parameters
----------
r : float or (float, float)
The radius, or semi-axes:
- If float: radius of the outer circle.
- If two floats: semi-major and -minor axes of outer ellipse.
"""
if np.shape(r) == (2,):
self.a, self.b = r
elif np.shape(r) == ():
self.a = self.b = float(r)
else:
raise ValueError("Parameter 'r' must be one or two floats.")
self._path = None
self.stale = True
def get_radii(self):
"""Return the semi-major and semi-minor radii of the annulus."""
return self.a, self.b
radii = property(get_radii, set_radii)
def _transform_verts(self, verts, a, b):
return transforms.Affine2D() \
.scale(*self._convert_xy_units((a, b))) \
.rotate_deg(self.angle) \
.translate(*self._convert_xy_units(self.center)) \
.transform(verts)
def _recompute_path(self):
# circular arc
arc = Path.arc(0, 360)
# annulus needs to draw an outer ring
# followed by a reversed and scaled inner ring
a, b, w = self.a, self.b, self.width
v1 = self._transform_verts(arc.vertices, a, b)
v2 = self._transform_verts(arc.vertices[::-1], a - w, b - w)
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, Path.MOVETO,
arc.codes[1:], Path.MOVETO,
Path.CLOSEPOLY])
self._path = Path(v, c)
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
| Annulus |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol9.py | {
"start": 120,
"end": 192
} | class ____:
def __call__(self, v: int):
print("Received", v)
| A |
python | pytorch__pytorch | torch/_dynamo/test_case.py | {
"start": 4021,
"end": 8571
} | class ____(TestCase):
"""
Test class for CPython tests located in "test/dynamo/CPython/Py_version/*".
This class enables specific features that are disabled by default, such as
tracing through unittest methods.
"""
_stack: contextlib.ExitStack
dynamo_strict_nopython = True
# Restore original unittest methods to simplify tracing CPython test cases.
assertEqual = unittest.TestCase.assertEqual # type: ignore[assignment]
assertNotEqual = unittest.TestCase.assertNotEqual # type: ignore[assignment]
assertTrue = unittest.TestCase.assertTrue
assertFalse = unittest.TestCase.assertFalse
assertIs = unittest.TestCase.assertIs
assertIsNot = unittest.TestCase.assertIsNot
assertIsNone = unittest.TestCase.assertIsNone
assertIsNotNone = unittest.TestCase.assertIsNotNone
assertIn = unittest.TestCase.assertIn
assertNotIn = unittest.TestCase.assertNotIn
assertIsInstance = unittest.TestCase.assertIsInstance
assertNotIsInstance = unittest.TestCase.assertNotIsInstance
assertAlmostEqual = unittest.TestCase.assertAlmostEqual
assertNotAlmostEqual = unittest.TestCase.assertNotAlmostEqual
assertGreater = unittest.TestCase.assertGreater
assertGreaterEqual = unittest.TestCase.assertGreaterEqual
assertLess = unittest.TestCase.assertLess
assertLessEqual = unittest.TestCase.assertLessEqual
assertRegex = unittest.TestCase.assertRegex
assertNotRegex = unittest.TestCase.assertNotRegex
assertCountEqual = unittest.TestCase.assertCountEqual
assertMultiLineEqual = polyfills.assert_multi_line_equal
assertSequenceEqual = polyfills.assert_sequence_equal
assertListEqual = unittest.TestCase.assertListEqual
assertTupleEqual = unittest.TestCase.assertTupleEqual
assertSetEqual = unittest.TestCase.assertSetEqual
assertDictEqual = polyfills.assert_dict_equal
# pyrefly: ignore [bad-override]
assertRaises = unittest.TestCase.assertRaises
# pyrefly: ignore [bad-override]
assertRaisesRegex = unittest.TestCase.assertRaisesRegex
assertWarns = unittest.TestCase.assertWarns
assertWarnsRegex = unittest.TestCase.assertWarnsRegex
assertLogs = unittest.TestCase.assertLogs
fail = unittest.TestCase.fail
failureException = unittest.TestCase.failureException
def compile_fn(
self,
fn: Callable[..., Any],
backend: Union[str, Callable[..., Any]],
nopython: bool,
) -> Callable[..., Any]:
# We want to compile only the test function, excluding any setup code
# from unittest
method = getattr(self, self._testMethodName)
method = torch._dynamo.optimize(backend, error_on_graph_break=nopython)(method)
setattr(self, self._testMethodName, method)
return fn
def _dynamo_test_key(self) -> str:
suffix = super()._dynamo_test_key()
test_cls = self.__class__
test_file = inspect.getfile(test_cls).split(os.sep)[-1].split(".")[0]
py_ver = re.search(r"/([\d_]+)/", inspect.getfile(test_cls))
if py_ver:
py_ver = py_ver.group().strip(os.sep).replace("_", "") # type: ignore[assignment]
else:
return suffix
return f"CPython{py_ver}-{test_file}-{suffix}"
@classmethod
def tearDownClass(cls) -> None:
cls._stack.close()
super().tearDownClass()
@classmethod
def setUpClass(cls) -> None:
# Skip test if python versions doesn't match
prefix = os.path.join("dynamo", "cpython") + os.path.sep
regex = re.escape(prefix) + r"\d_\d{2}"
search_path = inspect.getfile(cls)
m = re.search(regex, search_path)
if m:
test_py_ver = tuple(map(int, m.group().removeprefix(prefix).split("_")))
py_ver = sys.version_info[:2]
if py_ver != test_py_ver:
expected = ".".join(map(str, test_py_ver))
got = ".".join(map(str, py_ver))
raise unittest.SkipTest(
f"Test requires Python {expected} but got Python {got}"
)
else:
raise unittest.SkipTest(
f"Test requires a specific Python version but not found in path {inspect.getfile(cls)}"
)
super().setUpClass()
cls._stack = contextlib.ExitStack() # type: ignore[attr-defined]
cls._stack.enter_context( # type: ignore[attr-defined]
config.patch(
enable_trace_unittest=True,
),
)
| CPythonTestCase |
python | PyCQA__pylint | doc/data/messages/i/invalid-enum-extension/bad.py | {
"start": 75,
"end": 137
} | class ____(Color): # [invalid-enum-extension]
APPLE = 3
| Fruit |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 26698,
"end": 29667
} | class ____(Glm4vForConditionalGeneration):
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.Tensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Glm4vMoeCausalLMOutputWithPast]:
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
position_ids=position_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
aux_loss = None
if kwargs.get("output_router_logits", False):
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.config.text_config.router_aux_loss_coef * aux_loss.to(
loss.device
) # make sure to reside in the same device
return Glm4vMoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
rope_deltas=outputs.rope_deltas,
)
__all__ = [
"Glm4vMoeConfig",
"Glm4vMoeTextConfig",
"Glm4vMoeVisionConfig",
"Glm4vMoeForConditionalGeneration",
"Glm4vMoeModel", # noqa: F822
"Glm4vMoePreTrainedModel",
"Glm4vMoeTextModel",
"Glm4vMoeVisionModel",
]
| Glm4vMoeForConditionalGeneration |
python | python__mypy | mypy/refinfo.py | {
"start": 499,
"end": 2784
} | class ____(TraverserVisitor):
def __init__(self, type_map: dict[Expression, Type]) -> None:
super().__init__()
self.type_map = type_map
self.data: list[dict[str, object]] = []
def visit_name_expr(self, expr: NameExpr) -> None:
super().visit_name_expr(expr)
self.record_ref_expr(expr)
def visit_member_expr(self, expr: MemberExpr) -> None:
super().visit_member_expr(expr)
self.record_ref_expr(expr)
def visit_func_def(self, func: FuncDef) -> None:
if func.expanded:
for item in func.expanded:
if isinstance(item, FuncDef):
super().visit_func_def(item)
else:
super().visit_func_def(func)
def record_ref_expr(self, expr: RefExpr) -> None:
fullname = None
if expr.kind != LDEF and "." in expr.fullname:
fullname = expr.fullname
elif isinstance(expr, MemberExpr):
typ = self.type_map.get(expr.expr)
sym = None
if isinstance(expr.expr, RefExpr):
sym = expr.expr.node
if typ:
tfn = type_fullname(typ, sym)
if tfn:
fullname = f"{tfn}.{expr.name}"
if not fullname:
fullname = f"*.{expr.name}"
if fullname is not None:
self.data.append({"line": expr.line, "column": expr.column, "target": fullname})
def type_fullname(typ: Type, node: SymbolNode | None = None) -> str | None:
typ = get_proper_type(typ)
if isinstance(typ, Instance):
return typ.type.fullname
elif isinstance(typ, TypeType):
return type_fullname(typ.item)
elif isinstance(typ, FunctionLike) and typ.is_type_obj():
if isinstance(node, TypeInfo):
return node.fullname
return type_fullname(typ.fallback)
elif isinstance(typ, TupleType):
return type_fullname(tuple_fallback(typ))
elif isinstance(typ, TypeVarLikeType):
return type_fullname(typ.upper_bound)
return None
def get_undocumented_ref_info_json(
tree: MypyFile, type_map: dict[Expression, Type]
) -> list[dict[str, object]]:
visitor = RefInfoVisitor(type_map)
tree.accept(visitor)
return visitor.data
| RefInfoVisitor |
python | django__django | tests/gis_tests/geo3d/models.py | {
"start": 480,
"end": 638
} | class ____(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class Meta:
required_db_features = {"supports_3d_storage"}
| Interstate3D |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 95948,
"end": 97126
} | class ____(fixtures.TestBase):
def test_ascii_to_utf8(self):
eq_(
compat.decode_backslashreplace(util.b("hello world"), "utf-8"),
"hello world",
)
def test_utf8_to_utf8(self):
eq_(
compat.decode_backslashreplace(
"some message méil".encode(), "utf-8"
),
"some message méil",
)
def test_latin1_to_utf8(self):
eq_(
compat.decode_backslashreplace(
"some message méil".encode("latin-1"), "utf-8"
),
"some message m\\xe9il",
)
eq_(
compat.decode_backslashreplace(
"some message méil".encode("latin-1"), "latin-1"
),
"some message méil",
)
def test_cp1251_to_utf8(self):
message = "some message П".encode("cp1251")
eq_(message, b"some message \xcf")
eq_(
compat.decode_backslashreplace(message, "utf-8"),
"some message \\xcf",
)
eq_(
compat.decode_backslashreplace(message, "cp1251"),
"some message П",
)
| BackslashReplaceTest |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 817,
"end": 3612
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> CheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return CheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return CheckpointsWithStreamingResponse(self)
def list(
self,
fine_tuning_job_id: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[FineTuningJobCheckpoint]:
"""
List checkpoints for a fine-tuning job.
Args:
after: Identifier for the last checkpoint ID from the previous pagination request.
limit: Number of checkpoints to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
page=SyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
checkpoint_list_params.CheckpointListParams,
),
),
model=FineTuningJobCheckpoint,
)
| Checkpoints |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/dagster_types.py | {
"start": 2302,
"end": 3973
} | class ____(
NamedTuple(
"_DagsterTypeSnap",
[
("kind", DagsterTypeKind),
("key", str),
("name", Optional[str]),
("description", Optional[str]),
("display_name", str),
("is_builtin", bool),
("type_param_keys", Sequence[str]),
("loader_schema_key", Optional[str]),
("materializer_schema_key", Optional[str]),
("metadata", Mapping[str, MetadataValue]),
],
)
):
def __new__(
cls,
kind,
key,
name,
description,
display_name,
is_builtin,
type_param_keys,
loader_schema_key=None,
materializer_schema_key=None,
metadata=None,
):
return super().__new__(
cls,
kind=check.inst_param(kind, "kind", DagsterTypeKind),
key=check.str_param(key, "key"),
name=check.opt_str_param(name, "name"),
display_name=check.str_param(display_name, "display_name"),
description=check.opt_str_param(description, "description"),
is_builtin=check.bool_param(is_builtin, "is_builtin"),
type_param_keys=check.list_param(type_param_keys, "type_param_keys", of_type=str),
loader_schema_key=check.opt_str_param(loader_schema_key, "loader_schema_key"),
materializer_schema_key=check.opt_str_param(
materializer_schema_key, "materializer_schema_key"
),
metadata=normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str)
),
)
| DagsterTypeSnap |
python | apache__airflow | helm-tests/tests/helm_tests/apiserver/test_apiserver.py | {
"start": 4215,
"end": 5942
} | class ____:
"""Tests apiserver configmap."""
def test_no_apiserver_config_configmap_by_default(self):
docs = render_chart(show_only=["templates/configmaps/api-server-configmap.yaml"])
assert len(docs) == 0
def test_no_apiserver_config_configmap_with_configmap_name(self):
docs = render_chart(
values={
"apiServer": {
"apiServerConfig": "CSRF_ENABLED = True # {{ .Release.Name }}",
"apiServerConfigConfigMapName": "my-configmap",
}
},
show_only=["templates/configmaps/api-server-configmap.yaml"],
)
assert len(docs) == 0
def test_apiserver_with_custom_configmap_name(self):
docs = render_chart(
values={
"apiServer": {
"apiServerConfigConfigMapName": "my-custom-configmap",
}
},
show_only=["templates/api-server/api-server-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.volumes[1].configMap.name", docs[0]) == "my-custom-configmap"
)
def test_apiserver_config_configmap(self):
docs = render_chart(
values={"apiServer": {"apiServerConfig": "CSRF_ENABLED = True # {{ .Release.Name }}"}},
show_only=["templates/configmaps/api-server-configmap.yaml"],
)
assert docs[0]["kind"] == "ConfigMap"
assert jmespath.search("metadata.name", docs[0]) == "release-name-api-server-config"
assert (
jmespath.search('data."webserver_config.py"', docs[0]).strip()
== "CSRF_ENABLED = True # release-name"
)
| TestApiserverConfigmap |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_detector.py | {
"start": 31883,
"end": 35562
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.group = self.create_group(project=self.project)
self.detector = self.create_detector(project=self.project, type=MetricIssue.slug)
self.error_detector = self.create_detector(project=self.project, type=ErrorGroupType.slug)
self.issue_stream_detector = self.create_detector(
project=self.project, type=IssueStreamGroupType.slug
)
self.event = self.store_event(project_id=self.project.id, data={})
self.occurrence = IssueOccurrence(
id=uuid.uuid4().hex,
project_id=1,
event_id="asdf",
fingerprint=["asdf"],
issue_title="title",
subtitle="subtitle",
resource_id=None,
evidence_data={"detector_id": self.detector.id},
evidence_display=[],
type=MetricIssue,
detection_time=timezone.now(),
level="error",
culprit="",
)
self.group_event = GroupEvent.from_event(self.event, self.group)
def test_activity_update(self) -> None:
activity = Activity.objects.create(
project=self.project,
group=self.group,
type=ActivityType.SET_RESOLVED.value,
user_id=self.user.id,
)
event_data = WorkflowEventData(event=activity, group=self.group)
result = get_detectors_for_event(event_data, detector=self.detector)
assert result is not None
assert result.preferred_detector == self.detector
assert result.detectors == {self.issue_stream_detector, self.detector}
def test_error_event(self) -> None:
event_data = WorkflowEventData(event=self.group_event, group=self.group)
result = get_detectors_for_event(event_data)
assert result is not None
assert result.preferred_detector == self.error_detector
assert result.detectors == {self.issue_stream_detector, self.error_detector}
def test_metric_issue(self) -> None:
self.group_event.occurrence = self.occurrence
event_data = WorkflowEventData(event=self.group_event, group=self.group)
result = get_detectors_for_event(event_data)
assert result is not None
assert result.preferred_detector == self.detector
assert result.detectors == {self.issue_stream_detector, self.detector}
def test_event_without_detector(self) -> None:
occurrence = IssueOccurrence(
id=uuid.uuid4().hex,
project_id=1,
event_id="asdf",
fingerprint=["asdf"],
issue_title="title",
subtitle="subtitle",
resource_id=None,
evidence_data={}, # no detector id
evidence_display=[],
type=PerformanceNPlusOneAPICallsGroupType,
detection_time=timezone.now(),
level="error",
culprit="",
)
self.group_event.occurrence = occurrence
event_data = WorkflowEventData(event=self.group_event, group=self.group)
result = get_detectors_for_event(event_data)
assert result is not None
assert result.preferred_detector == self.issue_stream_detector
assert result.detectors == {self.issue_stream_detector}
def test_no_detectors(self) -> None:
self.issue_stream_detector.delete()
self.error_detector.delete()
event_data = WorkflowEventData(event=self.group_event, group=self.group)
result = get_detectors_for_event(event_data)
assert result is None
| TestGetDetectorsForEvent |
python | kamyu104__LeetCode-Solutions | Python/find-the-count-of-monotonic-pairs-i.py | {
"start": 1374,
"end": 2350
} | class ____(object):
def countOfPairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
dp = [int(i <= nums[0]) for i in xrange(max(nums)+1)] # dp[j]: numbers of arr1, which is of length i+1 and arr1[i] is j
for i in xrange(1, len(nums)):
# arr1[i-1] <= arr1[i]
# => arr1[i]-arr1[i-1] >= 0 (1)
#
# arr2[i-1] >= arr2[i]
# => nums[i-1]-arr1[i-1] >= nums[i]-arr1[i]
# => arr1[i]-arr1[i-1] >= nums[i]-nums[i-1] (2)
#
# (1)+(2): arr1[i]-arr1[i-1] >= max(nums[i]-nums[i-1], 0)
new_dp = [0]*len(dp)
diff = max(nums[i]-nums[i-1], 0)
for j in xrange(diff, nums[i]+1):
new_dp[j] = (new_dp[j-1]+dp[j-diff])%MOD
dp = new_dp
return reduce(lambda accu, x: (accu+x)%MOD, dp, 0)
# Time: O(n * r^2), r = max(nums)
# Space: O(r)
# dp
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/longest-balanced-substring-i.py | {
"start": 91,
"end": 638
} | class ____(object):
def longestBalanced(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
for i in xrange(len(s)):
cnt = collections.defaultdict(int)
mx = 0
for j in xrange(i, len(s)):
cnt[s[j]] += 1
mx = max(mx, cnt[s[j]])
if (j-i+1)%len(cnt) == 0 and (j-i+1)//len(cnt) == mx:
result = max(result, j-i+1)
return result
# Time: O(n * (26 + n))
# Space: O(26)
# freq table
| Solution |
python | getsentry__sentry | src/sentry/api/serializers/snuba.py | {
"start": 1631,
"end": 5292
} | class ____:
"""
Serializer for time-series Snuba data.
"""
def __init__(self, organization, lookup, user):
self.organization = organization
self.lookup = lookup
self.user = user
def get_attrs(self, item_list):
if self.lookup is None:
return item_list
return self.lookup.serializer(self.organization, item_list, self.user)
def serialize(
self,
result,
column="count",
order=None,
allow_partial_buckets=False,
zerofill_results=True,
extra_columns=None,
confidence_column="count",
):
data = [
(key, list(group))
for key, group in itertools.groupby(result.data["data"], key=lambda r: r["time"])
]
attrs = {}
if self.lookup:
attrs = self.get_attrs(
[value_from_row(r, self.lookup.columns) for _, v in data for r in v]
)
rv = []
for k, v in data:
row = []
for r in v:
item = {"count": r.get(column, 0)}
if extra_columns is not None:
for extra_column in extra_columns:
item[extra_column] = r.get(extra_column, 0)
if self.lookup:
value = value_from_row(r, self.lookup.columns)
item[self.lookup.name] = (attrs.get(value),)
row.append(item)
rv.append((k, row))
res = {
"data": (
zerofill(
rv,
result.start,
result.end,
result.rollup,
allow_partial_buckets=allow_partial_buckets,
)
if zerofill_results
else rv
)
}
confidence_values = []
# TODO: remove this once frontend starts using `accuracy` in `meta`
if "processed_timeseries" in result.data:
for key, group in itertools.groupby(
result.data["processed_timeseries"].confidence, key=lambda r: r["time"]
):
result_row = []
for confidence_row in group:
item = {confidence_column: confidence_row.get(column, None)}
if extra_columns is not None:
for extra_column in extra_columns:
item[extra_column] = confidence_row.get(extra_column, 0)
if self.lookup:
value = value_from_row(confidence_row, self.lookup.columns)
item[self.lookup.name] = (attrs.get(value),)
result_row.append(item)
confidence_values.append((key, result_row))
# confidence only comes from the RPC which already helps us zerofill by returning all buckets
res["confidence"] = confidence_values
if result.data.get("totals"):
res["totals"] = {"count": result.data["totals"][column]}
# If order is passed let that overwrite whats in data since its order for multi-axis
if order is not None:
res["order"] = order
elif "order" in result.data:
res["order"] = result.data["order"]
res["isMetricsData"] = result.data.get("isMetricsData", False)
if hasattr(result, "start") and hasattr(result, "end"):
timeframe = calculate_time_frame(result.start, result.end, result.rollup)
res["start"] = timeframe["start"]
res["end"] = timeframe["end"]
return res
| SnubaTSResultSerializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1328110,
"end": 1328412
} | class ____(sgqlc.types.Type, ProjectV2ItemFieldValueCommon, Node):
"""The value of a date field in a Project item."""
__schema__ = github_schema
__field_names__ = ("date",)
date = sgqlc.types.Field(Date, graphql_name="date")
"""Date value for the field"""
| ProjectV2ItemFieldDateValue |
python | instagram__MonkeyType | tests/test_typing.py | {
"start": 27905,
"end": 29751
} | class ____:
class Base:
pass
class Intermediate(Base):
pass
class FirstDerived(Intermediate):
pass
class SecondDerived(Intermediate):
pass
class Unrelated:
pass
class MoreDerived(SecondDerived):
pass
@pytest.mark.parametrize(
'typ, expected',
[
(
Union[FirstDerived, SecondDerived],
Intermediate,
),
(
Union[FirstDerived, Base],
Base,
),
(
Union[FirstDerived, MoreDerived],
Intermediate,
),
(
Union[FirstDerived, Unrelated],
Union[FirstDerived, Unrelated],
),
])
def test_rewrite(self, typ, expected):
rewritten = RewriteMostSpecificCommonBase().rewrite(typ)
assert rewritten == expected
def test_multiple_bases(self):
class Base1:
pass
class Base2:
pass
class FirstDerived(Base1, Base2):
pass
class SecondDerived(Base1, Base2):
pass
typ = Union[FirstDerived, SecondDerived]
assert RewriteMostSpecificCommonBase().rewrite(typ) == typ
def test_multiple_bases_nontrivial(self):
class Base1:
pass
class Base2:
pass
class FirstDerived(Base1, Base2):
pass
class SecondDerived(Base1, Base2):
pass
class FirstDerived1(FirstDerived):
pass
class FirstDerived2(FirstDerived):
pass
typ = Union[FirstDerived1, FirstDerived2]
rewritten = RewriteMostSpecificCommonBase().rewrite(typ)
assert rewritten == FirstDerived
| TestRewriteMostSpecificCommonBase |
python | Pylons__pyramid | tests/test_security.py | {
"start": 9492,
"end": 9929
} | class ____(unittest.TestCase):
def setUp(self):
testing.setUp()
def tearDown(self):
testing.tearDown()
def test_identity_no_security_policy(self):
request = _makeRequest()
self.assertEqual(request.identity, None)
def test_identity(self):
request = _makeRequest()
_registerSecurityPolicy(request.registry, 'yo')
self.assertEqual(request.identity, 'yo')
| TestIdentity |
python | GoogleCloudPlatform__python-docs-samples | dataflow/conftest.py | {
"start": 11036,
"end": 30994
} | class ____:
uuid: str = UUID
project: str = PROJECT
region: str = REGION
@staticmethod
def hyphen_name(name: str) -> str:
unique_name = f"{name}-py{PYTHON_VERSION}-{UUID}"
return HYPHEN_NAME_RE.sub("-", unique_name)
@staticmethod
def underscore_name(name: str) -> str:
return UNDERSCORE_NAME_RE.sub("_", Utils.hyphen_name(name))
@staticmethod
def wait_until(
is_done: Callable[[], bool],
timeout_sec: int = TIMEOUT_SEC,
poll_interval_sec: int = POLL_INTERVAL_SEC,
) -> bool:
for _ in range(0, timeout_sec, poll_interval_sec):
if is_done():
return True
time.sleep(poll_interval_sec)
return False
@staticmethod
def storage_bucket(name: str) -> str:
if bucket_name := os.environ.get("GOOGLE_CLOUD_BUCKET"):
logging.warning(f"Using bucket from GOOGLE_CLOUD_BUCKET: {bucket_name}")
yield bucket_name
return # don't delete
from google.cloud import storage
storage_client = storage.Client()
bucket = storage_client.create_bucket(Utils.hyphen_name(name))
logging.info(f"Created storage_bucket: {bucket.name}")
yield bucket.name
# Print all the objects in the bucket before deleting for debugging.
logging.info(f"Deleting bucket {bucket.name} with the following contents:")
total_files = 0
total_size = 0
for blob in bucket.list_blobs():
logging.info(f" - {blob.name} ({blob.size} bytes)")
total_files += 1
total_size += blob.size
logging.info(f"Total {total_files} files ({total_size} bytes)")
bucket.delete(force=True)
logging.info(f"Deleted storage_bucket: {bucket.name}")
@staticmethod
def bigquery_dataset(
name: str,
project: str = PROJECT,
location: str = REGION,
) -> str:
from google.api_core.exceptions import NotFound
from google.cloud import bigquery
bigquery_client = bigquery.Client()
dataset_name = Utils.underscore_name(name)
dataset = bigquery.Dataset(f"{project}.{dataset_name}")
dataset.location = location
result = bigquery_client.create_dataset(dataset)
logging.info(f"Created bigquery_dataset: {result.full_dataset_id}")
yield result.dataset_id
try:
bigquery_client.delete_dataset(
f"{project}.{dataset_name}", delete_contents=True
)
logging.info(f"Deleted bigquery_dataset: {result.full_dataset_id}")
except NotFound:
logging.info(f"{result.full_dataset_id} already deleted.")
@staticmethod
def bigquery_table(
dataset_name: str, table_name: str, project: str = PROJECT, **kwargs
) -> str:
from google.cloud import bigquery
bigquery_client = bigquery.Client()
table = bigquery.Table(f"{project}.{dataset_name}.{table_name}", **kwargs)
result = bigquery_client.create_table(table)
logging.info(f"Created bigquery_table: {result.full_table_id}")
yield result.table_id
# This table will be deleted when the dataset is deleted.
@staticmethod
def bigquery_table_exists(
dataset_name: str, table_name: str, project: str = PROJECT
) -> bool:
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
bigquery_client = bigquery.Client()
try:
bigquery_client.get_table(f"{project}.{dataset_name}.{table_name}")
return True
except NotFound:
return False
@staticmethod
def bigquery_query(query: str, region: str = REGION) -> Iterator[dict[str, Any]]:
from google.cloud import bigquery
bigquery_client = bigquery.Client()
logging.info(f"Bigquery query: {query}")
for row in bigquery_client.query(query, location=region):
yield dict(row)
@staticmethod
def pubsub_topic(name: str, project: str = PROJECT) -> str:
from google.cloud import pubsub
publisher_client = pubsub.PublisherClient()
topic_path = publisher_client.topic_path(project, Utils.hyphen_name(name))
topic = publisher_client.create_topic(request={"name": topic_path})
logging.info(f"Created pubsub_topic: {topic.name}")
yield topic.name
# Due to the pinned library dependencies in apache-beam, client
# library throws an error upon deletion.
# We use gcloud for a workaround. See also:
# https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4492
cmd = ["gcloud", "pubsub", "--project", project, "topics", "delete", topic.name]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(f"Deleted pubsub_topic: {topic.name}")
@staticmethod
def pubsub_subscription(
topic_path: str,
name: str,
project: str = PROJECT,
) -> str:
from google.cloud import pubsub
subscriber = pubsub.SubscriberClient()
subscription_path = subscriber.subscription_path(
project, Utils.hyphen_name(name)
)
subscription = subscriber.create_subscription(
request={"name": subscription_path, "topic": topic_path}
)
logging.info(f"Created pubsub_subscription: {subscription.name}")
yield subscription.name
# Due to the pinned library dependencies in apache-beam, client
# library throws an error upon deletion.
# We use gcloud for a workaround. See also:
# https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4492
cmd = [
"gcloud",
"pubsub",
"--project",
project,
"subscriptions",
"delete",
subscription.name,
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(f"Deleted pubsub_subscription: {subscription.name}")
@staticmethod
def pubsub_publisher(
topic_path: str,
new_msg: Callable[[int], str] = lambda i: json.dumps(
{"id": i, "content": f"message {i}"}
),
sleep_sec: int = 1,
) -> bool:
from google.cloud import pubsub
def _infinite_publish_job() -> None:
publisher_client = pubsub.PublisherClient()
for i in itertools.count():
msg = new_msg(i)
publisher_client.publish(topic_path, msg.encode("utf-8")).result()
time.sleep(sleep_sec)
# Start a subprocess in the background to do the publishing.
logging.info(f"Starting publisher on {topic_path}")
p = mp.Process(target=_infinite_publish_job)
# We set the subprocess as a daemon so the main process doesn't wait for
# the subprocess to finish. Since this is an infinite loop, it will
# never finish, so it would cause the whole test to hang.
# Typically, `terminate` should stop the subprocess during the fixture
# cleanup phase, but we've had cases where the tests hang, most likely
# due to concurrency issues with pytest running in parallel.
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.daemon
p.daemon = True
p.start()
yield p.is_alive()
# For cleanup, terminate the background process.
logging.info("Stopping publisher")
p.terminate()
@staticmethod
def cloud_build_submit(
image_name: str | None = None,
config: str | None = None,
source: str = ".",
substitutions: dict[str, str] | None = None,
project: str = PROJECT,
) -> None:
"""Sends a Cloud Build job, if an image_name is provided it will be deleted at teardown."""
cmd = ["gcloud", "auth", "configure-docker"]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
gcr_project = project.replace(':', '/')
if substitutions:
cmd_substitutions = [
f"--substitutions={','.join([k + '=' + v for k, v in substitutions.items()])}"
]
else:
cmd_substitutions = []
if config:
try:
with open(config) as f:
cmd = [
"gcloud",
"builds",
"submit",
f"--project={project}",
f"--config={config}",
*cmd_substitutions,
source,
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(f"Cloud build finished successfully: {config}")
yield f.read()
except Exception as e:
logging.exception(e)
logging.warning(f"Current directory: {os.getcwd()}")
yield config
elif image_name:
cmd = [
"gcloud",
"builds",
"submit",
f"--project={project}",
f"--tag=gcr.io/{gcr_project}/{image_name}:{UUID}",
*cmd_substitutions,
source,
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(
f"Created image: gcr.io/{gcr_project}/{image_name}:{UUID}")
yield f"{image_name}:{UUID}"
else:
raise ValueError("must specify either `config` or `image_name`")
if image_name:
cmd = [
"gcloud",
"container",
"images",
"delete",
f"gcr.io/{gcr_project}/{image_name}:{UUID}",
f"--project={project}",
"--force-delete-tags",
"--quiet",
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(
f"Deleted image: gcr.io/{gcr_project}/{image_name}:{UUID}")
@staticmethod
def dataflow_job_url(
job_id: str,
project: str = PROJECT,
region: str = REGION,
) -> str:
return f"https://console.cloud.google.com/dataflow/jobs/{region}/{job_id}?project={project}"
@staticmethod
def dataflow_jobs_list(
project: str = PROJECT, page_size: int = 30
) -> Iterator[dict]:
from googleapiclient.discovery import build
dataflow = build("dataflow", "v1b3")
response = {"nextPageToken": None}
while "nextPageToken" in response:
# For more info see:
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs/list
request = (
dataflow.projects()
.jobs()
.list(
projectId=project,
pageToken=response["nextPageToken"],
pageSize=page_size,
)
)
response = request.execute()
for job in response["jobs"]:
yield job
@staticmethod
def dataflow_job_id(
job_name: str, project: str = PROJECT, list_page_size: int = LIST_PAGE_SIZE
) -> str:
for job in Utils.dataflow_jobs_list(project, list_page_size):
if job["name"] == job_name:
logging.info(f"Found Dataflow job: {job}")
return job["id"]
raise ValueError(f"Dataflow job not found: job_name={job_name}")
@staticmethod
def dataflow_jobs_get(job_id: str, project: str = PROJECT) -> dict[str, Any]:
from googleapiclient.discovery import build
dataflow = build("dataflow", "v1b3")
# For more info see:
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs/get
request = (
dataflow.projects()
.jobs()
.get(
projectId=project,
jobId=job_id,
view="JOB_VIEW_SUMMARY",
)
)
# If the job is not found, this throws an HttpError exception.
return request.execute()
@staticmethod
def dataflow_jobs_wait(
job_id: str,
project: str = PROJECT,
region: str = REGION,
target_states: set[str] = {"JOB_STATE_DONE"},
timeout_sec: int = TIMEOUT_SEC,
poll_interval_sec: int = POLL_INTERVAL_SEC,
) -> str | None:
"""For a list of all the valid states:
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState
"""
finish_states = {
"JOB_STATE_DONE",
"JOB_STATE_FAILED",
"JOB_STATE_CANCELLED",
"JOB_STATE_DRAINED",
}
logging.info(
f"Waiting for Dataflow job {job_id} until {target_states}\n"
+ Utils.dataflow_job_url(job_id, project, region)
)
def job_is_done() -> bool:
try:
job = Utils.dataflow_jobs_get(job_id, project)
state = job["currentState"]
if state in target_states:
logging.info(f"Dataflow job found with state {state}")
return True
elif state in finish_states:
raise RuntimeError(
f"Dataflow job finished with state {state}, but we were expecting {target_states}\n"
+ Utils.dataflow_job_url(job_id, project, region)
)
return False
except Exception as e:
logging.exception(e)
return False
Utils.wait_until(job_is_done, timeout_sec, poll_interval_sec)
assert job_is_done(), (
f"Dataflow job is not done after {timeout_sec} seconds\n"
+ Utils.dataflow_job_url(job_id, project, region)
)
@staticmethod
def dataflow_jobs_cancel(
job_id: str,
drain: bool = False,
project: str = PROJECT,
region: str = REGION,
) -> None:
logging.info(f"Cancelling Dataflow job ID: {job_id}")
# We get an error using the googleapiclient.discovery APIs, probably
# due to incompatible dependencies with apache-beam.
# We use gcloud instead to cancel the job.
if drain:
# https://cloud.google.com/sdk/gcloud/reference/dataflow/jobs/drain
cmd = [
"gcloud",
f"--project={project}",
"dataflow",
"jobs",
"drain",
job_id,
f"--region={region}",
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
# After draining the job, we must wait until the job has actually finished.
Utils.dataflow_jobs_wait(
job_id,
target_states={
"JOB_STATE_DONE",
"JOB_STATE_FAILED",
"JOB_STATE_CANCELLED",
"JOB_STATE_DRAINED",
},
project=project,
region=region,
)
else:
# https://cloud.google.com/sdk/gcloud/reference/dataflow/jobs/cancel
cmd = [
"gcloud",
f"--project={project}",
"dataflow",
"jobs",
"cancel",
job_id,
f"--region={region}",
]
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(f"Cancelled Dataflow job: {job_id}")
@staticmethod
def dataflow_flex_template_build(
bucket_name: str,
image_name: str,
metadata_file: str | None = "metadata.json",
template_file: str = "template.json",
project: str = PROJECT,
) -> str:
# https://cloud.google.com/sdk/gcloud/reference/dataflow/flex-template/build
template_gcs_path = f"gs://{bucket_name}/{template_file}"
gcr_project = project.replace(':', '/')
cmd = [
"gcloud",
"dataflow",
"flex-template",
"build",
template_gcs_path,
f"--project={project}",
f"--image=gcr.io/{gcr_project}/{image_name}",
"--sdk-language=PYTHON"
]
if metadata_file:
cmd.append(f"--metadata-file={metadata_file}")
logging.info(f"{cmd}")
subprocess.check_call(cmd)
logging.info(f"dataflow_flex_template_build: {template_gcs_path}")
yield template_gcs_path
# The template file gets deleted when we delete the bucket.
@staticmethod
def dataflow_flex_template_run(
job_name: str,
template_path: str,
bucket_name: str,
parameters: dict[str, str] = {},
project: str = PROJECT,
region: str = REGION,
additional_experiments: dict[str,str] = {},
) -> str:
import yaml
# https://cloud.google.com/sdk/gcloud/reference/dataflow/flex-template/run
unique_job_name = Utils.hyphen_name(job_name)
logging.info(f"dataflow_job_name: {unique_job_name}")
cmd = [
"gcloud",
"dataflow",
"flex-template",
"run",
unique_job_name,
f"--template-file-gcs-location={template_path}",
f"--project={project}",
f"--region={region}",
f"--staging-location=gs://{bucket_name}/staging",
] + [
f"--parameters={name}={value}"
for name, value in {
**parameters,
}.items()
] + [
f"--additional-experiments={name}={value}"
for name, value in {
**additional_experiments,
}.items()
]
logging.info(f"{cmd}")
stdout = subprocess.check_output(cmd).decode("utf-8")
logging.info(f"Launched Dataflow Flex Template job: {unique_job_name}")
job_id = yaml.safe_load(stdout)["job"]["id"]
logging.info(f"Dataflow Flex Template job id: {job_id}")
logging.info(f">> {Utils.dataflow_job_url(job_id, project, region)}")
yield job_id
Utils.dataflow_jobs_cancel(job_id, region=region)
@staticmethod
def dataflow_extensible_template_run(
job_name: str,
template_path: str,
bucket_name: str,
parameters: dict[str, str] = {},
project: str = PROJECT,
region: str = REGION,
) -> str:
import yaml
unique_job_name = Utils.hyphen_name(job_name)
logging.info(f"dataflow_job_name: {unique_job_name}")
cmd = [
"gcloud",
"dataflow",
"jobs",
"run",
unique_job_name,
f"--gcs-location={template_path}",
f"--project={project}",
f"--region={region}",
] + [
f"--parameters={name}={value}"
for name, value in {
**parameters,
}.items()
]
logging.info(cmd)
stdout = subprocess.check_output(cmd).decode("utf-8")
logging.info(f"Launched Dataflow Template job: {unique_job_name}")
job_id = yaml.safe_load(stdout)["id"]
logging.info(f"Dataflow Template job id: {job_id}")
logging.info(f">> {Utils.dataflow_job_url(job_id, project, region)}")
yield job_id
Utils.dataflow_jobs_cancel(job_id)
@pytest.fixture(scope="session")
def utils() -> Utils:
logging.getLogger().setLevel(logging.INFO)
logging.info(f"Test unique identifier: {UUID}")
subprocess.check_call(["gcloud", "version"])
return Utils()
| Utils |
python | google__pytype | pytype/rewrite/stack_test.py | {
"start": 96,
"end": 2998
} | class ____(test_utils.ContextfulTestBase):
def _var(self, val):
return self.ctx.consts[val].to_variable()
def test_push(self):
s = stack.DataStack()
var = self._var(5)
s.push(var)
self.assertEqual(s._stack, [var])
def test_pop(self):
s = stack.DataStack()
var = self._var(5)
var = self.ctx.consts[5].to_variable()
s.push(var)
popped = s.pop()
self.assertEqual(popped, var)
self.assertFalse(s._stack)
def test_top(self):
s = stack.DataStack()
var = self._var(5)
s.push(var)
top = s.top()
self.assertEqual(top, var)
self.assertEqual(s._stack, [var])
def test_bool(self):
s = stack.DataStack()
self.assertFalse(s)
s.push(self._var(5))
self.assertTrue(s)
def test_len(self):
s = stack.DataStack()
self.assertEqual(len(s), 0) # pylint: disable=g-generic-assert
s.push(self._var(5))
self.assertEqual(len(s), 1)
def test_popn(self):
s = stack.DataStack()
var1 = self._var(1)
var2 = self._var(2)
s.push(var1)
s.push(var2)
popped1, popped2 = s.popn(2)
self.assertEqual(popped1, var1)
self.assertEqual(popped2, var2)
self.assertFalse(s)
def test_popn_zero(self):
s = stack.DataStack()
popped = s.popn(0)
self.assertFalse(popped)
def test_popn_too_many(self):
s = stack.DataStack()
with self.assertRaises(IndexError):
s.popn(1)
def test_pop_and_discard(self):
s = stack.DataStack()
s.push(self._var(5))
ret = s.pop_and_discard()
self.assertIsNone(ret)
self.assertFalse(s)
def test_peek(self):
s = stack.DataStack()
var = self._var(5)
s.push(var)
peeked = s.peek(1)
self.assertEqual(peeked, var)
self.assertEqual(len(s), 1)
def test_peek_error(self):
s = stack.DataStack()
for n in (0, 1):
with self.subTest(n=n):
with self.assertRaises(IndexError):
s.peek(n)
def test_replace(self):
s = stack.DataStack()
s.push(self._var(5))
s.replace(1, self._var(3))
self.assertEqual(s.top(), self._var(3))
def test_replace_error(self):
s = stack.DataStack()
s.push(self._var(5))
for n in (0, 2):
with self.subTest(n=n):
with self.assertRaises(IndexError):
s.replace(n, self._var(3))
def test_rotn(self):
s = stack.DataStack()
data = [self._var(x) for x in (0, 1, 2, 3, 4, 5)]
for d in data:
s.push(d)
s.rotn(3)
new = [data[x] for x in (0, 1, 2, 5, 3, 4)]
self.assertEqual(s._stack, new)
def test_rotn_error(self):
s = stack.DataStack()
data = [self._var(x) for x in (0, 1, 2, 3, 4, 5)]
for d in data:
s.push(d)
with self.assertRaises(IndexError):
s.rotn(0)
with self.assertRaises(IndexError):
s.rotn(1)
with self.assertRaises(IndexError):
s.rotn(8)
if __name__ == '__main__':
unittest.main()
| DataStackTest |
python | sdispater__pendulum | src/pendulum/tz/exceptions.py | {
"start": 430,
"end": 636
} | class ____(TimezoneError):
message = "The datetime {} is ambiguous."
def __init__(self, dt: datetime) -> None:
message = self.message.format(dt)
super().__init__(message)
| AmbiguousTime |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 5740,
"end": 6148
} | class ____:
def setup(self):
self.df = DataFrame(
{
"a": np.arange(1_000_000, dtype=np.int32),
"b": np.arange(1_000_000, dtype=np.int64),
"c": np.arange(1_000_000, dtype=float),
}
).astype({"a": "category", "b": "category"})
def time_categorical_level(self):
self.df.set_index(["a", "b"])
| CategoricalLevel |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/input_percentage_width.py | {
"start": 112,
"end": 753
} | class ____(App[None]):
CSS = """
Screen > *, Screen > *:focus {
width: 50%;
height: 1fr;
border: solid red;
}
App #ruler {
width: 1fr;
height: 1;
border: none;
}
"""
def compose(self) -> ComposeResult:
yield Label("[reverse]0123456789[/]0123456789" * 4, id="ruler")
input = Input()
input.cursor_blink = False
yield input
text_area = TextArea()
text_area.cursor_blink = False
yield text_area
yield Static()
yield Button()
if __name__ == "__main__":
InputVsTextArea().run()
| InputVsTextArea |
python | ray-project__ray | python/ray/llm/_internal/serve/serving_patterns/prefill_decode/builder.py | {
"start": 1396,
"end": 5908
} | class ____(BaseModelExtended):
"""Schema for P/D serving args."""
prefill_config: Union[str, dict, LLMConfig]
decode_config: Union[str, dict, LLMConfig]
proxy_cls_config: Union[dict, ProxyClsConfig] = Field(
default_factory=ProxyClsConfig,
description="The configuration for the proxy class.",
)
proxy_deployment_config: Optional[dict] = Field(
default_factory=dict,
description="The Ray @server.deployment options for the proxy server.",
)
ingress_cls_config: Union[dict, IngressClsConfig] = Field(
default_factory=IngressClsConfig,
description="The configuration for the ingress class.",
)
ingress_deployment_config: Optional[dict] = Field(
default_factory=dict,
description="The Ray @server.deployment options for the ingress.",
)
@field_validator("prefill_config", "decode_config")
@classmethod
def _validate_llm_config(cls, value: Any) -> LLMConfig:
if isinstance(value, str):
return LLMConfig.from_file(value)
elif isinstance(value, dict):
return LLMConfig.model_validate(value)
elif isinstance(value, LLMConfig):
return value
else:
raise TypeError(f"Invalid LLMConfig type: {type(value)}")
@field_validator("proxy_cls_config")
@classmethod
def _validate_proxy_cls_config(
cls, value: Union[dict, ProxyClsConfig]
) -> ProxyClsConfig:
if isinstance(value, dict):
return ProxyClsConfig.model_validate(value)
return value
@field_validator("ingress_cls_config")
@classmethod
def _validate_ingress_cls_config(
cls, value: Union[dict, IngressClsConfig]
) -> IngressClsConfig:
if isinstance(value, dict):
return IngressClsConfig.model_validate(value)
return value
@model_validator(mode="after")
def _validate_model_ids(self):
"""Validate that prefill and decode configs use the same model ID."""
if self.prefill_config.model_id != self.decode_config.model_id:
raise ValueError("P/D model id mismatch")
return self
@model_validator(mode="after")
def _validate_kv_transfer_config(self):
"""Validate that kv_transfer_config is set for both prefill and decode configs."""
for config in [self.prefill_config, self.decode_config]:
if config.engine_kwargs.get("kv_transfer_config") is None:
raise ValueError(
"kv_transfer_config is required for P/D disaggregation"
)
return self
def build_pd_openai_app(pd_serving_args: dict) -> Application:
"""Build a deployable application utilizing prefill/decode disaggregation."""
pd_config = PDServingArgs.model_validate(pd_serving_args)
prefill_deployment = build_llm_deployment(
pd_config.prefill_config, name_prefix="Prefill:"
)
decode_deployment = build_llm_deployment(
pd_config.decode_config, name_prefix="Decode:"
)
# Get the default deployment options from the PDProxyServer class based on the prefill and decode configs.
proxy_cls_config = pd_config.proxy_cls_config
pd_proxy_server_options = proxy_cls_config.proxy_cls.get_deployment_options(
pd_config.prefill_config, pd_config.decode_config
)
# Override if the proxy deployment config is provided.
if pd_config.proxy_deployment_config:
pd_proxy_server_options = deep_merge_dicts(
pd_proxy_server_options, pd_config.proxy_deployment_config
)
proxy_server_deployment = (
serve.deployment(proxy_cls_config.proxy_cls)
.options(**pd_proxy_server_options)
.bind(
prefill_server=prefill_deployment,
decode_server=decode_deployment,
**proxy_cls_config.proxy_extra_kwargs,
)
)
ingress_cls_config = pd_config.ingress_cls_config
ingress_options = ingress_cls_config.ingress_cls.get_deployment_options(
[pd_config.prefill_config, pd_config.decode_config]
)
if pd_config.ingress_deployment_config:
ingress_options = deep_merge_dicts(
ingress_options, pd_config.ingress_deployment_config
)
ingress_cls = make_fastapi_ingress(ingress_cls_config.ingress_cls)
return serve.deployment(ingress_cls, **ingress_options).bind(
llm_deployments=[proxy_server_deployment],
**ingress_cls_config.ingress_extra_kwargs,
)
| PDServingArgs |
python | google__pytype | pytype/tests/test_generic2.py | {
"start": 28457,
"end": 40417
} | class ____(test_base.BaseTest):
"""Tests for User-defined Generic Type."""
def test_type_parameter_duplicated(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Generic, Dict
T = TypeVar("T")
class A(Dict[T, T], Generic[T]): pass
""",
)
ty = self.Infer(
"""
import a
def f():
x = a.A()
x[1] = 2
return x
d = None # type: a.A[int]
ks, vs = d.keys(), d.values()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
d = ... # type: a.A[int]
ks = ... # type: dict_keys[int]
vs = ... # type: dict_values[int]
def f() -> a.A[int]: ...
""",
)
def test_typevar_under_decorator(self):
self.Check("""
import abc
from typing import Generic, Tuple, TypeVar
T = TypeVar('T')
class Foo(abc.ABC, Generic[T]):
@abc.abstractmethod
def parse(self) -> Tuple[T]:
raise NotImplementedError()
""")
def test_typevar_in_class_attribute(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
x: T
x = Foo[int]().x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
x: T
x: int
""",
)
def test_bad_typevar_in_class_attribute(self):
errors = self.CheckWithErrors("""
from typing import Generic, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
class Foo(Generic[T1]):
x: T2 # invalid-annotation[e]
""")
self.assertErrorRegexes(
errors, {"e": r"TypeVar\(s\) 'T2' not in scope for class 'Foo'"}
)
def test_typevar_in_instance_attribute(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, x, y):
self.x: T = x
self.y = y # type: T
foo = Foo[int](__any_object__, __any_object__)
x, y = foo.x, foo.y
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
x: T
y: T
def __init__(self, x, y) -> None: ...
foo: Foo[int]
x: int
y: int
""",
)
def test_bad_typevar_in_instance_attribute(self):
errors = self.CheckWithErrors("""
from typing import Generic, TypeVar
T1 = TypeVar('T1')
T2 = TypeVar('T2')
class Foo(Generic[T1]):
def __init__(self, x, y):
self.x: T2 = x # invalid-annotation[e1]
self.y = y # type: T2 # invalid-annotation[e2]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"TypeVar\(s\) 'T2' not in scope for class 'Foo'",
"e2": r"TypeVar\(s\) 'T2' not in scope for class 'Foo'",
},
)
def test_typevar_in_classmethod(self):
self.Check("""
from typing import Generic, TypeVar
T = TypeVar('T')
class X(Generic[T]):
@classmethod
def f(cls, x: T) -> T:
y: T = x
return y
""")
def test_reingest_generic(self):
foo = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, x: T):
self.x = x
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo))
ty = self.Infer(
"""
import foo
x1 = foo.Foo(0).x
x2 = foo.Foo[str](__any_object__).x
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x1: int
x2: str
""",
)
def test_inherit_from_nested_generic(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo:
class Bar(Generic[T]):
pass
class Baz(Bar[T]):
pass
class Qux(Foo.Bar[T]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo:
class Bar(Generic[T]): ...
class Baz(Foo.Bar[T]): ...
class Qux(Foo.Bar[T]): ...
""",
)
def test_mutation_to_unknown(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Generic, TypeVar, overload
T1 = TypeVar('T1')
T2 = TypeVar('T2')
class A(Generic[T1, T2]):
@overload
def f(self, x: str) -> None:
self = A[bytes, T2]
@overload
def f(self, x: int) -> None:
self = A[float, T2]
""",
)]):
self.Check("""
import foo
from typing import Any
a = foo.A[int, int]()
a.f(__any_object__)
assert_type(a, foo.A[Any, int])
""")
def test_invalid_mutation(self):
with self.DepTree([
(
"_typing.pyi",
"""
from typing import Any
NDArray: Any
""",
),
(
"my_numpy.pyi",
"""
from _typing import NDArray
from typing import Any, Generic, TypeVar
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
class ndarray(Generic[_T1, _T2]):
def __getitem__(self: NDArray[Any], key: str) -> NDArray[Any]: ...
""",
),
]):
err = self.CheckWithErrors("""
import my_numpy as np
def aggregate_on_columns(matrix: np.ndarray):
matrix = matrix[None, :] # invalid-signature-mutation[e]
""")
self.assertErrorSequences(
err, {"e": ["ndarray.__getitem__", "self = Any"]}
)
def test_class_name_prefix(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Alpha(Generic[T]):
def __init__(self, x: T):
pass
class Alphabet(Alpha[str]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Alpha(Generic[T]):
def __init__(self, x: T):
self = Alpha[T]
class Alphabet(Alpha[str]): ...
""",
)
def test_inherit_generic_namedtuple(self):
self.Check("""
from typing import AnyStr, Generic, NamedTuple
class Base(NamedTuple, Generic[AnyStr]):
x: AnyStr
class Child(Base[str]):
pass
c: Child
assert_type(c.x, str)
""")
def test_inherit_generic_namedtuple_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import AnyStr, Generic, NamedTuple
class Base(NamedTuple, Generic[AnyStr]):
x: AnyStr
class Child(Base[str]): ...
""",
)]):
self.Check("""
import foo
c: foo.Child
assert_type(c.x, str)
""")
def test_generic_signature(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Generic, TypeVar, Union
T = TypeVar('T', bound=Union[int, str])
class A(Generic[T]):
def f(self, x: T): ...
""",
)]):
self.Check("""
import foo
class B(foo.A[str]):
def f(self, x: str):
pass
""")
def test_classmethod(self):
self.Check("""
from typing import Generic, Type, TypeVar
T = TypeVar('T')
class X(Generic[T]):
@classmethod
def f(cls) -> Type[T]:
return __any_object__
class Y(X[str]):
pass
assert_type(Y.f(), Type[str])
assert_type(Y().f(), Type[str])
""")
def test_classmethod_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class X(Generic[T]):
@classmethod
def f(cls) -> type[T]: ...
""",
)]):
self.Check("""
import foo
from typing import Type
class Y(foo.X[str]):
pass
assert_type(Y.f(), Type[str])
assert_type(Y().f(), Type[str])
""")
def test_classmethod_reingest(self):
with self.DepTree([(
"foo.py",
"""
from typing import Generic, Type, TypeVar
T = TypeVar('T')
class X(Generic[T]):
@classmethod
def f(cls) -> Type[T]:
return __any_object__
""",
)]):
self.Check("""
import foo
from typing import Type
class Y(foo.X[str]):
pass
assert_type(Y.f(), Type[str])
assert_type(Y().f(), Type[str])
""")
def test_annotated_cls(self):
self.Check("""
from typing import Generic, Type, TypeVar
T = TypeVar('T', int, str)
class A(Generic[T]):
@classmethod
def f(cls: Type['A[T]'], x: T) -> T:
return x
def f() -> str:
return A.f('')
""")
@test_base.skip("TODO(b/297390011): Support this.")
def test_annotated_cls_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Generic, Type, TypeVar
T = TypeVar('T', int, str)
class A(Generic[T]):
@classmethod
def f(cls: Type[A[T]], x: T) -> T: ...
""",
)]):
self.Check("""
import foo
def f() -> str:
return foo.A.f('')
""")
def test_generic_staticmethod(self):
# Regression test for a crash caused by
# InterpreterClass.update_method_type_params treating static methods as
# instance methods.
self.Check("""
from typing import Any, Callable, Generic, TypeVar, Union
T = TypeVar('T')
class Expr(Generic[T]):
def __call__(self, *args: Any, **kwargs: Any) -> T:
return __any_object__
@staticmethod
def make_unbound(
init: Union[Callable[..., T], 'Expr[T]'],
) -> 'Expr[T]':
return Expr()
def expr_var(initial_expr: Expr[T]) -> Expr[T]:
return Expr.make_unbound(init=initial_expr)
""")
def test_inherit_from_generic_class_with_generic_instance_method(self):
ty = self.Infer("""
from typing import Generic, TypeVar
T = TypeVar('T')
class Base(Generic[T]):
def __init__(self, x: T):
self.x: T = x
class Child(Base[bool]):
pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Base(Generic[T]):
x: T
def __init__(self, x: T) -> None:
self = Base[T]
class Child(Base[bool]):
x: bool
""",
)
def test_use_super_with_mismatching_generic_type(self):
# Check that this doesn't crash. The type of `base` is declared incorrectly,
# but the following line used to cause a crash despite having the #ignore.
ty = self.Infer("""
from typing import Generic, List, Optional, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, default: Optional[T]):
pass
class MultiFoo(Generic[T], Foo[List[T]]):
def test(self) -> None:
base: Foo[T]
base = super(MultiFoo, self) # type: ignore[assignment]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generic, List, Optional, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, default: Optional[T]) -> None:
self = Foo[T]
class MultiFoo(Generic[T], Foo[List[T]]):
def test(self) -> None: ...
""",
)
if __name__ == "__main__":
test_base.main()
| GenericFeatureTest |
python | pytorch__pytorch | test/inductor/test_provenance_tracing.py | {
"start": 20303,
"end": 20603
} | class ____(logging.Filter):
def filter(self, record):
if "artifact" in record.metadata:
return (
record.metadata["artifact"]["name"]
== "inductor_provenance_tracking_kernel_stack_traces"
)
return False
| ProvenanceArtifactFilter |
python | realpython__materials | python-mutable-immutable/person.py | {
"start": 71,
"end": 329
} | class ____(Person):
def __init__(self, name, major):
super().__init__(name)
self.major = major
john = Student("John", "Computer Science")
print(type(john))
john.__class__ = Person
print(john.name)
print(john.major)
print(type(john))
| Student |
python | mlflow__mlflow | mlflow/pyfunc/model.py | {
"start": 56486,
"end": 61469
} | class ____:
"""
Wrapper class that creates a predict function such that
predict(model_input: pd.DataFrame) -> model's output as pd.DataFrame (pandas DataFrame)
"""
def __init__(self, python_model: PythonModel, context, signature):
"""
Args:
python_model: An instance of a subclass of :class:`~PythonModel`.
context: A :class:`~PythonModelContext` instance containing artifacts that
``python_model`` may use when performing inference.
signature: :class:`~ModelSignature` instance describing model input and output.
"""
self.python_model = python_model
self.context = context
self.signature = signature
def _convert_input(self, model_input):
hints = self.python_model.predict_type_hints
# we still need this for backwards compatibility
if isinstance(model_input, pd.DataFrame):
if _is_list_str(hints.input):
first_string_column = _get_first_string_column(model_input)
if first_string_column is None:
raise MlflowException.invalid_parameter_value(
"Expected model input to contain at least one string column"
)
return model_input[first_string_column].tolist()
elif _is_list_dict_str(hints.input):
if (
len(self.signature.inputs) == 1
and next(iter(self.signature.inputs)).name is None
):
if first_string_column := _get_first_string_column(model_input):
return model_input[[first_string_column]].to_dict(orient="records")
if len(model_input.columns) == 1:
return model_input.to_dict("list")[0]
return model_input.to_dict(orient="records")
elif isinstance(hints.input, type) and (
issubclass(hints.input, ChatCompletionRequest)
or issubclass(hints.input, SplitChatMessagesRequest)
):
# If the type hint is a RAG dataclass, we hydrate it
# If there are multiple rows, we should throw
if len(model_input) > 1:
raise MlflowException(
"Expected a single input for dataclass type hint, but got multiple rows"
)
# Since single input is expected, we take the first row
return _hydrate_dataclass(hints.input, model_input.iloc[0])
return model_input
def predict(self, model_input, params: dict[str, Any] | None = None):
"""
Args:
model_input: Model input data as one of dict, str, bool, bytes, float, int, str type.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions as an iterator of chunks. The chunks in the iterator must be type of
dict or string. Chunk dict fields are determined by the model implementation.
"""
parameters = inspect.signature(self.python_model.predict).parameters
kwargs = {}
if "params" in parameters:
kwargs["params"] = params
else:
_log_warning_if_params_not_in_predict_signature(_logger, params)
if _is_context_in_predict_function_signature(parameters=parameters):
return self.python_model.predict(
self.context, self._convert_input(model_input), **kwargs
)
else:
return self.python_model.predict(self._convert_input(model_input), **kwargs)
def predict_stream(self, model_input, params: dict[str, Any] | None = None):
"""
Args:
model_input: LLM Model single input.
params: Additional parameters to pass to the model for inference.
Returns:
Streaming predictions.
"""
parameters = inspect.signature(self.python_model.predict_stream).parameters
kwargs = {}
if "params" in parameters:
kwargs["params"] = params
else:
_log_warning_if_params_not_in_predict_signature(_logger, params)
if _is_context_in_predict_function_signature(parameters=parameters):
return self.python_model.predict_stream(
self.context, self._convert_input(model_input), **kwargs
)
else:
return self.python_model.predict_stream(self._convert_input(model_input), **kwargs)
def _get_pyfunc_loader_module(python_model):
if isinstance(python_model, ChatModel):
return mlflow.pyfunc.loaders.chat_model.__name__
elif isinstance(python_model, ChatAgent):
return mlflow.pyfunc.loaders.chat_agent.__name__
elif isinstance(python_model, ResponsesAgent):
return mlflow.pyfunc.loaders.responses_agent.__name__
return __name__
| _PythonModelPyfuncWrapper |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 259558,
"end": 268709
} | class ____:
# fitting assumes continuous parameters
skip = ['ncf', 'ksone', 'kstwo', 'irwinhall']
def setup_method(self):
self.rng = np.random.default_rng(4522425749)
# skip these b/c deprecated, or only loc and scale arguments
fitSkipNonFinite = ['expon', 'norm', 'uniform', 'irwinhall']
@pytest.mark.parametrize('dist,args', distcont)
def test_fit_w_non_finite_data_values(self, dist, args):
"""gh-10300"""
if dist in self.fitSkipNonFinite:
pytest.skip(f"{dist} fit known to fail or deprecated")
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
y = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
distfunc = getattr(stats, dist)
assert_raises(ValueError, distfunc.fit, x, fscale=1)
assert_raises(ValueError, distfunc.fit, y, fscale=1)
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20, random_state=self.rng)
expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean())
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[expected_shape, 0, 20], atol=1e-8)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_expon_fit(self):
x = np.array([2, 2, 4, 4, 4, 4, 4, 8])
loc, scale = stats.expon.fit(x)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 2) # x.mean() - x.min()
loc, scale = stats.expon.fit(x, fscale=3)
assert_equal(loc, 2) # x.min()
assert_equal(scale, 3) # fscale
loc, scale = stats.expon.fit(x, floc=0)
assert_equal(loc, 0) # floc
assert_equal(scale, 4) # x.mean() - loc
def test_lognorm_fit(self):
x = np.array([1.5, 3, 10, 15, 23, 59])
lnxm1 = np.log(x - 1)
shape, loc, scale = stats.lognorm.fit(x, floc=1)
assert_allclose(shape, lnxm1.std(), rtol=1e-12)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6)
assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()),
rtol=1e-12)
assert_equal(loc, 1)
assert_equal(scale, 6)
shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75)
assert_equal(shape, 0.75)
assert_equal(loc, 1)
assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12)
def test_uniform_fit(self):
x = np.array([1.0, 1.1, 1.2, 9.0])
loc, scale = stats.uniform.fit(x)
assert_equal(loc, x.min())
assert_equal(scale, np.ptp(x))
loc, scale = stats.uniform.fit(x, floc=0)
assert_equal(loc, 0)
assert_equal(scale, x.max())
loc, scale = stats.uniform.fit(x, fscale=10)
assert_equal(loc, 0)
assert_equal(scale, 10)
assert_raises(ValueError, stats.uniform.fit, x, floc=2.0)
assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0)
@pytest.mark.xslow
@pytest.mark.parametrize("method", ["MLE", "MM"])
def test_fshapes(self, method):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=self.rng)
res_1 = stats.beta.fit(x, f0=3., method=method)
res_2 = stats.beta.fit(x, fa=3., method=method)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3., method=method)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4., method=method)
res_4 = stats.beta.fit(x, fb=4., method=method)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4., method=method)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2, method=method)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3, method=method)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1, method=method)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100, random_state=self.rng)
aa, ll, ss = stats.gamma.fit(data, fa=a, method=method)
assert_equal(aa, a)
@pytest.mark.parametrize("method", ["MLE", "MM"])
def test_extra_params(self, method):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100, random_state=self.rng)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct, method=method)
| TestFitMethod |
python | PyCQA__pylint | tests/functional/a/arguments.py | {
"start": 3527,
"end": 3900
} | class ____:
""" lambda needs Test instance as first argument """
lam = lambda self, icon: (self, icon)
def test(self):
self.lam(42)
self.lam() # [no-value-for-parameter]
self.lam(1, 2, 3) # [too-many-function-args]
Test().lam() # [no-value-for-parameter]
# Don't emit a redundant-keyword-arg for this example,
# it's perfectly valid
| Test |
python | python__mypy | mypy/nodes.py | {
"start": 99054,
"end": 99430
} | class ____(Expression):
"""Ducktype class decorator expression _promote(...)."""
__slots__ = ("type",)
type: mypy.types.ProperType
def __init__(self, type: mypy.types.ProperType) -> None:
super().__init__()
self.type = type
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit__promote_expr(self)
| PromoteExpr |
python | streamlit__streamlit | lib/tests/streamlit/runtime/memory_session_storage_test.py | {
"start": 815,
"end": 2435
} | class ____(unittest.TestCase):
"""Test MemorySessionStorage.
These tests are intentionally extremely simple to ensure that we don't just end up
testing cachetools.TTLCache. We try to just verify that we've wrapped TTLCache
correctly, and in particular we avoid testing cache expiry functionality.
"""
def test_uses_ttl_cache(self):
"""Verify that the backing cache of a MemorySessionStorage is a TTLCache.
We do this because we're intentionally avoiding writing tests around cache
expiry because the cachetools library should do this for us. In the case
that the backing cache for a MemorySessionStorage ever changes, we'll likely be
responsible for adding our own tests.
"""
store = MemorySessionStorage()
assert isinstance(store._cache, TTLCache)
def test_get(self):
store = MemorySessionStorage()
store._cache["foo"] = "bar"
assert store.get("foo") == "bar"
assert store.get("baz") is None
def test_save(self):
store = MemorySessionStorage()
session_info = MagicMock()
session_info.session.id = "foo"
store.save(session_info)
assert store.get("foo") == session_info
def test_delete(self):
store = MemorySessionStorage()
store._cache["foo"] = "bar"
store.delete("foo")
assert store.get("foo") is None
def test_list(self):
store = MemorySessionStorage()
store._cache["foo"] = "bar"
store._cache["baz"] = "qux"
assert store.list() == ["bar", "qux"]
| MemorySessionStorageTest |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 128363,
"end": 130365
} | class ____(Response):
"""
Response of events.get_task_single_value_metrics endpoint.
:param tasks: Single value metrics grouped by task
:type tasks: Sequence[dict]
"""
_service = "events"
_action = "get_task_single_value_metrics"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"tasks": {
"description": "Single value metrics grouped by task",
"items": {
"properties": {
"task": {"description": "Task ID", "type": "string"},
"values": {
"items": {
"properties": {
"metric": {"type": "string"},
"timestamp": {"type": "number"},
"value": {"type": "number"},
"variant": {"type": "string"},
},
"type": "object",
},
"type": "array",
},
},
"type": "object",
},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, tasks: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(GetTaskSingleValueMetricsResponse, self).__init__(**kwargs)
self.tasks = tasks
@schema_property("tasks")
def tasks(self) -> Optional[List[dict]]:
return self._property_tasks
@tasks.setter
def tasks(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", (dict,), is_array=True)
self._property_tasks = value
| GetTaskSingleValueMetricsResponse |
python | scipy__scipy | scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py | {
"start": 1287,
"end": 26594
} | class ____:
"""The Hessian of the Lagrangian as LinearOperator.
The Lagrangian is computed as the objective function plus all the
constraints multiplied with some numbers (Lagrange multipliers).
"""
def __init__(self, n, objective_hess, constraints_hess):
self.n = n
self.objective_hess = objective_hess
self.constraints_hess = constraints_hess
def __call__(self, x, v_eq, v_ineq=None):
if v_ineq is None:
v_ineq = np.empty(0)
H_objective = self.objective_hess(x)
H_constraints = self.constraints_hess(x, v_eq, v_ineq)
def matvec(p):
return H_objective.dot(p) + H_constraints.dot(p)
return LinearOperator((self.n, self.n), matvec)
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
start_time, tr_radius, constr_penalty, cg_info):
state.nit += 1
state.nfev = objective.nfev
state.njev = objective.ngev
state.nhev = objective.nhev
state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
for c in prepared_constraints]
if not last_iteration_failed:
state.x = x
state.fun = objective.f
state.grad = objective.g
state.v = [c.fun.v for c in prepared_constraints]
state.constr = [c.fun.f for c in prepared_constraints]
state.jac = [c.fun.J for c in prepared_constraints]
# Compute Lagrangian Gradient
state.lagrangian_grad = np.copy(state.grad)
for c in prepared_constraints:
state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
# Compute maximum constraint violation
state.constr_violation = 0
for i in range(len(prepared_constraints)):
lb, ub = prepared_constraints[i].bounds
c = state.constr[i]
state.constr_violation = np.max([state.constr_violation,
np.max(lb - c),
np.max(c - ub)])
state.execution_time = time.time() - start_time
state.tr_radius = tr_radius
state.constr_penalty = constr_penalty
state.cg_niter += cg_info["niter"]
state.cg_stop_cond = cg_info["stop_cond"]
return state
def update_state_ip(state, x, last_iteration_failed, objective,
prepared_constraints, start_time,
tr_radius, constr_penalty, cg_info,
barrier_parameter, barrier_tolerance):
state = update_state_sqp(state, x, last_iteration_failed, objective,
prepared_constraints, start_time, tr_radius,
constr_penalty, cg_info)
state.barrier_parameter = barrier_parameter
state.barrier_tolerance = barrier_tolerance
return state
def _minimize_trustregion_constr(fun, x0, args, grad,
hess, hessp, bounds, constraints,
xtol=1e-8, gtol=1e-8,
barrier_tol=1e-8,
sparse_jacobian=None,
callback=None, maxiter=1000,
verbose=0, finite_diff_rel_step=None,
initial_constr_penalty=1.0, initial_tr_radius=1.0,
initial_barrier_parameter=0.1,
initial_barrier_tolerance=0.1,
factorization_method=None,
disp=False,
workers=None):
"""Minimize a scalar function subject to constraints.
Parameters
----------
gtol : float, optional
Tolerance for termination by the norm of the Lagrangian gradient.
The algorithm will terminate when both the infinity norm (i.e., max
abs value) of the Lagrangian gradient and the constraint violation
are smaller than ``gtol``. Default is 1e-8.
xtol : float, optional
Tolerance for termination by the change of the independent variable.
The algorithm will terminate when ``tr_radius < xtol``, where
``tr_radius`` is the radius of the trust region used in the algorithm.
Default is 1e-8.
barrier_tol : float, optional
Threshold on the barrier parameter for the algorithm termination.
When inequality constraints are present, the algorithm will terminate
only when the barrier parameter is less than `barrier_tol`.
Default is 1e-8.
sparse_jacobian : {bool, None}, optional
Determines how to represent Jacobians of the constraints. If bool,
then Jacobians of all the constraints will be converted to the
corresponding format. If None (default), then Jacobians won't be
converted, but the algorithm can proceed only if they all have the
same format.
initial_tr_radius: float, optional
Initial trust radius. The trust radius gives the maximum distance
between solution points in consecutive iterations. It reflects the
trust the algorithm puts in the local approximation of the optimization
problem. For an accurate local approximation the trust-region should be
large and for an approximation valid only close to the current point it
should be a small one. The trust radius is automatically updated throughout
the optimization process, with ``initial_tr_radius`` being its initial value.
Default is 1 (recommended in [1]_, p. 19).
initial_constr_penalty : float, optional
Initial constraints penalty parameter. The penalty parameter is used for
balancing the requirements of decreasing the objective function
and satisfying the constraints. It is used for defining the merit function:
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
the constraints. The merit function is used for accepting or rejecting
trial points and ``constr_penalty`` weights the two conflicting goals
of reducing objective function and constraints. The penalty is automatically
updated throughout the optimization process, with
``initial_constr_penalty`` being its initial value. Default is 1
(recommended in [1]_, p 19).
initial_barrier_parameter, initial_barrier_tolerance: float, optional
Initial barrier parameter and initial tolerance for the barrier subproblem.
Both are used only when inequality constraints are present. For dealing with
optimization problems ``min_x f(x)`` subject to inequality constraints
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
is solved for decreasing values of ``barrier_parameter`` and with decreasing
tolerances for the termination, starting with ``initial_barrier_parameter``
for the barrier parameter and ``initial_barrier_tolerance`` for the
barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
with the same prefactor.
factorization_method : string or None, optional
Method to factorize the Jacobian of the constraints. Use None (default)
for the auto selection or one of:
- 'NormalEquation' (requires scikit-sparse)
- 'AugmentedSystem'
- 'QRFactorization'
- 'SVDFactorization'
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
with sparse constraints. The projections required by the algorithm
will be computed using, respectively, the normal equation and the
augmented system approaches explained in [1]_. 'NormalEquation'
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
performs the LU factorization of an augmented system. They usually
provide similar results. 'AugmentedSystem' is used by default for
sparse matrices.
The methods 'QRFactorization' and 'SVDFactorization' can be used
only with dense constraints. They compute the required projections
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
method can cope with Jacobian matrices with deficient row rank and will
be used whenever other factorization methods fail (which may imply the
conversion of sparse matrices to a dense format when required).
By default, 'QRFactorization' is used for dense matrices.
finite_diff_rel_step : None or array_like, optional
Relative step size for the finite difference approximation.
maxiter : int, optional
Maximum number of algorithm iterations. Default is 1000.
verbose : {0, 1, 2, 3}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
* 3 : display progress during iterations (more complete report).
disp : bool, optional
If True (default), then `verbose` will be set to 1 if it was 0.
workers : int, map-like callable, optional
A map-like callable, such as `multiprocessing.Pool.map` for evaluating
any numerical differentiation in parallel.
This evaluation is carried out as ``workers(fun, iterable)``.
.. versionadded:: 1.16.0
Returns
-------
`OptimizeResult` with the fields documented below. Note the following:
1. All values corresponding to the constraints are ordered as they
were passed to the solver. And values corresponding to `bounds`
constraints are put *after* other constraints.
2. All numbers of function, Jacobian or Hessian evaluations correspond
to numbers of actual Python function calls. It means, for example,
that if a Jacobian is estimated by finite differences, then the
number of Jacobian evaluations will be zero and the number of
function evaluations will be incremented by all calls during the
finite difference estimation.
x : ndarray, shape (n,)
Solution found.
optimality : float
Infinity norm of the Lagrangian gradient at the solution.
constr_violation : float
Maximum constraint violation at the solution.
fun : float
Objective function at the solution.
grad : ndarray, shape (n,)
Gradient of the objective function at the solution.
lagrangian_grad : ndarray, shape (n,)
Gradient of the Lagrangian function at the solution.
nit : int
Total number of iterations.
nfev : integer
Number of the objective function evaluations.
njev : integer
Number of the objective function gradient evaluations.
nhev : integer
Number of the objective function Hessian evaluations.
cg_niter : int
Total number of the conjugate gradient method iterations.
method : {'equality_constrained_sqp', 'tr_interior_point'}
Optimization method used.
constr : list of ndarray
List of constraint values at the solution.
jac : list of {ndarray, sparse array}
List of the Jacobian matrices of the constraints at the solution.
v : list of ndarray
List of the Lagrange multipliers for the constraints at the solution.
For an inequality constraint a positive multiplier means that the upper
bound is active, a negative multiplier means that the lower bound is
active and if a multiplier is zero it means the constraint is not
active.
constr_nfev : list of int
Number of constraint evaluations for each of the constraints.
constr_njev : list of int
Number of Jacobian matrix evaluations for each of the constraints.
constr_nhev : list of int
Number of Hessian evaluations for each of the constraints.
tr_radius : float
Radius of the trust region at the last iteration.
constr_penalty : float
Penalty parameter at the last iteration, see `initial_constr_penalty`.
barrier_tolerance : float
Tolerance for the barrier subproblem at the last iteration.
Only for problems with inequality constraints.
barrier_parameter : float
Barrier parameter at the last iteration. Only for problems
with inequality constraints.
execution_time : float
Total execution time.
message : str
Termination message.
status : {0, 1, 2, 3, 4}
Termination status:
* 0 : The maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `xtol` termination condition is satisfied.
* 3 : `callback` raised `StopIteration`.
* 4 : Constraint violation exceeds 'gtol'.
.. versionchanged:: 1.15.0
If the constraint violation exceeds `gtol`, then ``result.success``
will now be False.
cg_stop_cond : int
Reason for CG subproblem termination at the last iteration:
* 0 : CG subproblem not evaluated.
* 1 : Iteration limit was reached.
* 2 : Reached the trust-region boundary.
* 3 : Negative curvature detected.
* 4 : Tolerance was satisfied.
References
----------
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
Trust region methods. 2000. Siam. pp. 19.
"""
x0 = np.atleast_1d(x0).astype(float)
n_vars = np.size(x0)
if hess is None:
if callable(hessp):
hess = HessianLinearOperator(hessp, n_vars)
else:
hess = BFGS()
if disp and verbose == 0:
verbose = 1
if bounds is not None:
modified_lb = np.nextafter(bounds.lb, -np.inf, where=bounds.lb > -np.inf,
out=None)
modified_ub = np.nextafter(bounds.ub, np.inf, where=bounds.ub < np.inf,
out=None)
modified_lb = np.where(np.isfinite(bounds.lb), modified_lb, bounds.lb)
modified_ub = np.where(np.isfinite(bounds.ub), modified_ub, bounds.ub)
bounds = Bounds(modified_lb, modified_ub, keep_feasible=bounds.keep_feasible)
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
bounds.keep_feasible, n_vars)
else:
finite_diff_bounds = (-np.inf, np.inf)
# Define Objective Function
objective = ScalarFunction(fun, x0, args, grad, hess,
finite_diff_rel_step, finite_diff_bounds,
workers=workers)
# Put constraints in list format when needed.
if isinstance(constraints, (NonlinearConstraint | LinearConstraint)):
constraints = [constraints]
# Prepare constraints.
prepared_constraints = [
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
for c in constraints]
# Check that all constraints are either sparse or dense.
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
if 0 < n_sparse < len(prepared_constraints):
raise ValueError("All constraints must have the same kind of the "
"Jacobian --- either all sparse or all dense. "
"You can set the sparsity globally by setting "
"`sparse_jacobian` to either True of False.")
if prepared_constraints:
sparse_jacobian = n_sparse > 0
if bounds is not None:
if sparse_jacobian is None:
sparse_jacobian = True
prepared_constraints.append(PreparedConstraint(bounds, x0,
sparse_jacobian))
# Concatenate initial constraints to the canonical form.
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
n_vars, prepared_constraints, sparse_jacobian)
# Prepare all canonical constraints and concatenate it into one.
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
for c in prepared_constraints]
if len(canonical_all) == 0:
canonical = CanonicalConstraint.empty(n_vars)
elif len(canonical_all) == 1:
canonical = canonical_all[0]
else:
canonical = CanonicalConstraint.concatenate(canonical_all,
sparse_jacobian)
# Generate the Hessian of the Lagrangian.
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
# Choose appropriate method
if canonical.n_ineq == 0:
method = 'equality_constrained_sqp'
else:
method = 'tr_interior_point'
# Construct OptimizeResult
state = OptimizeResult(
nit=0, nfev=0, njev=0, nhev=0,
cg_niter=0, cg_stop_cond=0,
fun=objective.f, grad=objective.g,
lagrangian_grad=np.copy(objective.g),
constr=[c.fun.f for c in prepared_constraints],
jac=[c.fun.J for c in prepared_constraints],
constr_nfev=[0 for c in prepared_constraints],
constr_njev=[0 for c in prepared_constraints],
constr_nhev=[0 for c in prepared_constraints],
v=[c.fun.v for c in prepared_constraints],
method=method)
# Start counting
start_time = time.time()
# Define stop criteria
if method == 'equality_constrained_sqp':
def stop_criteria(state, x, last_iteration_failed,
optimality, constr_violation,
tr_radius, constr_penalty, cg_info):
state = update_state_sqp(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
SQPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward-compatibility)
if callback is not None:
callback_stop = False
try:
callback_stop = callback(state)
except StopIteration:
callback_stop = True
if callback_stop:
state.status = 3
return True
if state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif state.tr_radius < xtol:
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
elif method == 'tr_interior_point':
def stop_criteria(state, x, last_iteration_failed, tr_radius,
constr_penalty, cg_info, barrier_parameter,
barrier_tolerance):
state = update_state_ip(state, x, last_iteration_failed,
objective, prepared_constraints,
start_time, tr_radius, constr_penalty,
cg_info, barrier_parameter, barrier_tolerance)
if verbose == 2:
BasicReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation)
elif verbose > 2:
IPReport.print_iteration(state.nit,
state.nfev,
state.cg_niter,
state.fun,
state.tr_radius,
state.optimality,
state.constr_violation,
state.constr_penalty,
state.barrier_parameter,
state.cg_stop_cond)
state.status = None
state.niter = state.nit # Alias for callback (backward compatibility)
if callback is not None:
callback_stop = False
try:
callback_stop = callback(state)
except StopIteration:
callback_stop = True
if callback_stop:
state.status = 3
return True
if state.optimality < gtol and state.constr_violation < gtol:
state.status = 1
elif (state.tr_radius < xtol
and state.barrier_parameter < barrier_tol):
state.status = 2
elif state.nit >= maxiter:
state.status = 0
return state.status in (0, 1, 2, 3)
if verbose == 2:
BasicReport.print_header()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_header()
elif method == 'tr_interior_point':
IPReport.print_header()
# Call inferior function to do the optimization
if method == 'equality_constrained_sqp':
def fun_and_constr(x):
f = objective.fun(x)
c_eq, _ = canonical.fun(x)
return f, c_eq
def grad_and_jac(x):
g = objective.grad(x)
J_eq, _ = canonical.jac(x)
return g, J_eq
_, result = equality_constrained_sqp(
fun_and_constr, grad_and_jac, lagrangian_hess,
x0, objective.f, objective.g,
c_eq0, J_eq0,
stop_criteria, state,
initial_constr_penalty, initial_tr_radius,
factorization_method)
elif method == 'tr_interior_point':
_, result = tr_interior_point(
objective.fun, objective.grad, lagrangian_hess,
n_vars, canonical.n_ineq, canonical.n_eq,
canonical.fun, canonical.jac,
x0, objective.f, objective.g,
c_ineq0, J_ineq0, c_eq0, J_eq0,
stop_criteria,
canonical.keep_feasible,
xtol, state, initial_barrier_parameter,
initial_barrier_tolerance,
initial_constr_penalty, initial_tr_radius,
factorization_method, finite_diff_bounds)
# Status 4 occurs when minimize is successful but constraints are not satisfied.
if result.status in (1, 2) and state.constr_violation > gtol:
result.status = 4
# Status 3 occurs when the callback function requests termination,
# this is assumed to not be a success.
result.success = True if result.status in (1, 2) else False
result.message = TERMINATION_MESSAGES[result.status]
# Alias (for backward compatibility with 1.1.0)
result.niter = result.nit
if verbose == 2:
BasicReport.print_footer()
elif verbose > 2:
if method == 'equality_constrained_sqp':
SQPReport.print_footer()
elif method == 'tr_interior_point':
IPReport.print_footer()
if verbose >= 1:
print(result.message)
print(f"Number of iterations: {result.nit}, "
f"function evaluations: {result.nfev}, "
f"CG iterations: {result.cg_niter}, "
f"optimality: {result.optimality:.2e}, "
f"constraint violation: {result.constr_violation:.2e}, "
f"execution time: {result.execution_time:4.2} s.")
return result
| LagrangianHessian |
python | gevent__gevent | src/gevent/tests/test__fileobject.py | {
"start": 16313,
"end": 16919
} | class ____(CleanupMixin, unittest.TestCase):
def test_default_mode_writes_linesep(self):
# See https://github.com/gevent/gevent/issues/1282
# libuv 1.x interferes with the default line mode on
# Windows.
# First, make sure we initialize gevent
gevent.get_hub()
fileno, path = self._mkstemp('.gevent.test__fileobject.test_default')
os.close(fileno)
with open(path, "w") as f:
f.write("\n")
with open(path, "rb") as f:
data = f.read()
self.assertEqual(data, os.linesep.encode('ascii'))
| TestTextMode |
python | un33k__django-uuslug | uuslug/tests/tests.py | {
"start": 469,
"end": 4253
} | class ____(TestCase):
"""Tests for Slug - Unicode"""
def test_manager(self):
txt = "This is a test ---"
r = slugify(txt)
self.assertEqual(r, "this-is-a-test")
txt = "This -- is a ## test ---"
r = slugify(txt)
self.assertEqual(r, "this-is-a-test")
txt = '影師嗎'
r = slugify(txt)
self.assertEqual(r, "ying-shi-ma")
txt = 'C\'est déjà l\'été.'
r = slugify(txt)
self.assertEqual(r, "c-est-deja-l-ete")
txt = 'Nín hǎo. Wǒ shì zhōng guó rén'
r = slugify(txt)
self.assertEqual(r, "nin-hao-wo-shi-zhong-guo-ren")
txt = 'Компьютер'
r = slugify(txt)
self.assertEqual(r, "kompiuter")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt)
self.assertEqual(r, "jaja-lol-mememeoo-a")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=9)
self.assertEqual(r, "jaja-lol")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=15)
self.assertEqual(r, "jaja-lol-mememe")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=50)
self.assertEqual(r, "jaja-lol-mememeoo-a")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=15, word_boundary=True)
self.assertEqual(r, "jaja-lol-a")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=17, word_boundary=True)
self.assertEqual(r, "jaja-lol-mememeoo")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=18, word_boundary=True)
self.assertEqual(r, "jaja-lol-mememeoo")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=19, word_boundary=True)
self.assertEqual(r, "jaja-lol-mememeoo-a")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=20, word_boundary=True, separator=".")
self.assertEqual(r, "jaja.lol.mememeoo.a")
txt = 'jaja---lol-méméméoo--a'
r = slugify(txt, max_length=20, word_boundary=True, separator="ZZZZZZ")
self.assertEqual(r, "jajaZZZZZZlolZZZZZZmememeooZZZZZZa")
txt = "___This is a test ---"
r = slugify(txt)
self.assertEqual(r, "this-is-a-test")
txt = "___This is a test___"
r = slugify(txt)
self.assertEqual(r, "this-is-a-test")
txt = 'one two three four five'
r = slugify(txt, max_length=13, word_boundary=True, save_order=True)
self.assertEqual(r, "one-two-three")
txt = 'one two three four five'
r = slugify(txt, max_length=13, word_boundary=True, save_order=False)
self.assertEqual(r, "one-two-three")
txt = 'one two three four five'
r = slugify(txt, max_length=12, word_boundary=True, save_order=False)
self.assertEqual(r, "one-two-four")
txt = 'one two three four five'
r = slugify(txt, max_length=12, word_boundary=True, save_order=True)
self.assertEqual(r, "one-two")
txt = 'this has a stopword'
r = slugify(txt, stopwords=['stopword'])
self.assertEqual(r, 'this-has-a')
txt = 'the quick brown fox jumps over the lazy dog'
r = slugify(txt, stopwords=['the'])
self.assertEqual(r, 'quick-brown-fox-jumps-over-lazy-dog')
txt = 'Foo A FOO B foo C'
r = slugify(txt, stopwords=['foo'])
self.assertEqual(r, 'a-b-c')
txt = 'Foo A FOO B foo C'
r = slugify(txt, stopwords=['FOO'])
self.assertEqual(r, 'a-b-c')
txt = 'the quick brown fox jumps over the lazy dog in a hurry'
r = slugify(txt, stopwords=['the', 'in', 'a', 'hurry'])
self.assertEqual(r, 'quick-brown-fox-jumps-over-lazy-dog')
| SlugUnicodeTestCase |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/test_trainer.py | {
"start": 65610,
"end": 66008
} | class ____(BoringModel):
def on_train_start(self) -> None:
raise Exception("Error during train")
def on_validation_start(self) -> None:
raise Exception("Error during validation")
def on_test_start(self) -> None:
raise Exception("Error during test")
def on_predict_start(self) -> None:
raise Exception("Error during predict")
| TrainerStagesErrorsModel |
python | numba__numba | numba/tests/test_sort.py | {
"start": 35892,
"end": 40006
} | class ____(MemoryLeakMixin, TestCase):
def test_01(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def external_key(z):
return 1. / z
@njit
def foo(x, key=None):
new_x = x[:]
new_x.sort(key=key)
return sorted(x[:], key=key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
self.assertPreciseEqual(foo(a[:], external_key),
foo.py_func(a[:], external_key))
def test_02(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def foo(x):
def closure_key(z):
return 1. / z
new_x = x[:]
new_x.sort(key=closure_key)
return sorted(x[:], key=closure_key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
def test_03(self):
a = [3, 1, 4, 1, 5, 9]
def gen(compiler):
@compiler
def bar(x, func):
new_x = x[:]
new_x.sort(key=func)
return sorted(x[:], key=func), new_x
@compiler
def foo(x):
def closure_escapee_key(z):
return 1. / z
return bar(x, closure_escapee_key)
return foo
self.assertPreciseEqual(gen(njit)(a[:]), gen(nop_compiler)(a[:]))
def test_04(self):
a = ['a','b','B','b','C','A']
@njit
def external_key(z):
return z.upper()
@njit
def foo(x, key=None):
new_x = x[:]
new_x.sort(key=key)
return sorted(x[:], key=key), new_x
self.assertPreciseEqual(foo(a[:]), foo.py_func(a[:]))
self.assertPreciseEqual(foo(a[:], external_key),
foo.py_func(a[:], external_key))
def test_05(self):
a = ['a','b','B','b','C','A']
@njit
def external_key(z):
return z.upper()
@njit
def foo(x, key=None, reverse=False):
new_x = x[:]
new_x.sort(key=key, reverse=reverse)
return (sorted(x[:], key=key, reverse=reverse), new_x)
for key, rev in itertools.product((None, external_key),
(True, False, 1, -12, 0)):
self.assertPreciseEqual(foo(a[:], key, rev),
foo.py_func(a[:], key, rev))
def test_optional_on_key(self):
a = [3, 1, 4, 1, 5, 9]
@njit
def foo(x, predicate):
if predicate:
def closure_key(z):
return 1. / z
else:
closure_key = None
new_x = x[:]
new_x.sort(key=closure_key)
return (sorted(x[:], key=closure_key), new_x)
with self.assertRaises(errors.TypingError) as raises:
TF = True
foo(a[:], TF)
msg = "Key must concretely be None or a Numba JIT compiled function"
self.assertIn(msg, str(raises.exception))
def test_exceptions_sorted(self):
@njit
def foo_sorted(x, key=None, reverse=False):
return sorted(x[:], key=key, reverse=reverse)
@njit
def foo_sort(x, key=None, reverse=False):
new_x = x[:]
new_x.sort(key=key, reverse=reverse)
return new_x
@njit
def external_key(z):
return 1. / z
a = [3, 1, 4, 1, 5, 9]
for impl in (foo_sort, foo_sorted):
# check illegal key
with self.assertRaises(errors.TypingError) as raises:
impl(a, key="illegal")
expect = "Key must be None or a Numba JIT compiled function"
self.assertIn(expect, str(raises.exception))
# check illegal reverse
with self.assertRaises(errors.TypingError) as raises:
impl(a, key=external_key, reverse="go backwards")
expect = "an integer is required for 'reverse'"
self.assertIn(expect, str(raises.exception))
| TestSortSlashSortedWithKey |
python | ray-project__ray | python/ray/llm/_internal/serve/routing_policies/prefix_aware/prefix_tree.py | {
"start": 26034,
"end": 26387
} | class ____(PrefixTree):
def getattr(self, attribute: str) -> Any:
"""
Get an attribute of the PrefixTree.
Note: This method is intended to be used only in tests.
"""
return getattr(self, attribute)
def setattr(self, attribute: str, value: Any) -> None:
setattr(self, attribute, value)
| PrefixTreeActor |
python | spyder-ide__spyder | spyder/config/user.py | {
"start": 35850,
"end": 36016
} | class ____(MultiUserConfig):
"""Plugin configuration handler with multifile support."""
def get_config_class(self):
return PluginConfig
| PluginMultiConfig |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 13423,
"end": 13907
} | class ____(BaseBackend):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._data = {'can-delete': {'result': 'foo'}}
def _restore_group(self, group_id):
if group_id == 'exists':
return {'result': 'group'}
def _get_task_meta_for(self, task_id):
if task_id == 'task-exists':
return {'result': 'task'}
def _delete_group(self, group_id):
self._data.pop(group_id, None)
| DictBackend |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 2283,
"end": 3461
} | class ____(BaseClass):
def __init__(self):
super(InvalidSuperChecks, self).not_a_method() # [not-callable]
super(InvalidSuperChecks, self).attribute_error() # [no-member]
super(InvalidSuperChecks, self).function(42)
super(InvalidSuperChecks, self).function() # [no-value-for-parameter]
super(InvalidSuperChecks, self).function(42, 24, 24) # [too-many-function-args]
# +1: [unexpected-keyword-arg,no-value-for-parameter]
super(InvalidSuperChecks, self).function(lala=42)
# Even though BaseClass has a __getattr__, that won't
# be called.
super(InvalidSuperChecks, self).attribute_error() # [no-member]
# Regression for pylint-dev/pylint/issues/773
import subprocess
# The problem was related to astroid not filtering statements
# at scope level properly, basically not doing strong updates.
try:
TimeoutExpired = subprocess.TimeoutExpired
except AttributeError:
class TimeoutExpired(subprocess.CalledProcessError):
def __init__(self):
returncode = -1
self.timeout = -1
super(TimeoutExpired, self).__init__("", returncode)
| InvalidSuperChecks |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 145227,
"end": 157411
} | class ____(HasSchemaAttr, IdentityOptions, DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`_engine.Engine`
or :class:`_engine.Connection`,
rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
"some_table",
metadata,
Column(
"id",
Integer,
Sequence("some_table_seq", start=1),
primary_key=True,
),
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:ref:`defaults_sequences`
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = "sequence"
is_sequence = True
column: Optional[Column[Any]]
data_type: Optional[TypeEngine[int]]
metadata: Optional[MetaData]
@util.deprecated_params(
order=(
"2.1",
"This parameter is supported only by Oracle Database, "
"use ``oracle_order`` instead.",
)
)
def __init__(
self,
name: str,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
schema: Optional[Union[str, Literal[SchemaConst.BLANK_SCHEMA]]] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
data_type: Optional[_TypeEngineArgument[int]] = None,
optional: bool = False,
quote: Optional[bool] = None,
metadata: Optional[MetaData] = None,
quote_schema: Optional[bool] = None,
for_update: bool = False,
**dialect_kw: Any,
) -> None:
"""Construct a :class:`.Sequence` object.
:param name: the name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
.. versionchanged:: 2.0 The :paramref:`.Sequence.start` parameter
is required in order to have DDL emit "START WITH". This is a
reversal of a change made in version 1.4 which would implicitly
render "START WITH 1" if the :paramref:`.Sequence.start` were
not included. See :ref:`change_7211` for more detail.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
respectively. If cycle=False (the default) any calls to nextval
after the sequence has reached its maximum value will return an
error.
:param schema: optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`_schema.MetaData`
is also present are the same
as that of :paramref:`_schema.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle Database and PostgreSQL.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword, understood by Oracle Database, indicating the sequence
is definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
:param data_type: The type to be returned by the sequence, for
dialects that allow us to choose between INTEGER, BIGINT, etc.
(e.g., mssql).
.. versionadded:: 1.4.0
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the :paramref:`_schema.Sequence.name` on or off.
When left at its default of ``None``, normal quoting rules based
on casing and reserved words take place.
:param quote_schema: Set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`_schema.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`_schema.MetaData`
gains the following
capabilities:
* The :class:`.Sequence` will inherit the
:paramref:`_schema.MetaData.schema`
parameter specified to the target :class:`_schema.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`_schema.MetaData`
object, if any.
* The :meth:`_schema.MetaData.create_all` and
:meth:`_schema.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`_schema.Table` / :class:`_schema.Column`
that's a member of this
:class:`_schema.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`_schema.MetaData`
via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`_schema.Column`,
should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
DefaultGenerator.__init__(self, for_update=for_update)
IdentityOptions.__init__(
self,
start=start,
increment=increment,
minvalue=minvalue,
maxvalue=maxvalue,
nominvalue=nominvalue,
nomaxvalue=nomaxvalue,
cycle=cycle,
cache=cache,
order=order,
**dialect_kw,
)
self.column = None
self.name = quoted_name(name, quote)
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name.construct(schema, quote_schema)
self._key = _get_table_key(name, schema)
if data_type is not None:
self.data_type = to_instance(data_type)
else:
self.data_type = None
if metadata:
self._set_metadata(metadata)
else:
self.metadata = None
@util.preload_module("sqlalchemy.sql.functions")
def next_value(self) -> Function[int]:
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
"""
return util.preloaded.sql_functions.func.next_value(self)
def _copy(self) -> Sequence:
return Sequence(
name=self.name,
schema=self.schema,
data_type=self.data_type,
optional=self.optional,
metadata=self.metadata,
for_update=self.for_update,
**self._as_dict(),
**self.dialect_kwargs,
)
def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None:
assert isinstance(parent, Column)
super()._set_parent(parent, **kw)
parent._on_table_attach(self._set_table)
def _set_table(self, column: Column[Any], table: Table) -> None:
self._set_metadata(table.metadata)
def _set_metadata(self, metadata: MetaData) -> None:
self.metadata = metadata
self.metadata._register_object(self)
metadata._sequences[self._key] = self
def create(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.SEQUENCES,
) -> None:
"""Creates this sequence in the database."""
bind._run_ddl_visitor(ddl.SchemaGenerator, self, checkfirst=checkfirst)
def drop(
self,
bind: _CreateDropBind,
checkfirst: Union[bool, CheckFirst] = CheckFirst.SEQUENCES,
) -> None:
"""Drops this sequence from the database."""
bind._run_ddl_visitor(ddl.SchemaDropper, self, checkfirst=checkfirst)
def _not_a_column_expr(self) -> NoReturn:
raise exc.InvalidRequestError(
f"This {self.__class__.__name__} cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element."
)
@inspection._self_inspects
| Sequence |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 88733,
"end": 89049
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification")
# Private on purpose, the public class will add the deprecation warnings.
| AutoModelForVideoClassification |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/groups_response_builder.py | {
"start": 228,
"end": 460
} | class ____(HttpResponseBuilder):
@classmethod
def groups_response(cls) -> "GroupsResponseBuilder":
return cls(find_template("groups", __file__), FieldPath("groups"), CursorBasedPaginationStrategy())
| GroupsResponseBuilder |
python | django__django | tests/db_functions/math/test_degrees.py | {
"start": 272,
"end": 2476
} | class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_degrees=Degrees("normal")).first()
self.assertIsNone(obj.null_degrees)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-12.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(
n1_degrees=Degrees("n1"), n2_degrees=Degrees("n2")
).first()
self.assertIsInstance(obj.n1_degrees, Decimal)
self.assertIsInstance(obj.n2_degrees, Decimal)
self.assertAlmostEqual(obj.n1_degrees, Decimal(math.degrees(obj.n1)))
self.assertAlmostEqual(obj.n2_degrees, Decimal(math.degrees(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-27.5, f2=0.33)
obj = FloatModel.objects.annotate(
f1_degrees=Degrees("f1"), f2_degrees=Degrees("f2")
).first()
self.assertIsInstance(obj.f1_degrees, float)
self.assertIsInstance(obj.f2_degrees, float)
self.assertAlmostEqual(obj.f1_degrees, math.degrees(obj.f1))
self.assertAlmostEqual(obj.f2_degrees, math.degrees(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=-20, normal=15, big=-1)
obj = IntegerModel.objects.annotate(
small_degrees=Degrees("small"),
normal_degrees=Degrees("normal"),
big_degrees=Degrees("big"),
).first()
self.assertIsInstance(obj.small_degrees, float)
self.assertIsInstance(obj.normal_degrees, float)
self.assertIsInstance(obj.big_degrees, float)
self.assertAlmostEqual(obj.small_degrees, math.degrees(obj.small))
self.assertAlmostEqual(obj.normal_degrees, math.degrees(obj.normal))
self.assertAlmostEqual(obj.big_degrees, math.degrees(obj.big))
def test_transform(self):
with register_lookup(DecimalField, Degrees):
DecimalModel.objects.create(n1=Decimal("5.4"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("-30"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__degrees__gt=0).get()
self.assertEqual(obj.n1, Decimal("5.4"))
| DegreesTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 9002,
"end": 9089
} | class ____(Iterator[str]):
def __iter__(self: Self) -> Self:
...
| GoodIterator |
python | encode__django-rest-framework | tests/test_request.py | {
"start": 13341,
"end": 13455
} | class ____(TestCase):
def test_request_is_subscriptable(self):
assert Request is Request["foo"]
| TestTyping |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 6207,
"end": 6264
} | class ____(Exception):
"Record not found"
| NotFoundError |
python | Textualize__rich | rich/__main__.py | {
"start": 447,
"end": 7592
} | class ____:
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
for y in range(0, 5):
for x in range(options.max_width):
h = x / options.max_width
l = 0.1 + ((y / 5) * 0.7)
r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
yield Segment("▄", Style(color=color, bgcolor=bgcolor))
yield Segment.line()
def __rich_measure__(
self, console: "Console", options: ConsoleOptions
) -> Measurement:
return Measurement(1, options.max_width)
def make_test_card() -> Table:
"""Get a renderable that demonstrates a number of features."""
table = Table.grid(padding=1, pad_edge=True)
table.title = "Rich features"
table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
table.add_column("Demonstration")
color_table = Table(
box=None,
expand=False,
show_header=False,
show_edge=False,
pad_edge=False,
)
color_table.add_row(
(
"✓ [bold green]4-bit color[/]\n"
"✓ [bold blue]8-bit color[/]\n"
"✓ [bold magenta]Truecolor (16.7 million)[/]\n"
"✓ [bold yellow]Dumb terminals[/]\n"
"✓ [bold cyan]Automatic color conversion"
),
ColorBox(),
)
table.add_row("Colors", color_table)
table.add_row(
"Styles",
"All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
)
lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
lorem_table = Table.grid(padding=1, collapse_padding=True)
lorem_table.pad_edge = False
lorem_table.add_row(
Text(lorem, justify="left", style="green"),
Text(lorem, justify="center", style="yellow"),
Text(lorem, justify="right", style="blue"),
Text(lorem, justify="full", style="red"),
)
table.add_row(
"Text",
Group(
Text.from_markup(
"""Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
),
lorem_table,
),
)
def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
table = Table(show_header=False, pad_edge=False, box=None, expand=True)
table.add_column("1", ratio=1)
table.add_column("2", ratio=1)
table.add_row(renderable1, renderable2)
return table
table.add_row(
"Asian\nlanguage\nsupport",
":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
)
markup_example = (
"[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
)
table.add_row("Markup", markup_example)
example_table = Table(
show_edge=False,
show_header=True,
expand=False,
row_styles=["none", "dim"],
box=box.SIMPLE,
)
example_table.add_column("[green]Date", style="green", no_wrap=True)
example_table.add_column("[blue]Title", style="blue")
example_table.add_column(
"[cyan]Production Budget",
style="cyan",
justify="right",
no_wrap=True,
)
example_table.add_column(
"[magenta]Box Office",
style="magenta",
justify="right",
no_wrap=True,
)
example_table.add_row(
"Dec 20, 2019",
"Star Wars: The Rise of Skywalker",
"$275,000,000",
"$375,126,118",
)
example_table.add_row(
"May 25, 2018",
"[b]Solo[/]: A Star Wars Story",
"$275,000,000",
"$393,151,347",
)
example_table.add_row(
"Dec 15, 2017",
"Star Wars Ep. VIII: The Last Jedi",
"$262,000,000",
"[bold]$1,332,539,889[/bold]",
)
example_table.add_row(
"May 19, 1999",
"Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
"$115,000,000",
"$1,027,044,677",
)
table.add_row("Tables", example_table)
code = '''\
def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
"""Iterate and generate a tuple with a flag for last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
for value in iter_values:
yield False, previous_value
previous_value = value
yield True, previous_value'''
pretty_data = {
"foo": [
3.1427,
(
"Paul Atreides",
"Vladimir Harkonnen",
"Thufir Hawat",
),
],
"atomic": (False, True, None),
}
table.add_row(
"Syntax\nhighlighting\n&\npretty\nprinting",
comparison(
Syntax(code, "python3", line_numbers=True, indent_guides=True),
Pretty(pretty_data, indent_guides=True),
),
)
markdown_example = """\
# Markdown
Supports much of the *markdown* __syntax__!
- Headers
- Basic formatting: **bold**, *italic*, `code`
- Block quotes
- Lists, and more...
"""
table.add_row(
"Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
)
table.add_row(
"+more!",
"""Progress bars, columns, styled logging handler, tracebacks, etc...""",
)
return table
if __name__ == "__main__": # pragma: no cover
from rich.panel import Panel
console = Console(
file=io.StringIO(),
force_terminal=True,
)
test_card = make_test_card()
# Print once to warm cache
start = process_time()
console.print(test_card)
pre_cache_taken = round((process_time() - start) * 1000.0, 1)
console.file = io.StringIO()
start = process_time()
console.print(test_card)
taken = round((process_time() - start) * 1000.0, 1)
c = Console(record=True)
c.print(test_card)
console = Console()
console.print(f"[dim]rendered in [not dim]{pre_cache_taken}ms[/] (cold cache)")
console.print(f"[dim]rendered in [not dim]{taken}ms[/] (warm cache)")
console.print()
console.print(
Panel.fit(
"[b magenta]Hope you enjoy using Rich![/]\n\n"
"Please consider sponsoring me if you get value from my work.\n\n"
"Even the price of a ☕ can brighten my day!\n\n"
"https://github.com/sponsors/willmcgugan",
border_style="red",
title="Help ensure Rich is maintained",
)
)
| ColorBox |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 163087,
"end": 168126
} | class ____(DataplexCatalogBaseOperator):
r"""
List Entry resources.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogListEntriesOperator`
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param filter_by: Optional. A filter on the entries to return. Filters are case-sensitive.
You can filter the request by the following fields:
- entry_type
- entry_source.display_name
The comparison operators are =, !=, <, >, <=, >=. The service compares strings according to
lexical order.
You can use the logical operators AND, OR, NOT in the filter. You can use Wildcard "*", but for
entry_type you need to provide the full project id or number.
Example filter expressions:
- "entry_source.display_name=AnExampleDisplayName"
- "entry_type=projects/example-project/locations/global/entryTypes/example-entry_type"
- "entry_type=projects/example-project/locations/us/entryTypes/a\*
OR entry_type=projects/another-project/locations/\*"
- "NOT entry_source.display_name=AnotherExampleDisplayName".
:param page_size: Optional. Number of items to return per page. If there are remaining results,
the service returns a next_page_token. If unspecified, the service returns at most 10 Entries.
The maximum value is 100; values above 100 will be coerced to 100.
:param page_token: Optional. Page token received from a previous ``ListEntries`` call. Provide
this to retrieve the subsequent page.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(DataplexCatalogBaseOperator.template_fields)
operator_extra_links = (DataplexCatalogEntryGroupLink(),)
def __init__(
self,
entry_group_id: str,
page_size: int | None = None,
page_token: str | None = None,
filter_by: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_group_id = entry_group_id
self.page_size = page_size
self.page_token = page_token
self.filter_by = filter_by
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"entry_group_id": self.entry_group_id,
}
def execute(self, context: Context):
DataplexCatalogEntryGroupLink.persist(context=context)
self.log.info(
"Listing Dataplex Catalog Entry from location %s.",
self.location,
)
try:
entries_on_page = self.hook.list_entries(
entry_group_id=self.entry_group_id,
location=self.location,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
filter_by=self.filter_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Entries on page: %s", entries_on_page)
context["ti"].xcom_push(
key="entry_page",
value=ListEntriesResponse.to_dict(entries_on_page._response),
)
except Exception as ex:
raise AirflowException(ex)
# Constructing list to return Entries in readable format
entries_list = [
MessageToDict(entry._pb, preserving_proto_field_name=True)
for entry in next(iter(entries_on_page.pages)).entries
]
return entries_list
| DataplexCatalogListEntriesOperator |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 121419,
"end": 166759
} | class ____(fx.Tracer):
"""
Holds an FX graph that is being traced. OutputGraph owns a SubgraphTracer
and the separation of responsibilities is that SubgraphTracer is
responsible for building the graph while OutputGraph is responsible for
compiling and executing the graph.
"""
    def __init__(
        self,
        output_graph: "OutputGraph",
        parent: Optional["SubgraphTracer"] = None,
        is_export: bool = False,
        source_target: Optional[Target] = None,
    ) -> None:
        """
        Initialize a SubgraphTracer.

        :param output_graph: The owning OutputGraph; held via ``weakref.proxy``
            to avoid a reference cycle between the two objects.
        :param parent: Enclosing SubgraphTracer when tracing the body of a
            nested HigherOrderOperator, or None for the root tracer.
        :param is_export: Whether tracing for export (stricter graph-input rules;
            see note [Export inputs must be explicitly passed in]).
        :param source_target: The operator this subgraph is attached to; used
            to build ``source_fn_stack`` for debugging/transforms.
        """
        super().__init__()
        self.output_graph = weakref.proxy(output_graph)
        self.graph = torch.fx.Graph()
        # See note [Export inputs must be explicitly passed in]
        self.is_export = is_export
        # Map from graph input name to its placeholder proxy object, where the
        # map's keys give all current placeholder node names and can be used to
        # create unique node names
        self.input_name_to_proxy: dict[str, fx.Proxy] = {}
        # Node => computed real value (see utils.get_real_value)
        self.real_value_cache: dict[fx.Node, torch.Tensor] = {}
        # SubgraphTracers can be nested. See NOTE [HigherOrderOperator tracing design]
        self.parent = parent
        self.source_target = source_target
        # A dict mapping previously free variables (Proxy objects)
        # to new Proxy objects that wrap inputs to this subgraph.
        #
        # This dict maps proxies in outer graphs to placeholders in current graph.
        # It serves two purposes:
        # - Proxies are associated with VariableTrackers. If we see
        #   the same VariableTracker twice (and it is a free variable),
        #   then we want to use the same Proxy in the current subgraph to
        #   record the tracing.
        # - If we are tracing a HigherOrderOperator's body_fn, then we
        #   need to keep track of what free variables were lifted so we can
        #   rewrite the HigherOrderOperator call using the traced body_fn.
        #   Dicts maintain the order of args for the HigherOrderOperator call.
        self.lifted_freevars: dict[fx.Proxy, fx.Proxy] = {}
        # map basic symbols (backed and unbacked) to their bound proxies.
        # There are only two cases where bound_symbols will be recorded:
        # 1. when we create_graph_input for a backed SymInt that's basic symbol
        # 2. when we track_produced_symints for intermediate results
        # bound_symbols always map the symbol to the proxy whose
        # tracer is the current tracer that's readily accessible in current tracer's graph.
        self.bound_symbols: dict[sympy.Symbol, Union[torch.fx.Proxy, LazyProxy]] = {}
        # Maps _DynamicScalar object ids to allocated SymInt nodes, for symbol reuse
        self.dynamic_scalar_nodes: dict[int, torch.SymInt] = {}
        # Last instruction for which we emitted a TRACE FX log line (3.11+ only).
        self.prev_inst = None
        # True if this tracer is currently tracing into torch.utils.checkpoint
        # as part of speculate_subgraph.
        self.under_activation_checkpoint = False
        # True if we want to allow externally visible side-effects (doesn't throw error on their existence)
        # during this tracer's tracing of torch.utils.checkpoint (via speculate_subgraph).
        # Only safe if we know for sure that *NOT* replaying these side-effects during
        # backward recomputation of the checkpoint region doesn't affect its correctness.
        self.allow_side_effects_under_checkpoint = False
        # True if we want to allow externally visible side-effects (doesn't throw error on their existence)
        # during this tracer's tracing. This is currently only used by experimental AC out-of-tree
        # via torch._dynamo.utils._disable_side_effect_safety_checks_for_current_subtracer.
        # Note: Externally visible side-effects are allowed if this flag OR the above flag is True.
        self.unsafe_allow_externally_visible_side_effects = False
        # True if this tracer is currently tracing (reconstructing) into a Python generator
        self.is_reconstructing_generator = False
        # Nesting depth of this tracer; the root tracer is level 0.
        self.debug_level: int = parent.debug_level + 1 if parent is not None else 0
        # Retracing bookkeeping: code object last seen by create_proxy, and the
        # original GraphModule's node metadata used to re-attach meta fields.
        self._cur_code = None
        self._orig_gm_meta: Optional[list[Any]] = None
        self._orig_gm_lineno_map: Optional[dict[int, Optional[int]]] = None
        self._orig_gm_firstlineno: Optional[int] = None
        # Each SubgraphTracer is associated with a source target, which indicates
        # which operator this subgraph is attached to. We compute a source_fn_stack
        # based on the source target. For the root tracer, it's set to [].
        # This is useful for debugging and transforming the exported graph.
        if self.parent is None:
            self.source_fn_stack: list[Any] = []
        else:
            self.source_fn_stack = self.parent.source_fn_stack + [
                (self.graph._target_to_str(source_target), source_target)
            ]
        # This is used to create a unique name for the placeholder
        self._used_names: OrderedSet[str] = OrderedSet()
        # Stores the versions of the input tensors at the time they are inserted
        # as placeholders in the graph. This is used to track input mutation.
        self._input_versions_at_beginning: list[int] = []
        if torch.is_inference_mode_enabled():
            raise RuntimeError(
                "Inference mode is supposed to be disabled during compilation. Please open an issue."
            )
        # VariableTrackers for tensors/SymInts produced while tracing this subgraph.
        self.tracked_tensor_or_symint_vt: OrderedSet[VariableTracker] = OrderedSet()
    def record_tensor_or_symint_vt(self, vt: VariableTracker) -> None:
        """Track *vt* in this tracer's set of tensor/SymInt VariableTrackers."""
        self.tracked_tensor_or_symint_vt.add(vt)
# preserve original meta if it is available
def _maybe_preserve_original_meta(
self, tx: "InstructionTranslatorBase", node: fx.Node
) -> None:
if (
self._orig_gm_meta
and self._orig_gm_lineno_map
and self._orig_gm_firstlineno
):
lineno = tx.current_instruction.starts_line
node_idx = None
if lineno is not None:
node_idx = self._orig_gm_lineno_map.get(
lineno - self._orig_gm_firstlineno, None
)
if node_idx is not None:
meta = self._orig_gm_meta[node_idx]
for field in fx.proxy._COPY_META_FIELDS:
if field in meta:
node.meta[field] = meta[field]
if "stack_trace" in meta:
node.meta["stack_trace"] = meta["stack_trace"]
    def create_proxy(
        self,
        kind: str,
        target: Any,
        args: Any,
        kwargs: Any,
        name: Optional[str] = None,
        type_expr: Optional[Any] = None,
        proxy_factory_fn: Optional[Callable[[fx.Node], fx.Proxy]] = None,
    ) -> fx.Proxy:
        """
        Create an fx.Proxy for a traced operation, attaching Dynamo metadata.

        On top of ``fx.Tracer.create_proxy`` this:
        - lifts free-variable proxies to inputs when tracing a nested subgraph,
        - attaches ``nn_module_stack`` / ``source_fn_stack`` / ``stack_trace``
          node meta,
        - emits TRACE FX debug logging (Python 3.11+ only),
        - registers the node with the region tracker when graph deduplication
          is enabled.
        """
        # NOTE: [Nested SubgraphTracer and free_variable handling]
        # --------------------------------------------------------
        # Read NOTE [HigherOrderOperator tracing design] first.
        #
        # Let's say we're in the middle of introspecting the body of a possibly
        # nested HigherOrderOperator, and we see a free variable.
        #
        # There are two cases:
        # 1. We see a free variable that is already tracked by Dynamo.
        # 2. We see a free variable that has not been tracked by Dynamo
        #
        # In case 1, we call `maybe_lift_tracked_freevar_to_input` (below)
        # which will lift the freevar to be an input of this subgraph
        # and also recursively lift it to be an input on the parent(s).
        #
        # In case 2, before the call to `create_proxy`, the InstructionTranslator
        # will see the freevar when it gets loaded by Python bytecode.
        # E.g. for Python 3.11 the bytecodes that may do this are LOAD_DEREF or
        # LOAD_GLOBAL.
        # There, the InstructionTranslator asks Dynamo to begin tracking the
        # freevar by building a new Variable.
        # Building a new Variable automatically lifts the freevar to be an
        # input of the root SubgraphTracer.
        #
        # The implications for the code below are:
        # - We will always be in Case 1 when we get to this code.
        # - Any "free variable" we encounter here is guaranteed to already be
        #   bound, that is, it is either a graph input of the root graph, or
        #   some local variable of the root graph or a subgraph.
        # - The additional work we need to do here is *only* that we need to
        #   lift this free variable into inputs (recursively) of each nested
        #   higher-order-op subgraph until we hit the subgraph where the free
        #   variable is bound
        if self.parent is not None:
            flat_args, tree_spec = pytree.tree_flatten((args, kwargs))
            new_flat_args = []
            for arg in flat_args:
                maybe_new_arg = self.maybe_lift_tracked_freevar_to_input(arg)
                new_flat_args.append(maybe_new_arg)
            args, kwargs = pytree.tree_unflatten(new_flat_args, tree_spec)

        rv = super().create_proxy(
            kind,
            target,
            args,
            kwargs,
            name,
            type_expr,
            proxy_factory_fn,  # type: ignore[arg-type]
        )

        # append stack trace to fx node
        tx = self.output_graph.current_tx

        # log detailed location of line of code in 3.11
        if sys.version_info >= (3, 11) and kind in (
            "call_function",
            "call_method",
            "call_module",
        ):
            cur_inst = tx.current_instruction
            if (
                cur_inst is not self.prev_inst
                and cur_inst.positions is not None
                and cur_inst.positions.lineno is not None
            ):
                tx_code = tx.f_code
                header = tx.get_line_of_code_header(lineno=cur_inst.positions.lineno)

                def get_trace_call_log_str() -> str:
                    line = get_instruction_source_311(tx_code, cur_inst).rstrip()
                    return f"TRACE FX call {rv.node.name} from {header}\n{line}"

                trace_call_log.debug("%s", LazyString(get_trace_call_log_str))
                self.prev_inst = cur_inst

        # update reference to original meta if we're tracing a new code object
        is_retracing = False
        if tx.f_code is not self._cur_code:
            orig_graphmodule_maybe = code_context.get_context(tx.f_code).get(
                "orig_graphmodule", lambda: None
            )()
            if isinstance(orig_graphmodule_maybe, torch.fx.GraphModule):
                is_retracing = True
                self._orig_gm_meta = [
                    nd.meta for nd in orig_graphmodule_maybe.graph.nodes
                ]
                self._orig_gm_lineno_map = orig_graphmodule_maybe._lineno_map
                self._orig_gm_firstlineno = (
                    orig_graphmodule_maybe.forward.__code__.co_firstlineno
                )
            else:
                self._orig_gm_meta = None
                self._orig_gm_lineno_map = None
                self._orig_gm_firstlineno = None
        nn_module_stack = tx.nn_module_stack
        if nn_module_stack:
            rv.node.meta["nn_module_stack"] = nn_module_stack.copy()

        if kind in {"call_function", "call_method"}:
            stack = (rv.node.name, target)
            if nn_module_stack:
                # Current codebase assumes that the nn_module_stack has the
                # builtin modules in the stack.
                current_nn_module = list(rv.node.meta["nn_module_stack"].values())[-1][
                    1
                ]
                if current_nn_module.__module__.startswith(
                    ("torch.nn.modules", "torch.ao.")
                ) and not current_nn_module.__module__.startswith(
                    "torch.nn.modules.container"
                ):
                    stack = (rv.node.name, current_nn_module)
            rv.node.meta["source_fn_stack"] = self.source_fn_stack + [stack]
        elif kind == "call_module":
            if self.parent is not None:
                # TODO can remove once inline_inbuilt_nn_modules is always True
                unimplemented(
                    gb_type="Invoking an nn.Module inside a higher order operator",
                    context=f"Higher order op name: {self.source_target}",
                    explanation="This is not supported.",
                    hints=[],
                )
            # For modules we store the class
            rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                (
                    rv.node.name,
                    next(
                        ty
                        for k, (_, ty) in rv.node.meta["nn_module_stack"].items()
                        if k.split("@")[0] == target
                    ),
                )
            ]

        self._maybe_preserve_original_meta(tx, rv.node)

        # When not retracing, fill in any meta fields that were not attached above.
        if not is_retracing:
            if "nn_module_stack" not in rv.node.meta:
                nn_module_stack = tx.nn_module_stack
                if nn_module_stack:
                    rv.node.meta["nn_module_stack"] = nn_module_stack.copy()

            if "source_fn_stack" not in rv.node.meta:
                if kind in {"call_function", "call_method"}:
                    rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                        (rv.node.name, target)
                    ]
                elif kind == "call_module":
                    if self.parent is not None:
                        # TODO can remove once inline_inbuilt_nn_modules is always True
                        unimplemented(
                            gb_type="Invoking an nn.Module inside a HigherOrderOperator",
                            context="",
                            explanation="This is not supported.",
                            hints=[],
                        )
                    # For modules we store the class
                    rv.node.meta["source_fn_stack"] = self.source_fn_stack + [
                        (
                            rv.node.name,
                            rv.node.meta["nn_module_stack"][target][1],
                        )
                    ]

        if "stack_trace" not in rv.node.meta:
            frame_summaries: list[traceback.FrameSummary] = []
            while tx:
                # Avoid frame summaries from inside the torch/nn/modules. This ensures that we keep the stack trace of
                # the user code.
                if not tx.is_co_filename_from_nn_modules():
                    frame_summaries.append(tx.frame_summary())
                tx = getattr(tx, "parent", None)
            filtered_frame_summaries = [
                frame
                for frame in frame_summaries
                if frame.filename not in uninteresting_files()
            ]
            # Reverse the frame_summaries, such that the innermost frame is at the last
            filtered_frame_summaries.reverse()

            # official from_list stub doesn't have new-style type
            msgs = traceback.StackSummary.from_list(filtered_frame_summaries).format()
            rv.node.stack_trace = "".join(msgs)
        if (
            torch._dynamo.config.use_graph_deduplication
            or torch._dynamo.config.track_nodes_for_deduplication
        ):
            self.output_graph.region_tracker.track_node(
                self.output_graph.current_tx, rv.node
            )
        return rv
def create_node(
self,
op: str,
target: Target,
args: Any = None,
kwargs: Any = None,
name: Optional[str] = None,
type_expr: Optional[Any] = None,
) -> fx.Node:
check_pt2_compliant_op(self.output_graph, op, target, args, kwargs)
if self.parent is not None:
flat_args = pytree.arg_tree_leaves(*args, **kwargs)
for arg in flat_args:
if not isinstance(arg, torch.fx.Node):
continue
assert arg.graph == self.graph, (
"create_node using arg not from this SubgraphTracer"
)
node = super().create_node(op, target, args, kwargs, name, type_expr)
node.meta["creation_timestamp"] = self.output_graph.timestamp
self._used_names.add(node.name)
return node
# Note: we did not override erase_node since
# we call self.graph.erase_node elsewhere
def remove_node(self, node: fx.Node) -> None:
if len(node.users) > 0:
user_graph_nodes: list[torch.fx.Node] = []
for user in node.users:
# For the case where user.graph == self.graph, that is a real bug and will raise
# properly.
if user.graph != self.graph:
# This is a nested graph, which needs to be deleted.
# If we do not do this, we will raise on attempting to remove this.
# As we only get here during restoration cleanup, this is sound.
user_graph_nodes.extend(reversed(list(user.graph.nodes)))
for other_graph_node in user_graph_nodes:
other_graph_node.graph.erase_node(other_graph_node)
self.graph.erase_node(node)
self.input_name_to_proxy.pop(node.name, None)
    # when before=True, we will insert this input before the most recent
    # inserted proxy.  This is a hack to get around an ordering problem,
    # where we first insert a tensor argument, and then insert bindings
    # for SymInts that may occur in the tensor argument.
    # Remove this if https://github.com/pytorch/pytorch/issues/99007 gets
    # fixed.
    def create_graph_input(
        self,
        name: str,
        type_expr: Any,
        example_value: Any,
        before: bool = False,
        source: Optional[Source] = None,
    ) -> fx.Proxy:
        """
        Create a placeholder node (graph input) named *name*.

        Also auto-lifts the basic free symbols appearing in *example_value*'s
        sizes/strides/storage offset (see the NOTE below), records tensor
        versions for input-mutation tracking, and keeps placeholder naming
        unique. Returns the placeholder's proxy.

        :param source: Required for inputs created on the root tracer; may be
            None for subgraph inputs (lifted free variables).
        """
        if isinstance(example_value, torch.Tensor):
            self._input_versions_at_beginning.append(example_value._version)
        log.debug(
            "create_graph_input %s %s %s at debug_level %s before=%s",
            name,
            source.name() if source is not None else "(none)",
            example_value,
            self.debug_level,
            before,
        )
        if source is None:
            assert self.parent is not None, (
                f"you are required to provide a source for inputs {name} example_val {example_value} on the root tracer"
            )

        # Note [Export inputs must be explicitly passed in]
        # In eager, we are generally OK with adding graph inputs whenever we
        # want, because we take care of writing the bytecode that knows how
        # to source all the inputs.
        #
        # In export, this is bad, because you want a self-contained export
        # object which only depends on the inputs you explicitly passed to it.
        # So we are a bit more strict about what sources can become inputs
        # in export
        if self.is_export and self.parent is None:
            assert source is not None
            if not is_from_local_source(source, only_allow_input=True):
                self.output_graph.source_to_user_stacks.setdefault(source, []).append(
                    TracingContext.extract_stack()
                )

        # _used_names contains the names of all the nodes in the graph,
        # including intermediates. This ensures that we do not have a name
        # collision.
        name = get_unique_name_wrt(name, self._used_names)
        if self.input_name_to_proxy:
            prev_name = next(reversed(self.input_name_to_proxy))
            node = self.input_name_to_proxy[prev_name].node
            if before:
                ctx = self.graph.inserting_before(node)
            else:
                ctx = self.graph.inserting_after(node)
        else:
            ctx = self.graph.inserting_before(None)
        with ctx:
            proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr)
            set_example_value(proxy.node, example_value)
            if self.input_name_to_proxy and before:
                k, v = self.input_name_to_proxy.popitem()
                self.input_name_to_proxy[name] = proxy
                self.input_name_to_proxy[k] = v
            else:
                self.input_name_to_proxy[name] = proxy
            # For placeholder nodes, `name` is passed as a str to the target,
            # and then torch.fx decides the node.name. So, record the `target`
            # name as well in the _used_names to prevent any collision.
            self._used_names.add(name)

            # NOTE: [Auto lift basic free symbols when create_graph_input]
            # There are two sources of basic symbols:
            #
            # - They can come from inputs, e.g. when an input tensor is specified as dynamic. We handle
            #   this case by intercepting at create_graph_input. Whenever we call create_graph_input, we
            #   try to also lift the basic symbols in example values as graph input.
            #
            #   1. When create_graph_input for a tensor that has symbolic shapes,
            #   we look for basic symbols in its size and stride, we check if the symbol is bound
            #   in current graph (i.e. bound_symbols), if it's not bound, we'll create a placeholder
            #   for it then recursively check its parent, creates ph if not bound at parent until
            #   reaching the top-level, where we require a source is attached to the proxy.
            #
            #   2. When create_graph_input for a tensor that contains compound exprs,
            #   for example, if an input to subgraph takes size [s1+s2//8], we'll look for the
            #   the free basic symbols in the sizes and lift all of them following 1.
            #
            #   3. When create_graph_input for a symint. The following invariants hold:
            #     a. if symint's expr is a basic symbol, we only lift it once.
            #     b. if symint's expr is compound, we lift the expr as a single input.
            #     The basic symbols in the compound expr are NOT lifted. Because if the basic
            #     symbols are used inside the subgraph they will be lifted according to 3.a
            #
            # - They can come from intermediate results:
            #   For example, data-dependent operators such as t.item(), t.nonzero(), where basic symbols
            #   might be created. For this purpose, we track the basic symbols of intermediate results
            #   immediately after they're created at wrap_fx_proxy with track_produced_symints. Notice
            #   that for basic symbols that're already tracked by create_graph_input, we won't track it again.
            #
            # Also see NOTE: [Export inputs must be explicitly passed in]
            is_strict_export = self.is_export
            is_non_strict_export = torch.compiler.is_compiling()
            if not is_strict_export and not is_non_strict_export:
                if isinstance(example_value, torch.Tensor):
                    self._lift_basic_symbols(example_value, source)
                elif isinstance(example_value, (list, tuple)):
                    for i, e in enumerate(example_value):
                        if not isinstance(e, torch.Tensor):
                            continue
                        e_source = None
                        if source:
                            e_source = GetItemSource(
                                base=source, index=i, index_is_slice=False
                            )
                        self._lift_basic_symbols(e, e_source)

            # Bound the symbol to ph if example_value is a SymInt with basic symbol.
            if isinstance(example_value, torch.SymInt) and isinstance(
                example_value.node.expr, sympy.Symbol
            ):
                self.bound_symbols[example_value.node.expr] = proxy
            return proxy
    # See NOTE: [Nested SubgraphTracer and free_variable handling] for more details
    def lift_tracked_freevar_to_input(
        self, proxy: fx.Proxy
    ) -> Union[LazyProxy, fx.Proxy]:
        """
        Lift the free variable *proxy* (created by an outer tracer) to be an
        input of this subgraph, recursively lifting through parent tracers
        until the tracer that owns the proxy is reached.

        Returns the (possibly pre-existing) proxy bound in this tracer.
        """
        # You're doing something wrong if we are the root SubgraphTracer because
        # Dynamo adds tensors to graph inputs before creating a proxy for them.
        assert self.parent is not None, (
            "lift_tracked_freevar_to_input should not be called on root SubgraphTracer"
        )

        example_value = proxy.node.meta["example_value"]

        # To avoid lifting the same symbol twice, we check whether basic symbols have been tracked.
        # For example, the basic symbols may have already been lifted for current subgraph when
        # we automatically lift basic symbols in the sizes/strides of a tensor t.
        # Suppose parent graph calls sz = t.size()[0], it creates
        # a proxy in parent and the subgraph accesses sz via closure. sz's proxy is not tracked
        # in current sub-tracer so we may lift the same symbol twice.
        if (
            isinstance(example_value, torch.SymInt)
            and example_value.node.expr in self.bound_symbols
        ):
            return self.bound_symbols[example_value.node.expr]

        # Proxies are associated with VariableTracker.
        # It is possible that we've already lifted the Proxy to be an input.
        # If that is the case, just return the already lifted Proxy.
        if proxy in self.lifted_freevars:
            return self.lifted_freevars[proxy]

        # We first lift proxy to parent's graph then lift to current graph's input
        # so that when we bind symints of the sizes in current graph, those symints
        # would already be lifted as inputs to parent graph.
        if proxy.tracer != self.parent:
            self.parent.lift_tracked_freevar_to_input(proxy)

        example_value = proxy.node.meta["example_value"]
        new_proxy = self.create_graph_input(
            proxy.node.name, type(example_value), example_value
        )
        self.lifted_freevars[proxy] = new_proxy
        return new_proxy
def maybe_lift_tracked_freevar_to_input(self, arg: Any) -> Any:
"""
If arg is a free variable, then lift it to be an input.
Returns the new lifted arg (if arg was a freevar), else the
original arg.
"""
if not isinstance(arg, torch.fx.Proxy):
# Note: arg can be a python built-in slice type e.g.
# x[:max_seq] is represented as get_item(t, (slice(None, max_seq, None)))
# we need to also look into the slice variable itself to lift the
# proxies there.
if isinstance(arg, slice):
return slice(
*(
self.maybe_lift_tracked_freevar_to_input(sub_arg)
for sub_arg in (arg.start, arg.stop, arg.step)
)
)
else:
return arg
elif arg.tracer == self:
return arg
return self.lift_tracked_freevar_to_input(arg)
    # See NOTE: [Auto lift basic free symbols when create_graph_input] for overall design
    # You MUST call this API every time when creating a proxy in wrap_fx_proxy for a call
    # that produced symints or tensors with unbacked symint shapes.
    # This function is used to track the symints with its proxies created during
    # dynamo tracing so that subgraph knows how to bind a symbol input with parent's proxy.
    # LazyProxy are created for tensor shapes that're unbacked so that we don't create proxies
    # for symbols that're not going to be used, the LazyProxy will be turned into a proxy
    # when it's lifted as input to subgraph.
    def track_produced_symints(
        self, example_value: Any, e_proxy: Union[LazyProxy, torch.fx.Proxy]
    ) -> None:
        """
        Record the basic symbols produced by an intermediate result so that
        subgraphs can later bind symbol inputs to the owning tracer's proxy.

        For tensors, recursively walks sizes, strides, storage offset, sparse
        index tensors, and traceable wrapper-subclass inner tensors, creating
        LazyProxy bindings (realized only if actually lifted). For SymInts,
        binds the basic symbol directly to *e_proxy*.
        """
        # When binding the symbols in an example_value, we bind the symbols
        # to the proxy's associated Tracer instead of current tracer.
        # This is because:
        # 1. We may be calling wrap_tensors during speculate_subgraph because
        #    the variables are lazily realized. The proxy are top-level phs but
        #    current tracer is a subtracer.
        # 2. For autograd.Function, we trace the backward graph with a new tracer
        #    whose parent is the forward tracer, but we're using all the proxies created
        #    in forward tracer to trace the backward.
        #    For example, forward calls save_for_backward for a input tensor t.
        #    Backward calls t.tolist(). In this case, all the proxies that backward tracer
        #    sees are from parent tracer (i.e. the forward tracer). (e.g. t[0].item())
        #    See test_validate_outputs_unbacked for repro on 2.
        tracer = e_proxy.tracer
        assert isinstance(tracer, SubgraphTracer)

        def need_bind(s: Any) -> bool:
            # A symbol needs binding iff it is a basic (non-compound) symbolic
            # value not already bound in this tracer.
            from torch.fx.experimental.symbolic_shapes import is_symbolic

            return (
                is_symbolic(s)
                and isinstance(s.node.expr, sympy.Symbol)
                and s.node.expr not in self.bound_symbols
            )

        def _proxy_with_example_value(
            example_value: Any, *args: Any, **kwargs: Any
        ) -> fx.Proxy:
            # We need to insert proxy for creating sym_size/sym_stride/sym_storage right after e_proxy
            nonlocal e_proxy
            e_proxy = e_proxy() if isinstance(e_proxy, LazyProxy) else e_proxy
            assert isinstance(e_proxy, torch.fx.Proxy)
            with tracer.graph.inserting_after(e_proxy.node):
                proxy = tracer.create_proxy(*args, **kwargs)
                set_example_value(proxy.node, example_value)
                return proxy

        if isinstance(example_value, torch.Tensor):
            for i, s in enumerate(example_value.size()):
                if need_bind(s):
                    log.debug(
                        "track_produced_symints %s for %s.size()[%s] at debug_level %s",
                        s,
                        e_proxy,
                        i,
                        tracer.debug_level,
                    )
                    lazy_proxy = LazyProxy(
                        tracer,
                        _proxy_with_example_value,
                        s,
                        "call_function",
                        torch.ops.aten.sym_size.int,
                        (e_proxy, i),
                        {},
                        type_expr=type(s),
                    )
                    self.track_produced_symints(s, lazy_proxy)

            storage_offset = example_value.storage_offset()
            if need_bind(storage_offset):
                log.debug(
                    "track_produced_symints %s for %s.storage_offset() at debug_level %s",
                    storage_offset,
                    e_proxy,
                    tracer.debug_level,
                )
                lazy_proxy = LazyProxy(
                    tracer,
                    _proxy_with_example_value,
                    storage_offset,
                    "call_function",
                    torch.ops.aten.sym_storage_offset,
                    (e_proxy,),
                    {},
                    type_expr=type(storage_offset),
                )
                self.track_produced_symints(storage_offset, lazy_proxy)

            if example_value.layout is torch.strided:
                for i, s in enumerate(example_value.stride()):
                    if need_bind(s):
                        log.debug(
                            "track_produced_symints %s for %s.stride()[%s] at debug_level %s",
                            s,
                            e_proxy,
                            i,
                            tracer.debug_level,
                        )
                        lazy_proxy = LazyProxy(
                            tracer,
                            _proxy_with_example_value,
                            s,
                            "call_function",
                            torch.ops.aten.sym_stride.int,
                            (e_proxy, i),
                            {},
                            type_expr=type(s),
                        )
                        self.track_produced_symints(s, lazy_proxy)
            elif example_value.layout is torch.sparse_coo:
                self.track_produced_symints(example_value._indices(), e_proxy)
                self.track_produced_symints(example_value._values(), e_proxy)
            elif example_value.layout in {torch.sparse_csr, torch.sparse_bsr}:
                self.track_produced_symints(example_value.crow_indices(), e_proxy)
                self.track_produced_symints(example_value.col_indices(), e_proxy)
            elif example_value.layout in {torch.sparse_csc, torch.sparse_bsc}:
                self.track_produced_symints(example_value.ccol_indices(), e_proxy)
                self.track_produced_symints(example_value.row_indices(), e_proxy)

            if is_traceable_wrapper_subclass(example_value):
                attrs, ctx = example_value.__tensor_flatten__()
                for attr in attrs:
                    inner_t = getattr(example_value, attr)
                    self.track_produced_symints(inner_t, getattr(e_proxy, attr))
        elif isinstance(example_value, torch.SymInt):
            if need_bind(example_value):
                expr = example_value.node.expr
                tracer.bound_symbols[expr] = e_proxy
    # See Note [Auto lift basic free symbols when create_graph_input]
    def _lift_basic_symbols(
        self, example_value: Union[torch.SymInt, torch.Tensor], src: Optional[Source]
    ) -> None:
        """
        Lift the basic free symbols appearing in *example_value* to graph
        inputs of this tracer (and recursively of its parents).

        For tensors this covers sizes, strides, storage offset, sparse index
        tensors, and traceable wrapper-subclass inner tensors; for SymInts the
        symbols of the expression itself.
        """
        # The before arg is for inserting symints in the sizes/strides of a tensor
        # before the tensor. This ordering ensures that when we look at the tensor's
        # symbols, they're already lifted/tracked. E.g. this assumption is used
        # in insert_deferred_runtime_asserts.
        def _lift_symbols_in_symint(
            s: Union[int, torch.SymInt],
            source: Optional[Source],
            before: bool = False,
        ) -> None:
            if not is_symbolic(s):
                return

            assert isinstance(s, torch.SymInt)
            self_to_be_bound = self.lookup_unbound_symbols(s)
            if len(self_to_be_bound) == 0:
                return

            # For subgraph
            if self.parent is not None:
                # Recursively lift symbols in symint until top-level.
                self.parent._lift_basic_symbols(s, source)
                for s0 in self_to_be_bound:
                    parent_proxy = self.parent.bound_symbols[s0]
                    example_val = parent_proxy.node.meta["example_value"]  # type: ignore[union-attr]
                    assert isinstance(example_val, torch.SymInt)
                    ph = self.create_graph_input(
                        str(s0),
                        type(example_val),
                        example_val,
                        before=before,
                        source=source,
                    )
                    log.debug(
                        "_lift_symbols_in_symint %s from %s at debug_level %s",
                        s0,
                        source.name() if source is not None else "subgraph inputs",
                        self.debug_level,
                    )
                    self.lifted_freevars[parent_proxy] = ph  # type: ignore[index]
            # For root_tracer:
            else:
                assert len(self_to_be_bound) == 1, (
                    f"For root tracer, we only expect to bind basic symbols (compound symbols "
                    f"should be cached before) but got unbound symbols {self_to_be_bound} in {s}"
                )
                assert source is not None, (
                    f"Source of '{s}' is None when lifting it to input of top-level. If it's an unbacked symbol, "
                    "this could be because it's not tracked with lazy_bind_unbacked_symbols. "
                    f"Otherwise, should provide a source when create_graph_input for `{s}` at root tracer."
                )
                s0 = next(iter(self_to_be_bound))
                ph = self.create_graph_input(
                    str(s0),
                    type(s),
                    s,
                    before=before,
                    source=source,
                )
                log.debug(
                    "_lift_symbols_in_symint %s from %s at debug_level %s",
                    s,
                    source.name() if source is not None else "subgraph inputs",
                    self.debug_level,
                )
                ph.node.meta["grapharg"] = GraphArg(
                    source,
                    s,
                    pass_arg_as_tensor=False,
                    fake_tensor=None,
                    is_tensor=False,
                )

        if isinstance(example_value, torch.Tensor):
            for i, s in enumerate(example_value.size()):
                _lift_symbols_in_symint(
                    s,
                    (
                        TensorPropertySource(src, TensorProperty.SIZE, i)
                        if src is not None
                        else None
                    ),
                    before=True,
                )
            if example_value.layout is torch.strided:
                for i, s in enumerate(example_value.stride()):
                    _lift_symbols_in_symint(
                        s,
                        (
                            TensorPropertySource(src, TensorProperty.STRIDE, i)
                            if src is not None
                            else None
                        ),
                        before=True,
                    )
                _lift_symbols_in_symint(
                    example_value.storage_offset(),
                    (
                        TensorPropertySource(src, TensorProperty.STORAGE_OFFSET)
                        if src is not None
                        else None
                    ),
                    before=True,
                )
            elif example_value.layout is torch.sparse_coo:
                self._lift_basic_symbols(example_value._indices(), src)
                self._lift_basic_symbols(example_value._values(), src)
            elif example_value.layout in {torch.sparse_csr, torch.sparse_bsr}:
                self._lift_basic_symbols(example_value.crow_indices(), src)
                self._lift_basic_symbols(example_value.col_indices(), src)
            elif example_value.layout in {torch.sparse_csc, torch.sparse_bsc}:
                self._lift_basic_symbols(example_value.ccol_indices(), src)
                self._lift_basic_symbols(example_value.row_indices(), src)
            if is_traceable_wrapper_subclass(example_value):
                attrs, ctx = example_value.__tensor_flatten__()
                for attr in attrs:
                    inner_t = getattr(example_value, attr)
                    self._lift_basic_symbols(
                        inner_t, AttrSource(src, attr) if src is not None else None
                    )
        elif isinstance(example_value, torch.SymInt):
            _lift_symbols_in_symint(
                example_value,
                src,
            )
# Lookup the proxy in current tracer for each symbol in expressions of s,
# See Note [Auto lift basic free symbols when create_graph_input]
def lookup_unbound_symbols(self, s: torch.SymInt) -> list[sympy.Symbol]:
free_symbols = s.node.expr.free_symbols
if len(free_symbols) == 0:
return []
to_be_bound = []
for s0 in free_symbols:
if s0 not in self.bound_symbols:
to_be_bound.append(s0)
continue
proxy = self.bound_symbols[s0]
if isinstance(proxy, LazyProxy):
proxy = proxy()
self.bound_symbols[s0] = proxy
assert isinstance(proxy, torch.fx.Proxy) and proxy.tracer is self, (
f"The proxy of symbol {s0} doesn't belong to current tracer."
)
# Sort the symbols so that we can have a deterministic lifting order
return sorted(to_be_bound, key=lambda s: s.name)
def has_input_mutation(self) -> MutationInfo:
input_versions_at_beginning = self._input_versions_at_beginning
input_nodes = []
input_versions_at_end = []
for node in self.graph.nodes:
if node.op == "placeholder":
example_value = node.meta["example_value"]
if isinstance(example_value, torch.Tensor):
input_versions_at_end.append(example_value._version)
input_nodes.append(node)
else:
break
mutated_inputs = [
i
for i, (v1, v2) in enumerate(
zip(input_versions_at_beginning, input_versions_at_end)
)
if v1 != v2
]
if mutated_inputs:
mutated_nodes = [input_nodes[i] for i in mutated_inputs]
msg = f"Input mutation detected at {mutated_nodes}"
return MutationInfo(True, msg)
return MutationInfo(False, "")
def has_aliasing(self) -> AliasingInfo:
from torch._higher_order_ops.utils import _collect_fake_inputs
input_storages: dict[StorageWeakRef, torch.fx.Node] = dict()
for node in self.graph.nodes:
if node.op == "placeholder":
example_value = _collect_fake_inputs([node])[0]
if isinstance(example_value, torch.Tensor):
storage = StorageWeakRef(example_value._typed_storage())
if storage in input_storages:
# input-input aliasing
msg = f"Input-to-input aliasing detected at nodes {input_storages[storage]} and {node}"
return AliasingInfo(True, msg)
input_storages[storage] = node
else:
break
output_storages: dict[StorageWeakRef, torch.fx.Node] = dict()
out_nodes = self.graph.find_nodes(op="output")[0]
for out_node in pytree.tree_leaves(out_nodes.args[0]):
if out_node:
example_value = _collect_fake_inputs([out_node])[0]
assert not isinstance(example_value, list)
if isinstance(example_value, torch.Tensor):
storage = StorageWeakRef(example_value._typed_storage())
if storage in output_storages:
# output-output aliasing
msg = f"Output-to-output aliasing detected at nodes {output_storages[storage]} and {out_node}"
return AliasingInfo(True, msg)
output_storages[storage] = out_node
intersected_storages = input_storages.keys() & output_storages.keys()
if len(intersected_storages) > 0:
# input-output aliasing
aliased = [
(input_storages[s], output_storages[s]) for s in intersected_storages
]
aliased = ", ".join([f"{i} and {o}" for i, o in aliased])
msg = f"Input-to-output aliasing detected at nodes {aliased}"
return AliasingInfo(True, msg)
return AliasingInfo(False, "")
# NOTE: [HigherOrderOperator tracing design]
# Ignoring HigherOrderOperators for a moment,
# OutputGraph represents the graph being built by Dynamo that may be compiled
# and executed. It holds a root SubgraphTracer where the FX graph is built.
#
# HigherOrderOperators are operators that take functions as their arguments.
# When Dynamo encounters a HigherOrderOperator, then it attempts to introspect
# the function passed to it (call this the "body function"), capture it into a
# GraphModule, and rewrite the call to the HigherOrderOperator to use the
# GraphModule.
#
# The way we handle the capture of body functions is through having
# (possibly nested) SubgraphTracers, one per body function.
#
# Mechanically, we do the introspection by:
# - Creating a new SubgraphTracer via OutputGraph.subtracer
# - Executing the body function.
# This constructs the graph of the body function in the new SubgraphTracer
# while modifying the state of the OutputGraph. For example:
# - the OutputGraph can receive new GraphArgs (if we discover any new
# untracked Tensors)
# - side effects from the body function get accumulated into
# OutputGraph.side_effects
# - guards produced by the body function get accumulated into OutputGraph.guards
#
# The traced function has some special properties that make it easier for us
# to transform later down the line:
# - we lift all free variables to being inputs.
#
# If the introspection fails (due to the existence of graph breaks), then
# we roll back the current OutputGraph state and graph break on the
# HigherOrderOperator.
| SubgraphTracer |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/api_fastapi/services/roles.py | {
"start": 1412,
"end": 6421
} | class ____:
"""Service layer for FAB Auth Manager role operations (create, validate, sync)."""
@staticmethod
def _check_action_and_resource(
security_manager: FabAirflowSecurityManagerOverride,
perms: list[tuple[str, str]],
) -> None:
for action_name, resource_name in perms:
if not security_manager.get_action(action_name):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"The specified action: {action_name!r} was not found",
)
if not security_manager.get_resource(resource_name):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"The specified resource: {resource_name!r} was not found",
)
@classmethod
def create_role(cls, body: RoleBody) -> RoleResponse:
security_manager = get_fab_auth_manager().security_manager
existing = security_manager.find_role(name=body.name)
if existing:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=f"Role with name {body.name!r} already exists; please update with the PATCH endpoint",
)
perms: list[tuple[str, str]] = [(ar.action.name, ar.resource.name) for ar in (body.permissions or [])]
cls._check_action_and_resource(security_manager, perms)
security_manager.bulk_sync_roles([{"role": body.name, "perms": perms}])
created = security_manager.find_role(name=body.name)
if not created:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail="Role was not created due to an unexpected error.",
)
return RoleResponse.model_validate(created)
@classmethod
def get_roles(cls, *, order_by: str, limit: int, offset: int) -> RoleCollectionResponse:
security_manager = get_fab_auth_manager().security_manager
session = security_manager.session
total_entries = session.scalars(select(func.count(Role.id))).one()
ordering = build_ordering(order_by, allowed={"name": Role.name, "role_id": Role.id})
stmt = select(Role).order_by(ordering).offset(offset).limit(limit)
roles = session.scalars(stmt).unique().all()
return RoleCollectionResponse(
roles=[RoleResponse.model_validate(r) for r in roles],
total_entries=total_entries,
)
@classmethod
def delete_role(cls, name: str) -> None:
security_manager = get_fab_auth_manager().security_manager
existing = security_manager.find_role(name=name)
if not existing:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Role with name {name!r} does not exist.",
)
security_manager.delete_role(existing)
@classmethod
def get_role(cls, name: str) -> RoleResponse:
security_manager = get_fab_auth_manager().security_manager
existing = security_manager.find_role(name=name)
if not existing:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Role with name {name!r} does not exist.",
)
return RoleResponse.model_validate(existing)
@classmethod
def patch_role(cls, body: RoleBody, name: str, update_mask: str | None = None) -> RoleResponse:
security_manager = get_fab_auth_manager().security_manager
existing = security_manager.find_role(name=name)
if not existing:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Role with name {name!r} does not exist.",
)
if update_mask:
update_data = RoleResponse.model_validate(existing)
for field in update_mask:
if field == "actions":
update_data.permissions = body.permissions
elif hasattr(body, field):
setattr(update_data, field, getattr(body, field))
else:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"'{field}' in update_mask is unknown",
)
else:
update_data = RoleResponse(name=body.name, permissions=body.permissions or [])
perms: list[tuple[str, str]] = [(ar.action.name, ar.resource.name) for ar in (body.permissions or [])]
cls._check_action_and_resource(security_manager, perms)
security_manager.bulk_sync_roles([{"role": name, "perms": perms}])
new_name = update_data.name
if new_name and new_name != existing.name:
security_manager.update_role(role_id=existing.id, name=new_name)
return RoleResponse.model_validate(update_data)
| FABAuthManagerRoles |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_snippets.py | {
"start": 227,
"end": 2003
} | class ____(util.MdCase):
"""Test snippet cases."""
extension = [
'pymdownx.snippets', 'pymdownx.superfences'
]
extension_configs = {
'pymdownx.snippets': {
'base_path': [os.path.join(BASE, '_snippets')],
'dedent_subsections': True
}
}
def test_dedent_section(self):
"""Test dedenting sections."""
self.check_markdown(
R'''
```text
---8<--- "indented.txt:py-section"
```
''',
R'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
def test_dedent_lines(self):
"""Test dedenting lines."""
self.check_markdown(
R'''
```text
---8<--- "indented.txt:5:8"
```
''',
R'''
<div class="highlight"><pre><span></span><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre></div>
''',
True
)
def test_dedent_indented(self):
"""Test dedenting sections that has indented insertion."""
self.check_markdown(
R'''
Paragraph
---8<--- "indented.txt:py-section"
''',
R'''
<p>Paragraph</p>
<pre><code>def some_method(self, param):
"""Docstring."""
return param
</code></pre>
''',
True
)
| TestSnippetDedent |
python | pypa__pip | src/pip/_internal/models/direct_url.py | {
"start": 3968,
"end": 4421
} | class ____:
name: ClassVar = "dir_info"
editable: bool = False
@classmethod
def _from_dict(cls, d: dict[str, Any] | None) -> DirInfo | None:
if d is None:
return None
return cls(editable=_get_required(d, bool, "editable", default=False))
def _to_dict(self) -> dict[str, Any]:
return _filter_none(editable=self.editable or None)
InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
@dataclass
| DirInfo |
python | tensorflow__tensorflow | tensorflow/compiler/tests/scan_ops_test.py | {
"start": 5129,
"end": 8174
} | class ____(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.float64]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
def neginf_like(x):
return -np.inf * np.ones_like(x)
np_out = handle_options(np.logaddexp.accumulate, neginf_like, x, axis,
exclusive, reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
tf_out = math_ops.cumulative_logsumexp(p, axis, exclusive,
reverse).eval(feed_dict={p: x})
self.assertAllClose(np_out, tf_out, rtol=4e-5)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
math_ops.cumulative_logsumexp(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
@test_util.disable_mlir_bridge("Error handling")
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumulative_logsumexp(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumulative_logsumexp(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumulative_logsumexp(input_tensor, [0]).eval()
| CumulativeLogSumExpTest |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_webhook.py | {
"start": 6569,
"end": 8839
} | class ____(WebhookBaseTest):
method = "post"
def setUp(self) -> None:
super().setUp()
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(
provider="bitbucket",
external_id="bitbucket_external_id",
name="Hello world",
metadata={"webhook_secret": "test_secret"},
)
integration.add_organization(self.organization)
self.create_repository(integration_id=integration.id)
def send_signed_webhook(self):
return self.get_response(
self.organization_id,
raw_data=PUSH_EVENT_EXAMPLE,
extra_headers=dict(
HTTP_X_EVENT_KEY="repo:push",
HTTP_X_HUB_SIGNATURE=self.signature,
REMOTE_ADDR=BITBUCKET_IP,
),
)
def test_is_valid_signature(self) -> None:
# https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/#Examples
assert is_valid_signature(
b"Hello World!",
"It's a Secret to Everybody",
"a4771c39fbe90f317c7824e83ddef3caae9cb3d976c214ace1f2937e133263c9",
)
def test_success(self) -> None:
self.signature = "sha256=ee07bac3b2fa849cf4346113dc5f6b9738660673aca6fa8f07ce459e7543f980"
response = self.send_signed_webhook()
assert response.status_code == 204
def test_missing_signature(self) -> None:
self.signature = ""
response = self.send_signed_webhook()
assert response.status_code == 400
assert response.data["detail"]["message"] == "Missing webhook signature"
def test_invalid_signature(self) -> None:
self.signature = "sha256=definitely-invalid"
response = self.send_signed_webhook()
assert response.status_code == 400
assert response.data["detail"]["message"] == "Webhook signature is invalid"
def test_invalid_method(self) -> None:
self.signature = "sha1=b842d7b7d535c446133bcf18cf085fb9472175c7"
response = self.send_signed_webhook()
assert response.status_code == 400
assert response.data["detail"]["message"] == "Signature method is not supported"
| WebhookSignatureTest |
python | jazzband__django-pipeline | pipeline/compressors/csshtmljsminify.py | {
"start": 50,
"end": 507
} | class ____(CompressorBase):
"""
CSS, HTML and JS compressor based on the Python library css-html-js-minify
(https://pypi.org/project/css-html-js-minify/).
"""
def compress_css(self, css):
from css_html_js_minify import css_minify # noqa: PLC0415
return css_minify(css)
def compress_js(self, js):
from css_html_js_minify import js_minify # noqa: PLC0415
return js_minify(js)
| CssHtmlJsMinifyCompressor |
python | xlwings__xlwings | xlwings/pro/_xlcalamine.py | {
"start": 3233,
"end": 5157
} | class ____(base_classes.Books):
def __init__(self, app):
self.app = app
self.books = []
self._active = None
@property
def active(self):
return self._active
def open(self, filename):
filename = str(Path(filename).resolve())
sheet_names = xlwingslib.get_sheet_names(filename)
names = []
for name, ref in xlwingslib.get_defined_names(filename):
if ref.split("!")[0].strip("'") in sheet_names:
names.append(
{
"name": name,
"sheet_index": sheet_names.index(ref.split("!")[0].strip("'")),
"address": ref.split("!")[1],
"book_scope": True, # TODO: not provided by calamine
}
)
book = Book(
api={
"sheet_names": sheet_names,
"names": names,
},
books=self,
path=filename,
)
self.books.append(book)
self._active = book
return book
def add(self):
book = Book(api={"sheet_names": ["Sheet1"]}, books=self, path="dummy")
self.books.append(book)
self._active = book
return book
def _try_find_book_by_name(self, name):
for book in self.books:
if book.name == name or book.fullname == name:
return book
return None
def __len__(self):
return len(self.books)
def __iter__(self):
for book in self.books:
yield book
def __call__(self, name_or_index):
if isinstance(name_or_index, numbers.Number):
return self.books[name_or_index - 1]
else:
book = self._try_find_book_by_name(name_or_index)
if book is None:
raise KeyError(name_or_index)
return book
| Books |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 512242,
"end": 512853
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("ProjectV2Edge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ProjectV2Connection |
python | ansible__ansible | lib/ansible/module_utils/facts/network/linux.py | {
"start": 18392,
"end": 18549
} | class ____(NetworkCollector):
_platform = 'Linux'
_fact_class = LinuxNetwork
required_facts = set(['distribution', 'platform'])
| LinuxNetworkCollector |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/translate.py | {
"start": 5532,
"end": 5801
} | class ____(BaseGoogleLink):
"""
Helper class for constructing Translation Model link.
Link for legacy and native models.
"""
name = "Translation Model"
key = "translation_model"
format_str = TRANSLATION_NATIVE_MODEL_LINK
| TranslationModelLink |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001_py310.py | {
"start": 588,
"end": 795
} | class ____(SQLModel):
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
| HeroBase |
python | ansible__ansible | test/integration/targets/ansible-test-container/runme.py | {
"start": 25428,
"end": 25575
} | class ____:
"""Result from execution of a subprocess."""
command: list[str]
stdout: str
stderr: str
status: int
| SubprocessResult |
python | modin-project__modin | modin/core/dataframe/base/interchange/dataframe_protocol/dataframe.py | {
"start": 1088,
"end": 1877
} | class ____(TypedDict): # noqa: GL08
# first element is a buffer containing the column data;
# second element is the data buffer's associated dtype
data: Tuple["ProtocolBuffer", Any]
# first element is a buffer containing mask values indicating missing data;
# second element is the mask value buffer's associated dtype.
# None if the null representation is not a bit or byte mask
validity: Optional[Tuple["ProtocolBuffer", Any]]
# first element is a buffer containing the offset values for
# variable-size binary data (e.g., variable-length strings);
# second element is the offsets buffer's associated dtype.
# None if the data buffer does not have an associated offsets buffer
offsets: Optional[Tuple["ProtocolBuffer", Any]]
| ColumnBuffers |
python | wandb__wandb | wandb/vendor/pygments/lexers/grammar_notation.py | {
"start": 3686,
"end": 6328
} | class ____(RegexLexer):
"""
For `JSpeech Grammar Format <https://www.w3.org/TR/jsgf/>`_
grammars.
.. versionadded:: 2.2
"""
name = 'JSGF'
aliases = ['jsgf']
filenames = ['*.jsgf']
mimetypes = ['application/jsgf', 'application/x-jsgf', 'text/jsgf']
flags = re.MULTILINE | re.UNICODE
tokens = {
'root': [
include('comments'),
include('non-comments'),
],
'comments': [
(r'/\*\*(?!/)', Comment.Multiline, 'documentation comment'),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
],
'non-comments': [
('\A#JSGF[^;]*', Comment.Preproc),
(r'\s+', Text),
(r';', Punctuation),
(r'[=|()\[\]*+]', Operator),
(r'/[^/]+/', Number.Float),
(r'"', String.Double, 'string'),
(r'\{', String.Other, 'tag'),
(words(('import', 'public'), suffix=r'\b'), Keyword.Reserved),
(r'grammar\b', Keyword.Reserved, 'grammar name'),
(r'(<)(NULL|VOID)(>)',
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r'<', Punctuation, 'rulename'),
(r'\w+|[^\s;=|()\[\]*+/"{<\w]+', Text),
],
'string': [
(r'"', String.Double, '#pop'),
(r'\\.', String.Escape),
(r'[^\\"]+', String.Double),
],
'tag': [
(r'\}', String.Other, '#pop'),
(r'\\.', String.Escape),
(r'[^\\}]+', String.Other),
],
'grammar name': [
(r';', Punctuation, '#pop'),
(r'\s+', Text),
(r'\.', Punctuation),
(r'[^;\s.]+', Name.Namespace),
],
'rulename': [
(r'>', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'\s+', Text),
(r'([^.>]+)(\s*)(\.)', bygroups(Name.Namespace, Text, Punctuation)),
(r'[^.>]+', Name.Constant),
],
'documentation comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'(^\s*\*?\s*)(@(?:example|see)\s+)'
r'([\w\W]*?(?=(?:^\s*\*?\s*@|\*/)))',
bygroups(Comment.Multiline, Comment.Special,
using(this, state='example'))),
(r'(^\s*\*?\s*)(@\S*)',
bygroups(Comment.Multiline, Comment.Special)),
(r'[^*\n@]+|\w|\W', Comment.Multiline),
],
'example': [
(r'\n\s*\*', Comment.Multiline),
include('non-comments'),
(r'.', Comment.Multiline),
],
}
| JsgfLexer |
python | pyca__cryptography | tests/hazmat/primitives/test_rsa.py | {
"start": 77797,
"end": 82492
} | class ____:
def test_rsa_public_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
assert public_numbers.e == 1
assert public_numbers.n == 15
def test_rsa_private_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
private_numbers = rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers,
)
assert private_numbers.p == 3
assert private_numbers.q == 5
assert private_numbers.d == 1
assert private_numbers.dmp1 == 1
assert private_numbers.dmq1 == 1
assert private_numbers.iqmp == 2
assert private_numbers.public_numbers == public_numbers
def test_rsa_private_numbers_create_key(self, backend):
private_key = RSA_KEY_1024.private_key(
backend, unsafe_skip_rsa_key_validation=True
)
assert private_key
def test_rsa_public_numbers_create_key(self, backend):
public_key = RSA_KEY_1024.public_numbers.public_key(backend)
assert public_key
public_key = rsa.RSAPublicNumbers(n=10, e=3).public_key(backend)
assert public_key
def test_public_numbers_invalid_types(self):
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=None, n=15) # type: ignore[arg-type]
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=1, n=None) # type: ignore[arg-type]
@pytest.mark.parametrize(
("p", "q", "d", "dmp1", "dmq1", "iqmp", "public_numbers"),
[
(None, 5, 1, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, None, 1, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, None, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, None, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, None, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, 1, None, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, 1, 2, None),
],
)
def test_private_numbers_invalid_types(
self, p, q, d, dmp1, dmq1, iqmp, public_numbers
):
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=public_numbers,
)
@pytest.mark.parametrize(
("e", "n"),
[
(7, 2), # modulus < 3
(1, 15), # public_exponent < 3
(17, 15), # public_exponent > modulus
(14, 15), # public_exponent not odd
],
)
def test_invalid_public_numbers_argument_values(self, e, n, backend):
# Start with public_exponent=7, modulus=15. Then change one value at a
# time to test the bounds.
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=e, n=n).public_key(backend)
@pytest.mark.parametrize(
("p", "q", "d", "dmp1", "dmq1", "iqmp", "e", "n"),
[
(3, 11, 3, 1, 3, 2, 7, 2), # modulus < 3
(3, 11, 3, 1, 3, 2, 7, 35), # modulus != p * q
(37, 11, 3, 1, 3, 2, 7, 33), # p > modulus
(3, 37, 3, 1, 3, 2, 7, 33), # q > modulus
(3, 11, 3, 35, 3, 2, 7, 33), # dmp1 > modulus
(3, 11, 3, 1, 35, 2, 7, 33), # dmq1 > modulus
(3, 11, 3, 1, 3, 35, 7, 33), # iqmp > modulus
(3, 11, 37, 1, 3, 2, 7, 33), # d > modulus
(3, 11, 3, 1, 3, 2, 1, 33), # public_exponent < 3
(3, 11, 3, 1, 3, 35, 65537, 33), # public_exponent > modulus
(3, 11, 3, 1, 3, 2, 6, 33), # public_exponent is not odd
(3, 11, 3, 2, 3, 2, 7, 33), # dmp1 is not odd
(3, 11, 3, 1, 4, 2, 7, 33), # dmq1 is not odd
],
)
def test_invalid_private_numbers_argument_values(
self, p, q, d, dmp1, dmq1, iqmp, e, n, backend
):
# Start with p=3, q=11, private_exponent=3, public_exponent=7,
# modulus=33, dmp1=1, dmq1=3, iqmp=2. Then change one value at
# a time to test the bounds.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=rsa.RSAPublicNumbers(e=e, n=n),
).private_key(backend)
def test_public_number_repr(self):
num = RSAPublicNumbers(1, 1)
assert repr(num) == "<RSAPublicNumbers(e=1, n=1)>"
| TestRSANumbers |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 78529,
"end": 80159
} | class ____(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2})
for :math:`x \ge 0` and :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (True, False))]
def _rvs(self, c, size=None, random_state=None):
return abs(random_state.standard_normal(size) + c)
def _pdf(self, x, c):
# foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
sqrt_two = np.sqrt(2)
return 0.5 * (sc.erf((x - c)/sqrt_two) + sc.erf((x + c)/sqrt_two))
def _sf(self, x, c):
return _norm_sf(x - c) + _norm_sf(x + c)
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
| foldnorm_gen |
python | doocs__leetcode | solution/3100-3199/3178.Find the Child Who Has the Ball After K Seconds/Solution.py | {
"start": 0,
"end": 147
} | class ____:
def numberOfChild(self, n: int, k: int) -> int:
k, mod = divmod(k, n - 1)
return n - mod - 1 if k & 1 else mod
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 39255,
"end": 39383
} | class ____(AutomationRuleMixin):
model = RegexAutomationRule
form_class = RegexAutomationRuleForm
| RegexAutomationRuleMixin |
python | mlflow__mlflow | dev/clint/tests/rules/test_implicit_optional.py | {
"start": 1105,
"end": 1261
} | class ____:
x: "Optional[str]" = None
# Good - stringified with | None
good3: "int | None" = None
good4: "str | None" = None
good5: "int|None" = None
| Good1 |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 2699,
"end": 3005
} | class ____(TypedDict):
name: str
status: str
isMuted: bool
dateCreated: datetime
lastCheckIn: datetime
nextCheckIn: datetime
nextCheckInLatest: datetime
activeIncident: MonitorIncidentSerializerResponse | None
@register(MonitorEnvironment)
| MonitorEnvironmentSerializerResponse |
python | kamyu104__LeetCode-Solutions | Python/insert-into-a-binary-search-tree.py | {
"start": 154,
"end": 749
} | class ____(object):
def insertIntoBST(self, root, val):
"""
:type root: TreeNode
:type val: int
:rtype: TreeNode
"""
curr, parent = root, None
while curr:
parent = curr
if val <= curr.val:
curr = curr.left
else:
curr = curr.right
if not parent:
root = TreeNode(val)
elif val <= parent.val:
parent.left = TreeNode(val)
else:
parent.right = TreeNode(val)
return root
# Time: O(h)
# Space: O(h)
| Solution |
python | wandb__wandb | wandb/sync/sync.py | {
"start": 12499,
"end": 16125
} | class ____:
def __init__(
self,
project=None,
entity=None,
run_id=None,
job_type=None,
mark_synced=None,
app_url=None,
view=None,
verbose=None,
sync_tensorboard=None,
log_path=None,
append=None,
skip_console=None,
replace_tags=None,
):
self._sync_list = []
self._thread = None
self._project = project
self._entity = entity
self._run_id = run_id
self._job_type = job_type
self._mark_synced = mark_synced
self._app_url = app_url
self._view = view
self._verbose = verbose
self._sync_tensorboard = sync_tensorboard
self._log_path = log_path
self._append = append
self._skip_console = skip_console
self._replace_tags = replace_tags or {}
def status(self):
pass
def add(self, p):
self._sync_list.append(os.path.abspath(str(p)))
def start(self):
# create a thread for each file?
self._thread = SyncThread(
sync_list=self._sync_list,
project=self._project,
entity=self._entity,
run_id=self._run_id,
job_type=self._job_type,
view=self._view,
verbose=self._verbose,
mark_synced=self._mark_synced,
app_url=self._app_url,
sync_tensorboard=self._sync_tensorboard,
log_path=self._log_path,
append=self._append,
skip_console=self._skip_console,
replace_tags=self._replace_tags,
)
self._thread.start()
def is_done(self):
return not self._thread.is_alive()
def poll(self):
time.sleep(1)
return False
def get_runs(
include_offline: bool = True,
include_online: bool = True,
include_synced: bool = False,
include_unsynced: bool = True,
exclude_globs: Optional[List[str]] = None,
include_globs: Optional[List[str]] = None,
):
# TODO(jhr): grab dir info from settings
base = ".wandb" if os.path.exists(".wandb") else "wandb"
if not os.path.exists(base):
return ()
all_dirs = os.listdir(base)
dirs = []
if include_offline:
dirs += filter(lambda _d: _d.startswith("offline-run-"), all_dirs)
if include_online:
dirs += filter(lambda _d: _d.startswith("run-"), all_dirs)
# find run file in each dir
fnames = []
dirs.sort()
for d in dirs:
paths = os.listdir(os.path.join(base, d))
if exclude_globs:
paths = set(paths)
for g in exclude_globs:
paths = paths - set(fnmatch.filter(paths, g))
paths = list(paths)
if include_globs:
new_paths = set()
for g in include_globs:
new_paths = new_paths.union(fnmatch.filter(paths, g))
paths = list(new_paths)
for f in paths:
if f.endswith(WANDB_SUFFIX):
fnames.append(os.path.join(base, d, f))
filtered = []
for f in fnames:
dname = os.path.dirname(f)
# TODO(frz): online runs are assumed to be synced, verify from binary log.
if os.path.exists(f"{f}{SYNCED_SUFFIX}") or os.path.basename(dname).startswith(
"run-"
):
if include_synced:
filtered.append(_LocalRun(dname, True))
else:
if include_unsynced:
filtered.append(_LocalRun(dname, False))
return tuple(filtered)
def get_run_from_path(path):
return _LocalRun(path)
| SyncManager |
python | django__django | tests/update/tests.py | {
"start": 482,
"end": 2374
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = A.objects.create()
cls.a2 = A.objects.create()
B.objects.bulk_create(B(a=cls.a1) for _ in range(20))
for x in range(20):
D.objects.create(a=cls.a1)
def test_nonempty_update(self):
"""
Update changes the right number of rows for a nonempty queryset
"""
num_updated = self.a1.b_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update(self):
"""
Update changes the right number of rows for an empty queryset
"""
num_updated = self.a2.b_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = B.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_nonempty_update_with_inheritance(self):
"""
Update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a1.d_set.update(y=100)
self.assertEqual(num_updated, 20)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 20)
def test_empty_update_with_inheritance(self):
"""
Update changes the right number of rows for an empty queryset
when the update affects only a base table
"""
num_updated = self.a2.d_set.update(y=100)
self.assertEqual(num_updated, 0)
cnt = D.objects.filter(y=100).count()
self.assertEqual(cnt, 0)
def test_foreign_key_update_with_id(self):
"""
Update works using <field>_id for foreign keys
"""
num_updated = self.a1.d_set.update(a_id=self.a2)
self.assertEqual(num_updated, 20)
self.assertEqual(self.a2.d_set.count(), 20)
| SimpleTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solverHigherOrder5.py | {
"start": 1555,
"end": 2291
} | class ____:
def identity(self, x: T) -> T:
return x
def test_1(self, f: Callable[[A], X]) -> Callable[[A, B, C], tuple[X, B, C]]:
val = triple_1(f, self.identity, self.identity)
reveal_type(
val,
expected_text="(A@test_1, T@identity, T(1)@identity) -> tuple[X@test_1, T@identity, T(1)@identity]",
)
return val
def test_2(self, f: Callable[[A], X]) -> Callable[[A, B, C], tuple[X, B, C]]:
val = triple_2((f, self.identity, self.identity))
reveal_type(
val,
expected_text="(A@test_2, T@identity, T(1)@identity) -> tuple[X@test_2, T@identity, T(1)@identity]",
)
return val
@dataclass(frozen=True)
| ClassA |
python | pytest-dev__pytest-xdist | src/xdist/looponfail.py | {
"start": 5998,
"end": 8221
} | class ____:
def __init__(self, config: pytest.Config, channel: execnet.Channel) -> None:
self.config = config
self.channel = channel
self.recorded_failures: list[pytest.CollectReport | pytest.TestReport] = []
self.collection_failed = False
config.pluginmanager.register(self)
config.option.looponfail = False
config.option.usepdb = False
def DEBUG(self, *args: object) -> None:
if self.config.option.debug:
print(" ".join(map(str, args)))
@pytest.hookimpl
def pytest_collection(self, session: pytest.Session) -> bool:
self.session = session
self.trails = self.current_command
hook = self.session.ihook
try:
items = session.perform_collect(self.trails or None)
except pytest.UsageError:
items = session.perform_collect(None)
hook.pytest_collection_modifyitems(
session=session, config=session.config, items=items
)
hook.pytest_collection_finish(session=session)
return True
@pytest.hookimpl
def pytest_runtest_logreport(self, report: pytest.TestReport) -> None:
if report.failed:
self.recorded_failures.append(report)
@pytest.hookimpl
def pytest_collectreport(self, report: pytest.CollectReport) -> None:
if report.failed:
self.recorded_failures.append(report)
self.collection_failed = True
def main(self) -> None:
self.DEBUG("WORKER: received configuration, waiting for command trails")
try:
command = self.channel.receive()
except KeyboardInterrupt:
return # in the worker we can't do much about this
self.DEBUG("received", command)
self.current_command = command
self.config.hook.pytest_cmdline_main(config=self.config)
trails, failreports = [], []
for rep in self.recorded_failures:
trails.append(rep.nodeid)
loc = rep.longrepr
loc = str(getattr(loc, "reprcrash", loc))
failreports.append(loc)
result = (trails, failreports, self.collection_failed)
self.channel.send(result)
| WorkerFailSession |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_validation.py | {
"start": 7507,
"end": 10079
} | class ____:
async def test_work_pool_template_validation_empty_block_document(
self,
session,
work_pool,
empty_block_doc_ref_template,
):
with pytest.raises(HTTPException, match="404: Block not found."):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
empty_block_doc_ref_template,
)
async def test_work_pool_template_validation_missing_block_document(
self,
session,
work_pool,
missing_block_doc_ref_template,
):
with pytest.raises(HTTPException, match="404: Block not found."):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
missing_block_doc_ref_template,
)
async def test_work_pool_template_validation_malicious_block_document(
self,
session,
work_pool,
malicious_block_doc_ref_template,
):
with pytest.raises(HTTPException, match="404: Block not found."):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
malicious_block_doc_ref_template,
)
async def test_work_pool_template_validation_block_document_reference_incorrect_type(
self,
session,
work_pool,
incorrect_type_block_ref_template,
):
with pytest.raises(
HTTPException, match="{'foo': 'bar'} is not of type 'string'"
):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
incorrect_type_block_ref_template,
)
async def test_work_pool_template_validation_block_document_reference_incorrect_type_empty_dict(
self,
session,
work_pool,
empty_block_ref_template,
):
with pytest.raises(HTTPException, match="{} is not of type 'string'"):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
empty_block_ref_template,
)
async def test_work_pool_template_validation_valid_block_document_reference(
self,
session,
work_pool,
block_ref_template,
):
await validate_job_variable_defaults_for_work_pool(
session,
work_pool.name,
block_ref_template,
)
| TestWorkPoolValidation |
python | getsentry__sentry | tests/sentry/replays/tasks/test_delete_replays_bulk.py | {
"start": 624,
"end": 12388
} | class ____(APITestCase, ReplaysSnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project(name="test_project")
self.range_start = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(days=1)
self.range_end = datetime.datetime.now(tz=datetime.UTC)
self.query = ""
self.environments = ["prod"]
# Create a deletion job
self.job = ReplayDeletionJobModel.objects.create(
organization_id=self.project.organization.id,
project_id=self.project.id,
range_start=self.range_start,
range_end=self.range_end,
query=self.query,
environments=self.environments,
status="pending",
)
@patch("sentry.replays.tasks.fetch_rows_matching_pattern")
@patch("sentry.replays.tasks.delete_matched_rows")
def test_run_bulk_replay_delete_job_first_run(
self, mock_delete_matched_rows: MagicMock, mock_fetch_rows: MagicMock
) -> None:
"""Test the first run of the bulk deletion job"""
# Mock the fetch_rows_matching_pattern to return some rows
mock_fetch_rows.return_value = {
"rows": [
{
"retention_days": 90,
"replay_id": "a",
"max_segment_id": 1,
},
{
"retention_days": 90,
"replay_id": "b",
"max_segment_id": 0,
},
],
"has_more": True,
}
# Run the job
run_bulk_replay_delete_job(self.job.id, offset=0)
# Verify the job status was updated
self.job.refresh_from_db()
assert self.job.status == "in-progress", self.job.status
assert self.job.offset == 2, self.job.offset
# Verify the delete operation was called
mock_delete_matched_rows.assert_called_once_with(
self.project.id, mock_fetch_rows.return_value["rows"]
)
# Verify fetch_rows was called with correct parameters
mock_fetch_rows.assert_called_once_with(
project_id=self.project.id,
start=self.range_start,
end=self.range_end,
query=self.query,
environment=self.environments,
limit=100,
offset=0,
)
@patch("sentry.replays.tasks.fetch_rows_matching_pattern")
@patch("sentry.replays.tasks.delete_matched_rows")
def test_run_bulk_replay_delete_job_completion(
self, mock_delete_matched_rows: MagicMock, mock_fetch_rows: MagicMock
) -> None:
"""Test the completion of the bulk deletion job"""
# Mock the fetch_rows_matching_pattern to return no more rows
mock_fetch_rows.return_value = {
"rows": [
{
"retention_days": 90,
"replay_id": "a",
"max_segment_id": 1,
},
{
"retention_days": 90,
"replay_id": "b",
"max_segment_id": None,
},
],
"has_more": False,
}
# Run the job
run_bulk_replay_delete_job(self.job.id, offset=100)
# Verify the job status was updated to completed
self.job.refresh_from_db()
assert self.job.status == "completed", self.job.status
# Verify the delete operation was called
mock_delete_matched_rows.assert_called_once_with(
self.project.id, mock_fetch_rows.return_value["rows"]
)
# Verify fetch_rows was called with correct parameters
mock_fetch_rows.assert_called_once_with(
project_id=self.project.id,
start=self.range_start,
end=self.range_end,
query=self.query,
environment=self.environments,
limit=100,
offset=100,
)
@patch("sentry.replays.tasks.fetch_rows_matching_pattern")
@patch("sentry.replays.tasks.delete_matched_rows")
def test_run_bulk_replay_delete_job_no_rows(
self, mock_delete_matched_rows: MagicMock, mock_fetch_rows: MagicMock
) -> None:
"""Test the bulk deletion job when no rows are found"""
# Mock the fetch_rows_matching_pattern to return no rows
mock_fetch_rows.return_value = {
"rows": [],
"has_more": False,
}
# Run the job
run_bulk_replay_delete_job(self.job.id, offset=0)
# Verify the job status was updated to completed
self.job.refresh_from_db()
assert self.job.status == "completed"
# Verify delete_matched_rows was not called since there were no rows
mock_delete_matched_rows.assert_not_called()
# Verify fetch_rows was called with correct parameters
mock_fetch_rows.assert_called_once_with(
project_id=self.project.id,
start=self.range_start,
end=self.range_end,
query=self.query,
environment=self.environments,
limit=100,
offset=0,
)
def test_run_bulk_replay_delete_job_chained_runs(self) -> None:
project = self.create_project()
t1 = datetime.datetime.now() - datetime.timedelta(seconds=10)
replay_id1 = uuid.uuid4().hex
replay_id2 = uuid.uuid4().hex
replay_id3 = uuid.uuid4().hex
replay_id4 = uuid.uuid4().hex
self.store_replays(
mock_replay(t1, self.project.id, replay_id1, segment_id=0, environment="prod")
)
self.store_replays(
mock_replay(t1, self.project.id, replay_id2, segment_id=0, environment="prod")
)
self.store_replays(
mock_replay(t1, project.id, replay_id3, segment_id=0, environment="prod")
)
self.store_replays(
mock_replay(t1, self.project.id, replay_id4, segment_id=None, environment="prod")
)
with TaskRunner():
run_bulk_replay_delete_job.delay(self.job.id, offset=0, limit=1)
# Runs were chained.
self.job.refresh_from_db()
assert self.job.status == "completed"
assert self.job.offset == 2
def test_run_bulk_replay_delete_job_already_failed(self) -> None:
t1 = datetime.datetime.now() - datetime.timedelta(seconds=10)
replay_id1 = uuid.uuid4().hex
self.store_replays(
mock_replay(t1, self.project.id, replay_id1, segment_id=0, environment="prod")
)
self.job.status = DeletionJobStatus.FAILED
self.job.save()
with TaskRunner():
run_bulk_replay_delete_job.delay(self.job.id, offset=0, limit=0)
# Runs were chained.
self.job.refresh_from_db()
assert self.job.status == "failed"
assert self.job.offset == 0
def test_run_bulk_replay_delete_job_no_matches(self) -> None:
with TaskRunner():
run_bulk_replay_delete_job.delay(self.job.id, offset=0)
self.job.refresh_from_db()
assert self.job.status == "completed"
assert self.job.offset == 0
def test_fetch_rows_matching_pattern(self) -> None:
t1 = datetime.datetime.now() - datetime.timedelta(seconds=10)
t2 = datetime.datetime.now() + datetime.timedelta(seconds=10)
t3 = datetime.datetime.now()
replay_id = uuid.uuid4().hex
self.store_replays(
mock_replay(t3, self.project.id, replay_id, segment_id=0, environment="prod")
)
result = fetch_rows_matching_pattern(
self.project.id,
t1,
t2,
query="count_errors:<100",
environment=["prod"],
limit=50,
offset=0,
)
assert len(result["rows"]) == 1
assert result["rows"][0]["replay_id"] == str(uuid.UUID(replay_id))
@patch("sentry.replays.usecases.delete.make_signed_seer_api_request")
@patch("sentry.replays.tasks.fetch_rows_matching_pattern")
@patch("sentry.replays.tasks.delete_matched_rows")
def test_run_bulk_replay_delete_job_has_seer_data_true(
self,
mock_delete_matched_rows: MagicMock,
mock_fetch_rows: MagicMock,
mock_make_seer_api_request: MagicMock,
) -> None:
def row_generator() -> Generator[MatchedRows]:
yield {
"rows": [
{
"retention_days": 90,
"replay_id": "a",
"max_segment_id": 1,
},
{
"retention_days": 90,
"replay_id": "b",
"max_segment_id": 0,
},
],
"has_more": True,
}
yield {
"rows": [
{
"retention_days": 90,
"replay_id": "c",
"max_segment_id": 1,
},
],
"has_more": False,
}
mock_fetch_rows.side_effect = row_generator()
mock_response = Mock()
mock_response.status = 204
mock_make_seer_api_request.return_value = mock_response
with TaskRunner():
run_bulk_replay_delete_job.delay(self.job.id, offset=0, limit=2, has_seer_data=True)
# Runs were chained.
self.job.refresh_from_db()
assert self.job.status == "completed"
assert self.job.offset == 3
assert mock_make_seer_api_request.call_count == 2
first_call = mock_make_seer_api_request.call_args_list[0]
assert first_call[1]["path"] == SEER_DELETE_SUMMARIES_ENDPOINT_PATH
request_body = json.loads(first_call[1]["body"].decode())
assert request_body == {"replay_ids": ["a", "b"]}
second_call = mock_make_seer_api_request.call_args_list[1]
assert second_call[1]["path"] == SEER_DELETE_SUMMARIES_ENDPOINT_PATH
request_body = json.loads(second_call[1]["body"].decode())
assert request_body == {"replay_ids": ["c"]}
@patch("requests.post")
@patch("sentry.replays.tasks.fetch_rows_matching_pattern")
@patch("sentry.replays.tasks.delete_matched_rows")
def test_run_bulk_replay_delete_job_has_seer_data_false(
self, mock_delete_matched_rows: MagicMock, mock_fetch_rows: MagicMock, mock_post: MagicMock
) -> None:
def row_generator() -> Generator[MatchedRows]:
yield {
"rows": [
{
"retention_days": 90,
"replay_id": "a",
"max_segment_id": 1,
},
{
"retention_days": 90,
"replay_id": "b",
"max_segment_id": 0,
},
],
"has_more": True,
}
yield {
"rows": [
{
"retention_days": 90,
"replay_id": "c",
"max_segment_id": 1,
},
],
"has_more": False,
}
mock_fetch_rows.side_effect = row_generator()
with TaskRunner():
run_bulk_replay_delete_job.delay(self.job.id, offset=0, limit=2, has_seer_data=False)
# Runs were chained.
self.job.refresh_from_db()
assert self.job.status == "completed"
assert self.job.offset == 3
assert mock_post.call_count == 0
| TestDeleteReplaysBulk |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 20047,
"end": 21531
} | class ____(unittest.TestCase):
def test_recons_exception(self):
d = 64 # dimension
nb = 1000
rs = np.random.RandomState(1234)
xb = rs.rand(nb, d).astype('float32')
nlist = 10
quantizer = faiss.IndexFlatL2(d) # the other index
index = faiss.IndexIVFFlat(quantizer, d, nlist)
index.train(xb)
index.add(xb)
index.make_direct_map()
index.reconstruct(9)
self.assertRaises(
RuntimeError,
index.reconstruct, 100001
)
def test_reconstruct_after_add(self):
index = faiss.index_factory(10, 'IVF5,SQfp16')
index.train(faiss.randn((100, 10), 123))
index.add(faiss.randn((100, 10), 345))
index.make_direct_map()
index.add(faiss.randn((100, 10), 678))
# should not raise an exception
index.reconstruct(5)
index.reconstruct(150)
def test_reconstruct_larger_ntotal(self):
vect_dim = 5
n_vectors = 10
# Create the dataset
data = np.random.randint(100, size=(n_vectors, vect_dim))
# Build index
index = faiss.IndexFlatL2(vect_dim)
index.add(data)
# Reconstruct < ntotal (10) without an issue
index.reconstruct(9)
# Reconstruct >= ntotal (10), assert exception raise
self.assertRaises(
RuntimeError,
index.reconstruct, 10
)
| TestReconsException |
python | walkccc__LeetCode | solutions/1366. Rank Teams by Votes/1366.py | {
"start": 47,
"end": 192
} | class ____:
name: str
rank: list[int]
def __init__(self, name: str, teamSize: int):
self.name = name
self.rank = [0] * teamSize
| Team |
python | bokeh__bokeh | src/bokeh/models/annotations/dimensional.py | {
"start": 4203,
"end": 4462
} | class ____(Metric):
""" Model for defining reciprocal metric units of measurement, e.g. ``m^{-1}``.
"""
# explicit __init__ to support Init signatures
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
| ReciprocalMetric |
python | realpython__materials | python-callable-instances/factorial.py | {
"start": 0,
"end": 238
} | class ____:
def __init__(self):
self.cache = {0: 1, 1: 1}
def __call__(self, number):
if number not in self.cache:
self.cache[number] = number * self(number - 1)
return self.cache[number]
| Factorial |
python | openai__openai-python | src/openai/resources/fine_tuning/fine_tuning.py | {
"start": 4794,
"end": 5410
} | class ____:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> AsyncJobsWithStreamingResponse:
return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
return AsyncCheckpointsWithStreamingResponse(self._fine_tuning.checkpoints)
@cached_property
def alpha(self) -> AsyncAlphaWithStreamingResponse:
return AsyncAlphaWithStreamingResponse(self._fine_tuning.alpha)
| AsyncFineTuningWithStreamingResponse |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 20150,
"end": 20650
} | class ____(IOSingleNetCDF):
def setup(self, *args, **kwargs):
self.make_ds()
self.filepaths = {}
for engine in _ENGINES:
self.filepaths[engine] = f"test_single_file_with_{engine}.nc"
self.ds.to_netcdf(self.filepaths[engine], engine=engine)
@parameterized(["engine", "chunks"], (_ENGINES, [None, {}]))
def time_read_dataset(self, engine, chunks):
xr.open_dataset(self.filepaths[engine], engine=engine, chunks=chunks)
| IOReadSingleFile |
python | walkccc__LeetCode | solutions/760. Find Anagram Mappings/760.py | {
"start": 0,
"end": 269
} | class ____:
def anagramMappings(self, nums1: list[int], nums2: list[int]) -> list[int]:
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums2):
numToIndices[num].append(i)
return [numToIndices[num].pop() for num in nums1]
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/interfaces.py | {
"start": 9445,
"end": 11069
} | class ____(TypedDict):
"""Dictionary representing the reflected elements corresponding to
a :class:`_schema.Column` object.
The :class:`.ReflectedColumn` structure is returned by the
:class:`.Inspector.get_columns` method.
"""
name: str
"""column name"""
type: TypeEngine[Any]
"""column type represented as a :class:`.TypeEngine` instance."""
nullable: bool
"""boolean flag if the column is NULL or NOT NULL"""
default: Optional[str]
"""column default expression as a SQL string"""
autoincrement: NotRequired[bool]
"""database-dependent autoincrement flag.
This flag indicates if the column has a database-side "autoincrement"
flag of some kind. Within SQLAlchemy, other kinds of columns may
also act as an "autoincrement" column without necessarily having
such a flag on them.
See :paramref:`_schema.Column.autoincrement` for more background on
"autoincrement".
"""
comment: NotRequired[Optional[str]]
"""comment for the column, if present.
Only some dialects return this key
"""
computed: NotRequired[ReflectedComputed]
"""indicates that this column is computed by the database.
Only some dialects return this key.
"""
identity: NotRequired[ReflectedIdentity]
"""indicates this column is an IDENTITY column.
Only some dialects return this key.
.. versionadded:: 1.4 - added support for identity column reflection.
"""
dialect_options: NotRequired[Dict[str, Any]]
"""Additional dialect-specific options detected for this reflected
object"""
| ReflectedColumn |
python | google__jax | jax/experimental/mosaic/gpu/profiler.py | {
"start": 10065,
"end": 10422
} | class ____:
"""Set of IR values referenced by the profiler logic.
The profiler logic is implemented using `CustomPrimitiveOp` which requires
that all IR values referenced in its body be passed as operands to the op.
"""
start: ir.Value
is_profiling_thread: ir.Value
smem_buffer: ir.Value
gmem_buffer: ir.Value
offset: ir.Value
| _ProfilerCtx |
python | django__django | django/contrib/admin/helpers.py | {
"start": 3482,
"end": 4702
} | class ____:
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, str):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(
field in self.form.fields and self.form.fields[field].widget.is_hidden
for field in self.fields
)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(
self.form, field, is_first=(i == 0), model_admin=self.model_admin
)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(
"\n".join(
self.form[f].errors.as_ul()
for f in self.fields
if f not in self.readonly_fields
).strip("\n")
)
| Fieldline |
python | huggingface__transformers | src/transformers/models/rt_detr/modeling_rt_detr.py | {
"start": 69593,
"end": 85583
} | class ____(RTDetrPreTrainedModel):
def __init__(self, config: RTDetrConfig):
super().__init__(config)
# Create backbone
self.backbone = RTDetrConvEncoder(config)
intermediate_channel_sizes = self.backbone.intermediate_channel_sizes
# Create encoder input projection layers
# https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/hybrid_encoder.py#L212
num_backbone_outs = len(intermediate_channel_sizes)
encoder_input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = intermediate_channel_sizes[_]
encoder_input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.encoder_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(config.encoder_hidden_dim),
)
)
self.encoder_input_proj = nn.ModuleList(encoder_input_proj_list)
# Create encoder
self.encoder = RTDetrHybridEncoder(config)
# denoising part
if config.num_denoising > 0:
self.denoising_class_embed = nn.Embedding(
config.num_labels + 1, config.d_model, padding_idx=config.num_labels
)
# decoder embedding
if config.learn_initial_query:
self.weight_embedding = nn.Embedding(config.num_queries, config.d_model)
# encoder head
self.enc_output = nn.Sequential(
nn.Linear(config.d_model, config.d_model),
nn.LayerNorm(config.d_model, eps=config.layer_norm_eps),
)
self.enc_score_head = nn.Linear(config.d_model, config.num_labels)
self.enc_bbox_head = RTDetrMLPPredictionHead(config, config.d_model, config.d_model, 4, num_layers=3)
# init encoder output anchors and valid_mask
if config.anchor_image_size:
self.anchors, self.valid_mask = self.generate_anchors(dtype=self.dtype)
# Create decoder input projection layers
# https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L412
num_backbone_outs = len(config.decoder_in_channels)
decoder_input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = config.decoder_in_channels[_]
decoder_input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=1, bias=False),
nn.BatchNorm2d(config.d_model, config.batch_norm_eps),
)
)
for _ in range(config.num_feature_levels - num_backbone_outs):
decoder_input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(config.d_model, config.batch_norm_eps),
)
)
in_channels = config.d_model
self.decoder_input_proj = nn.ModuleList(decoder_input_proj_list)
# decoder
self.decoder = RTDetrDecoder(config)
self.post_init()
def freeze_backbone(self):
for param in self.backbone.parameters():
param.requires_grad_(False)
def unfreeze_backbone(self):
for param in self.backbone.parameters():
param.requires_grad_(True)
@compile_compatible_method_lru_cache(maxsize=32)
def generate_anchors(self, spatial_shapes=None, grid_size=0.05, device="cpu", dtype=torch.float32):
if spatial_shapes is None:
spatial_shapes = [
[int(self.config.anchor_image_size[0] / s), int(self.config.anchor_image_size[1] / s)]
for s in self.config.feat_strides
]
anchors = []
for level, (height, width) in enumerate(spatial_shapes):
grid_y, grid_x = torch.meshgrid(
torch.arange(end=height, device=device).to(dtype),
torch.arange(end=width, device=device).to(dtype),
indexing="ij",
)
grid_xy = torch.stack([grid_x, grid_y], -1)
grid_xy = grid_xy.unsqueeze(0) + 0.5
grid_xy[..., 0] /= width
grid_xy[..., 1] /= height
wh = torch.ones_like(grid_xy) * grid_size * (2.0**level)
anchors.append(torch.concat([grid_xy, wh], -1).reshape(-1, height * width, 4))
# define the valid range for anchor coordinates
eps = 1e-2
anchors = torch.concat(anchors, 1)
valid_mask = ((anchors > eps) * (anchors < 1 - eps)).all(-1, keepdim=True)
anchors = torch.log(anchors / (1 - anchors))
anchors = torch.where(valid_mask, anchors, torch.tensor(torch.finfo(dtype).max, dtype=dtype, device=device))
return anchors, valid_mask
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[list[dict]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], RTDetrModelOutput]:
r"""
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
labels (`list[Dict]` of len `(batch_size,)`, *optional*):
Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
Examples:
```python
>>> from transformers import AutoImageProcessor, RTDetrModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("PekingU/rtdetr_r50vd")
>>> model = RTDetrModel.from_pretrained("PekingU/rtdetr_r50vd")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 300, 256]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, num_channels, height, width = pixel_values.shape
device = pixel_values.device
if pixel_mask is None:
pixel_mask = torch.ones(((batch_size, height, width)), device=device)
features = self.backbone(pixel_values, pixel_mask)
proj_feats = [self.encoder_input_proj[level](source) for level, (source, mask) in enumerate(features)]
if encoder_outputs is None:
encoder_outputs = self.encoder(
proj_feats,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if output_hidden_states else None,
attentions=encoder_outputs[2]
if len(encoder_outputs) > 2
else encoder_outputs[1]
if output_attentions
else None,
)
# Equivalent to def _get_encoder_input
# https://github.com/lyuwenyu/RT-DETR/blob/94f5e16708329d2f2716426868ec89aa774af016/rtdetr_pytorch/src/zoo/rtdetr/rtdetr_decoder.py#L412
sources = []
for level, source in enumerate(encoder_outputs[0]):
sources.append(self.decoder_input_proj[level](source))
# Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
if self.config.num_feature_levels > len(sources):
_len_sources = len(sources)
sources.append(self.decoder_input_proj[_len_sources](encoder_outputs[0])[-1])
for i in range(_len_sources + 1, self.config.num_feature_levels):
sources.append(self.decoder_input_proj[i](encoder_outputs[0][-1]))
# Prepare encoder inputs (by flattening)
source_flatten = []
spatial_shapes_list = []
spatial_shapes = torch.empty((len(sources), 2), device=device, dtype=torch.long)
for level, source in enumerate(sources):
height, width = source.shape[-2:]
spatial_shapes[level, 0] = height
spatial_shapes[level, 1] = width
spatial_shapes_list.append((height, width))
source = source.flatten(2).transpose(1, 2)
source_flatten.append(source)
source_flatten = torch.cat(source_flatten, 1)
level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
# prepare denoising training
if self.training and self.config.num_denoising > 0 and labels is not None:
(
denoising_class,
denoising_bbox_unact,
attention_mask,
denoising_meta_values,
) = get_contrastive_denoising_training_group(
targets=labels,
num_classes=self.config.num_labels,
num_queries=self.config.num_queries,
class_embed=self.denoising_class_embed,
num_denoising_queries=self.config.num_denoising,
label_noise_ratio=self.config.label_noise_ratio,
box_noise_scale=self.config.box_noise_scale,
)
else:
denoising_class, denoising_bbox_unact, attention_mask, denoising_meta_values = None, None, None, None
batch_size = len(source_flatten)
device = source_flatten.device
dtype = source_flatten.dtype
# prepare input for decoder
if self.training or self.config.anchor_image_size is None:
# Pass spatial_shapes as tuple to make it hashable and make sure
# lru_cache is working for generate_anchors()
spatial_shapes_tuple = tuple(spatial_shapes_list)
anchors, valid_mask = self.generate_anchors(spatial_shapes_tuple, device=device, dtype=dtype)
else:
anchors, valid_mask = self.anchors, self.valid_mask
anchors, valid_mask = anchors.to(device, dtype), valid_mask.to(device, dtype)
# use the valid_mask to selectively retain values in the feature map where the mask is `True`
memory = valid_mask.to(source_flatten.dtype) * source_flatten
output_memory = self.enc_output(memory)
enc_outputs_class = self.enc_score_head(output_memory)
enc_outputs_coord_logits = self.enc_bbox_head(output_memory) + anchors
_, topk_ind = torch.topk(enc_outputs_class.max(-1).values, self.config.num_queries, dim=1)
reference_points_unact = enc_outputs_coord_logits.gather(
dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_coord_logits.shape[-1])
)
enc_topk_bboxes = F.sigmoid(reference_points_unact)
if denoising_bbox_unact is not None:
reference_points_unact = torch.concat([denoising_bbox_unact, reference_points_unact], 1)
enc_topk_logits = enc_outputs_class.gather(
dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, enc_outputs_class.shape[-1])
)
# extract region features
if self.config.learn_initial_query:
target = self.weight_embedding.tile([batch_size, 1, 1])
else:
target = output_memory.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1]))
target = target.detach()
if denoising_class is not None:
target = torch.concat([denoising_class, target], 1)
init_reference_points = reference_points_unact.detach()
# decoder
decoder_outputs = self.decoder(
inputs_embeds=target,
encoder_hidden_states=source_flatten,
encoder_attention_mask=attention_mask,
reference_points=init_reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
enc_outputs = tuple(
value
for value in [enc_topk_logits, enc_topk_bboxes, enc_outputs_class, enc_outputs_coord_logits]
if value is not None
)
dn_outputs = tuple(value if value is not None else None for value in [denoising_meta_values])
tuple_outputs = decoder_outputs + encoder_outputs + (init_reference_points,) + enc_outputs + dn_outputs
return tuple_outputs
return RTDetrModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
intermediate_logits=decoder_outputs.intermediate_logits,
intermediate_reference_points=decoder_outputs.intermediate_reference_points,
intermediate_predicted_corners=decoder_outputs.intermediate_predicted_corners,
initial_reference_points=decoder_outputs.initial_reference_points,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
init_reference_points=init_reference_points,
enc_topk_logits=enc_topk_logits,
enc_topk_bboxes=enc_topk_bboxes,
enc_outputs_class=enc_outputs_class,
enc_outputs_coord_logits=enc_outputs_coord_logits,
denoising_meta_values=denoising_meta_values,
)
@auto_docstring(
custom_intro="""
RT-DETR Model (consisting of a backbone and encoder-decoder) outputting bounding boxes and logits to be further
decoded into scores and classes.
"""
)
| RTDetrModel |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/sftp_to_gcs.py | {
"start": 1376,
"end": 10314
} | class ____(BaseOperator):
"""
Transfer files to Google Cloud Storage from SFTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToGCSOperator`
:param source_path: The sftp remote path. This is the specified file path
for downloading the single file or multiple files from the SFTP server.
You can use only one wildcard within your path. The wildcard can appear
inside the path or at the end of the path.
:param destination_bucket: The bucket to upload to.
:param destination_path: The destination name of the object in the
destination Google Cloud Storage bucket.
If destination_path is not provided file/files will be placed in the
main bucket path.
If a wildcard is supplied in the destination_path argument, this is the
prefix that will be prepended to the final destination objects' paths.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param mime_type: The mime-type string
:param gzip: Allows for file to be compressed and uploaded as gzip
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param sftp_prefetch: Whether to enable SFTP prefetch, the default is True.
:param use_stream: Determines the transfer method from SFTP to GCS.
When ``False`` (default), the file downloads locally
then uploads (may require significant disk space).
When ``True``, the file streams directly without using local disk.
Defaults to ``False``.
:param fail_on_file_not_exist: If True, operator fails when file does not exist,
if False, operator will not fail and skips transfer. Default is True.
"""
template_fields: Sequence[str] = (
"source_path",
"destination_path",
"destination_bucket",
"impersonation_chain",
)
def __init__(
self,
*,
source_path: str,
destination_bucket: str,
destination_path: str | None = None,
gcp_conn_id: str = "google_cloud_default",
sftp_conn_id: str = "ssh_default",
mime_type: str = "application/octet-stream",
gzip: bool = False,
move_object: bool = False,
impersonation_chain: str | Sequence[str] | None = None,
sftp_prefetch: bool = True,
use_stream: bool = False,
fail_on_file_not_exist: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_path = source_path
self.destination_path = destination_path
self.destination_bucket = destination_bucket
self.gcp_conn_id = gcp_conn_id
self.mime_type = mime_type
self.gzip = gzip
self.sftp_conn_id = sftp_conn_id
self.move_object = move_object
self.impersonation_chain = impersonation_chain
self.sftp_prefetch = sftp_prefetch
self.use_stream = use_stream
self.fail_on_file_not_exist = fail_on_file_not_exist
@cached_property
def sftp_hook(self):
return SFTPHook(self.sftp_conn_id)
def execute(self, context: Context):
self.destination_path = self._set_destination_path(self.destination_path)
self.destination_bucket = self._set_bucket_name(self.destination_bucket)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
if WILDCARD in self.source_path:
total_wildcards = self.source_path.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in source_path parameter. "
f"Found {total_wildcards} in {self.source_path}."
)
prefix, delimiter = self.source_path.split(WILDCARD, 1)
base_path = os.path.dirname(prefix)
files, _, _ = self.sftp_hook.get_tree_map(base_path, prefix=prefix, delimiter=delimiter)
for file in files:
destination_path = file.replace(base_path, self.destination_path, 1)
# See issue: https://github.com/apache/airflow/issues/41763
# If the destination_path is not specified, it defaults to an empty string. As a result,
# replacing base_path with an empty string is ineffective, causing the destination_path to
# retain the "/" prefix, if it has.
if not self.destination_path:
destination_path = destination_path.lstrip("/")
self._copy_single_object(gcs_hook, self.sftp_hook, file, destination_path)
else:
destination_object = (
self.destination_path if self.destination_path else self.source_path.rsplit("/", 1)[1]
)
try:
self._copy_single_object(gcs_hook, self.sftp_hook, self.source_path, destination_object)
except FileNotFoundError as e:
if self.fail_on_file_not_exist:
raise e
self.log.info("File %s not found on SFTP server. Skipping transfer.", self.source_path)
return
def _copy_single_object(
self,
gcs_hook: GCSHook,
sftp_hook: SFTPHook,
source_path: str,
destination_object: str,
) -> None:
"""Copy single object."""
self.log.info(
"Executing copy of %s to gs://%s/%s",
source_path,
self.destination_bucket,
destination_object,
)
if self.use_stream:
dest_bucket = gcs_hook.get_bucket(self.destination_bucket)
dest_blob = dest_bucket.blob(destination_object)
with dest_blob.open("wb") as write_stream:
sftp_hook.retrieve_file(source_path, write_stream, prefetch=self.sftp_prefetch)
else:
with NamedTemporaryFile("w") as tmp:
sftp_hook.retrieve_file(source_path, tmp.name, prefetch=self.sftp_prefetch)
gcs_hook.upload(
bucket_name=self.destination_bucket,
object_name=destination_object,
filename=tmp.name,
mime_type=self.mime_type,
gzip=self.gzip,
)
if self.move_object:
self.log.info("Executing delete of %s", source_path)
sftp_hook.delete_file(source_path)
@staticmethod
def _set_destination_path(path: str | None) -> str:
if path is not None:
return path.lstrip("/") if path.startswith("/") else path
return ""
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
def get_openlineage_facets_on_start(self):
from airflow.providers.common.compat.openlineage.facet import Dataset
from airflow.providers.google.cloud.openlineage.utils import extract_ds_name_from_gcs_path
from airflow.providers.openlineage.extractors import OperatorLineage
source_name = extract_ds_name_from_gcs_path(self.source_path.split(WILDCARD, 1)[0])
if self.source_path.startswith("/") and source_name != "/":
source_name = "/" + source_name
if WILDCARD not in self.source_path and not self.destination_path:
dest_name = self.source_path.rsplit("/", 1)[1]
else:
dest_name = extract_ds_name_from_gcs_path(f"{self.destination_path}")
return OperatorLineage(
inputs=[
Dataset(
namespace=f"file://{self.sftp_hook.remote_host}:{self.sftp_hook.port}",
name=source_name,
)
],
outputs=[
Dataset(namespace="gs://" + self._set_bucket_name(self.destination_bucket), name=dest_name)
],
)
| SFTPToGCSOperator |
python | astropy__astropy | astropy/timeseries/sampled.py | {
"start": 414,
"end": 16664
} | class ____(BaseTimeSeries):
"""
A class to represent time series data in tabular form.
`~astropy.timeseries.TimeSeries` provides a class for representing time
series as a collection of values of different quantities measured at specific
points in time (for time series with finite time bins, see the
`~astropy.timeseries.BinnedTimeSeries` class).
`~astropy.timeseries.TimeSeries` is a sub-class of `~astropy.table.QTable`
and thus provides all the standard table maniplation methods available to
tables, but it also provides additional conveniences for dealing with time
series, such as a flexible initializer for setting up the times, a method
for folding time series, and a ``time`` attribute for easy access to the
time values.
See also: https://docs.astropy.org/en/stable/timeseries/
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times,
which can be provided separately, but if it does contain the times they
should be in a column called ``'time'`` to be automatically recognized.
time : `~astropy.time.Time`, `~astropy.time.TimeDelta` or iterable
The times at which the values are sampled - this can be either given
directly as a `~astropy.time.Time` or `~astropy.time.TimeDelta` array
or as any iterable that initializes the `~astropy.time.Time` class. If
this is given, then the remaining time-related arguments should not be used.
time_start : `~astropy.time.Time` or str
The time of the first sample in the time series. This is an alternative
to providing ``time`` and requires that ``time_delta`` is also provided.
time_delta : `~astropy.time.TimeDelta` or `~astropy.units.Quantity` ['time']
The step size in time for the series. This can either be a scalar if
the time series is evenly sampled, or an array of values if it is not.
n_samples : int
The number of time samples for the series. This is only used if both
``time_start`` and ``time_delta`` are provided and are scalar values.
**kwargs : dict, optional
Additional keyword arguments are passed to `~astropy.table.QTable`.
"""
_required_columns = ["time"]
def __init__(
self,
data=None,
*,
time=None,
time_start=None,
time_delta=None,
n_samples=None,
**kwargs,
):
super().__init__(data=data, **kwargs)
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and time_start is None and time_delta is None:
self._required_columns_relax = True
return
# First if time has been given in the table data, we should extract it
# and treat it as if it had been passed as a keyword argument.
if data is not None:
if n_samples is not None:
if n_samples != len(self):
raise TypeError(
"'n_samples' has been given both and it is not the "
"same length as the input data."
)
else:
n_samples = len(self)
if "time" in self.colnames:
if time is None:
time = self.columns["time"]
else:
raise TypeError(
"'time' has been given both in the table and as a keyword argument"
)
if time is None and time_start is None:
raise TypeError("Either 'time' or 'time_start' should be specified")
elif time is not None and time_start is not None:
raise TypeError("Cannot specify both 'time' and 'time_start'")
if time is not None and not isinstance(time, (Time, TimeDelta)):
time = Time(time)
if time_start is not None and not isinstance(time_start, (Time, TimeDelta)):
time_start = Time(time_start)
if time_delta is not None and not isinstance(time_delta, (Quantity, TimeDelta)):
raise TypeError("'time_delta' should be a Quantity or a TimeDelta")
if isinstance(time_delta, TimeDelta):
time_delta = time_delta.sec * u.s
if time_start is not None:
# We interpret this as meaning that time is that of the first
# sample and that the interval is given by time_delta.
if time_delta is None:
raise TypeError("'time' is scalar, so 'time_delta' is required")
if time_delta.isscalar:
time_delta = np.repeat(time_delta, n_samples)
time_delta = np.cumsum(time_delta)
time_delta = np.roll(time_delta, 1)
time_delta[0] = 0.0 * u.s
time = time_start + time_delta
elif len(self.colnames) > 0 and len(time) != len(self):
raise ValueError(
f"Length of 'time' ({len(time)}) should match data length ({n_samples})"
)
elif time_delta is not None:
raise TypeError(
"'time_delta' should not be specified since 'time' is an array"
)
with self._delay_required_column_checks():
if "time" in self.colnames:
self.remove_column("time")
self.add_column(time, index=0, name="time")
@property
def time(self):
"""
The time values.
"""
return self["time"]
def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""
Return a new `~astropy.timeseries.TimeSeries` folded with a period and
epoch.
Parameters
----------
period : `~astropy.units.Quantity` ['time']
The period to use for folding
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity` ['dimensionless', 'time']
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_timeseries : `~astropy.timeseries.TimeSeries`
The folded time series object with phase as the ``time`` column.
"""
if not isinstance(period, Quantity) or period.unit.physical_type != "time":
raise UnitsError("period should be a Quantity in units of time")
folded = self.copy()
if epoch_time is None:
epoch_time = self.time[0]
else:
epoch_time = Time(epoch_time)
period_sec = period.to_value(u.s)
if normalize_phase:
if (
isinstance(epoch_phase, Quantity)
and epoch_phase.unit.physical_type != "dimensionless"
):
raise UnitsError(
"epoch_phase should be a dimensionless Quantity "
"or a float when normalize_phase=True"
)
epoch_phase_sec = epoch_phase * period_sec
else:
if epoch_phase == 0:
epoch_phase_sec = 0.0
else:
if (
not isinstance(epoch_phase, Quantity)
or epoch_phase.unit.physical_type != "time"
):
raise UnitsError(
"epoch_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
epoch_phase_sec = epoch_phase.to_value(u.s)
if wrap_phase is None:
wrap_phase = period_sec / 2
else:
if normalize_phase:
if isinstance(
wrap_phase, Quantity
) and not wrap_phase.unit.is_equivalent(u.one):
raise UnitsError(
"wrap_phase should be dimensionless when normalize_phase=True"
)
else:
if wrap_phase < 0 or wrap_phase > 1:
raise ValueError("wrap_phase should be between 0 and 1")
else:
wrap_phase = wrap_phase * period_sec
else:
if (
isinstance(wrap_phase, Quantity)
and wrap_phase.unit.physical_type == "time"
):
if wrap_phase < 0 or wrap_phase > period:
raise ValueError(
"wrap_phase should be between 0 and the period"
)
else:
wrap_phase = wrap_phase.to_value(u.s)
else:
raise UnitsError(
"wrap_phase should be a Quantity in units "
"of time when normalize_phase=False"
)
relative_time_sec = (
(self.time - epoch_time).sec + epoch_phase_sec + (period_sec - wrap_phase)
) % period_sec - (period_sec - wrap_phase)
folded_time = TimeDelta(relative_time_sec * u.s)
if normalize_phase:
folded_time = (folded_time / period).decompose()
period = period_sec = 1
with folded._delay_required_column_checks():
folded.remove_column("time")
folded.add_column(folded_time, name="time", index=0)
return folded
def __getitem__(self, item):
if self._is_list_or_tuple_of_str(item):
if "time" not in item:
out = QTable(
[self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices,
)
out._groups = groups.TableGroups(
out, indices=self.groups._indices, keys=self.groups._keys
)
return out
return super().__getitem__(item)
def add_column(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_column`.
"""
# Note that the docstring is inherited from QTable
result = super().add_column(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
def add_columns(self, *args, **kwargs):
"""
See :meth:`~astropy.table.Table.add_columns`.
"""
# Note that the docstring is inherited from QTable
result = super().add_columns(*args, **kwargs)
if len(self.indices) == 0 and "time" in self.colnames:
self.add_index("time")
return result
@classmethod
def from_pandas(cls, df, time_scale="utc"):
"""
Convert a :class:`~pandas.DataFrame` to a
:class:`astropy.timeseries.TimeSeries`.
Parameters
----------
df : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance.
time_scale : str
The time scale to pass into `astropy.time.Time`.
Defaults to ``UTC``.
"""
from pandas import DataFrame, DatetimeIndex
if not isinstance(df, DataFrame):
raise TypeError("Input should be a pandas DataFrame")
if not isinstance(df.index, DatetimeIndex):
raise TypeError("DataFrame does not have a DatetimeIndex")
time = Time(df.index, scale=time_scale)
table = Table.from_pandas(df)
return cls(time=time, data=table)
def to_pandas(self):
"""
Convert this :class:`~astropy.timeseries.TimeSeries` to a
:class:`~pandas.DataFrame` with a :class:`~pandas.DatetimeIndex` index.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
"""
return Table(self).to_pandas(index="time")
@classmethod
def read(
cls,
filename,
time_column=None,
time_format=None,
time_scale=None,
format=None,
*args,
**kwargs,
):
"""
Read and parse a file and returns a `astropy.timeseries.TimeSeries`.
This method uses the unified I/O infrastructure in Astropy which makes
it easy to define readers/writers for various classes
(https://docs.astropy.org/en/stable/io/unified.html). By default, this
method will try and use readers defined specifically for the
`astropy.timeseries.TimeSeries` class - however, it is also
possible to use the ``format`` keyword to specify formats defined for
the `astropy.table.Table` class - in this case, you will need to also
provide the column names for column containing the start times for the
bins, as well as other column names (see the Parameters section below
for details)::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('sampled.dat', format='ascii.ecsv',
... time_column='date') # doctest: +SKIP
Parameters
----------
filename : str
File to parse.
format : str
File format specifier.
time_column : str, optional
The name of the time column.
time_format : str, optional
The time format for the time column.
time_scale : str, optional
The time scale for the time column.
*args : tuple, optional
Positional arguments passed through to the data reader.
**kwargs : dict, optional
Keyword arguments passed through to the data reader.
Returns
-------
out : `astropy.timeseries.sampled.TimeSeries`
TimeSeries corresponding to file contents.
Notes
-----
"""
try:
# First we try the readers defined for the BinnedTimeSeries class
return super().read(filename, *args, format=format, **kwargs)
except TypeError:
# Otherwise we fall back to the default Table readers
if time_column is None:
raise ValueError(
"``time_column`` should be provided since the default Table readers"
" are being used."
)
table = Table.read(filename, *args, format=format, **kwargs)
if time_column in table.colnames:
time = Time(
table.columns[time_column], scale=time_scale, format=time_format
)
table.remove_column(time_column)
else:
raise ValueError(
f"Time column '{time_column}' not found in the input data."
)
return cls(time=time, data=table)
| TimeSeries |
python | mlflow__mlflow | mlflow/models/evaluation/validation.py | {
"start": 5992,
"end": 9517
} | class ____:
"""
Internal class for representing validation result per metric.
Not user facing, used for organizing metric failures and generating failure message
more conveniently.
Args:
metric_name: String representing the metric name
candidate_metric_value: value of metric for candidate model
metric_threshold: :py:class: `MetricThreshold<mlflow.models.validation.MetricThreshold>`
The MetricThreshold for the metric.
baseline_metric_value: value of metric for baseline model
"""
missing_candidate = False
missing_baseline = False
threshold_failed = False
min_absolute_change_failed = False
min_relative_change_failed = False
def __init__(
self,
metric_name,
candidate_metric_value,
metric_threshold,
baseline_metric_value=None,
):
self.metric_name = metric_name
self.candidate_metric_value = candidate_metric_value
self.baseline_metric_value = baseline_metric_value
self.metric_threshold = metric_threshold
def __str__(self):
"""
Returns a human-readable string representing the validation result for the metric.
"""
if self.is_success():
return f"Metric {self.metric_name} passed the validation."
if self.missing_candidate:
return (
f"Metric validation failed: metric {self.metric_name} was missing from the "
f"evaluation result of the candidate model."
)
result_strs = []
if self.threshold_failed:
result_strs.append(
f"Metric {self.metric_name} value threshold check failed: "
f"candidate model {self.metric_name} = {self.candidate_metric_value}, "
f"{self.metric_name} threshold = {self.metric_threshold.threshold}."
)
if self.missing_baseline:
result_strs.append(
f"Model comparison failed: metric {self.metric_name} was missing from "
f"the evaluation result of the baseline model."
)
else:
if self.min_absolute_change_failed:
result_strs.append(
f"Metric {self.metric_name} minimum absolute change check failed: "
f"candidate model {self.metric_name} = {self.candidate_metric_value}, "
f"baseline model {self.metric_name} = {self.baseline_metric_value}, "
f"{self.metric_name} minimum absolute change threshold = "
f"{self.metric_threshold.min_absolute_change}."
)
if self.min_relative_change_failed:
result_strs.append(
f"Metric {self.metric_name} minimum relative change check failed: "
f"candidate model {self.metric_name} = {self.candidate_metric_value}, "
f"baseline model {self.metric_name} = {self.baseline_metric_value}, "
f"{self.metric_name} minimum relative change threshold = "
f"{self.metric_threshold.min_relative_change}."
)
return " ".join(result_strs)
def is_success(self):
return (
not self.missing_candidate
and not self.missing_baseline
and not self.threshold_failed
and not self.min_absolute_change_failed
and not self.min_relative_change_failed
)
| _MetricValidationResult |
python | kamyu104__LeetCode-Solutions | Python/buildings-with-an-ocean-view.py | {
"start": 399,
"end": 757
} | class ____(object):
def findBuildings(self, heights):
"""
:type heights: List[int]
:rtype: List[int]
"""
result = []
for i in reversed(xrange(len(heights))):
if not result or heights[result[-1]] < heights[i]:
result.append(i)
result.reverse()
return result
| Solution2 |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/shopify/tests.py | {
"start": 2303,
"end": 3884
} | class ____(ShopifyTests):
"""
Shopify embedded apps (that run within an iFrame) require a JS (not server)
redirect for starting the oauth2 process.
See Also:
https://help.shopify.com/api/sdks/embedded-app-sdk/getting-started#oauth
"""
def login(self, resp_mock, process="login", with_refresh_token=True):
resp = self.client.post(
reverse(self.provider.id + "_login")
+ "?"
+ urlencode({"process": process, "shop": "test"}),
)
self.assertEqual(resp.status_code, HTTPStatus.OK) # No re-direct, JS must do it
actual_content = resp.content.decode("utf8")
self.assertTrue(
"script" in actual_content,
"Content missing script tag. [Actual: {}]".format(actual_content),
)
self.assertTrue(
resp.xframe_options_exempt,
"Redirect JS must be allowed to run in Shopify iframe",
)
self.assertTrue(
"<!DOCTYPE html><html><head>" in actual_content
and "</head><body></body></html>" in actual_content,
"Expected standard HTML skeleton. [Actual: {}]".format(actual_content),
)
p = urlparse(
actual_content.split(";</script>")[0].split('location.href = "')[1]
)
q = parse_qs(p.query)
resp = self._complete_shopify_login(q, resp, resp_mock, with_refresh_token)
return resp
@override_settings(
SOCIALACCOUNT_PROVIDERS={
"shopify": {"AUTH_PARAMS": {"grant_options[]": "per-user"}}
}
)
| ShopifyEmbeddedTests |
python | pallets__markupsafe | src/markupsafe/__init__.py | {
"start": 12090,
"end": 12734
} | class ____:
"""Helper for :meth:`Markup.__mod__`."""
__slots__ = ("obj", "escape")
def __init__(self, obj: t.Any, escape: _TPEscape) -> None:
self.obj: t.Any = obj
self.escape: _TPEscape = escape
def __getitem__(self, key: t.Any, /) -> te.Self:
return self.__class__(self.obj[key], self.escape)
def __str__(self, /) -> str:
return str(self.escape(self.obj))
def __repr__(self, /) -> str:
return str(self.escape(repr(self.obj)))
def __int__(self, /) -> int:
return int(self.obj)
def __float__(self, /) -> float:
return float(self.obj)
| _MarkupEscapeHelper |
python | pypa__pip | tests/functional/test_new_resolver.py | {
"start": 26213,
"end": 69927
} | class ____:
"""
Test installing a package that depends the same package with different
extras, one listed as required and the other as in extra.
"""
@pytest.mark.parametrize(
"pkg_builder",
[
_local_with_setup,
_direct_wheel,
_wheel_from_index,
],
)
def test_new_resolver_extra_merge_in_package(
self, script: PipTestEnvironment, pkg_builder: "PackageBuilder"
) -> None:
create_basic_wheel_for_package(script, "depdev", "1.0.0")
create_basic_wheel_for_package(
script,
"dep",
"1.0.0",
extras={"dev": ["depdev"]},
)
requirement = pkg_builder(
script,
name="pkg",
version="1.0.0",
requires=["dep"],
extras={"dev": ["dep[dev]"]},
)
script.pip(
"install",
"--no-build-isolation",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
requirement + "[dev]",
)
script.assert_installed(pkg="1.0.0", dep="1.0.0", depdev="1.0.0")
def test_new_resolver_build_directory_error_zazo_19(script: PipTestEnvironment) -> None:
"""https://github.com/pradyunsg/zazo/issues/19#issuecomment-631615674
This will first resolve like this:
1. Pin pkg-b==2.0.0 (since pkg-b has fewer choices)
2. Pin pkg-a==3.0.0 -> Conflict due to dependency pkg-b<2
3. Pin pkg-b==1.0.0
Since pkg-b is only available as sdist, both the first and third steps
would trigger building from source. This ensures the preparer can build
different versions of a package for the resolver.
The preparer would fail with the following message if the different
versions end up using the same build directory::
ERROR: pip can't proceed with requirements 'pkg-b ...' due to a
pre-existing build directory (...). This is likely due to a previous
installation that failed. pip is being responsible and not assuming it
can delete this. Please delete it and try again.
"""
create_basic_wheel_for_package(
script,
"pkg_a",
"3.0.0",
depends=["pkg-b<2"],
)
create_basic_wheel_for_package(script, "pkg_a", "2.0.0")
create_basic_wheel_for_package(script, "pkg_a", "1.0.0")
create_basic_sdist_for_package(script, "pkg_b", "2.0.0")
create_basic_sdist_for_package(script, "pkg_b", "1.0.0")
script.pip(
"install",
"--no-build-isolation",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"pkg-a",
"pkg-b",
)
script.assert_installed(pkg_a="3.0.0", pkg_b="1.0.0")
def test_new_resolver_upgrade_same_version(script: PipTestEnvironment) -> None:
create_basic_wheel_for_package(script, "pkg", "2")
create_basic_wheel_for_package(script, "pkg", "1")
script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"pkg",
)
script.assert_installed(pkg="2")
script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"--upgrade",
"pkg",
)
script.assert_installed(pkg="2")
def test_new_resolver_local_and_req(script: PipTestEnvironment) -> None:
source_dir = create_test_package_with_setup(
script,
name="pkg",
version="0.1.0",
)
script.pip(
"install",
"--no-cache-dir",
"--no-index",
source_dir,
"pkg!=0.1.0",
expect_error=True,
)
def test_new_resolver_no_deps_checks_requires_python(
script: PipTestEnvironment,
) -> None:
create_basic_wheel_for_package(
script,
"base",
"0.1.0",
depends=["dep"],
requires_python="<2", # Something that always fails.
)
create_basic_wheel_for_package(
script,
"dep",
"0.2.0",
)
result = script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--no-deps",
"--find-links",
script.scratch_path,
"base",
expect_error=True,
)
message = (
"Package 'base' requires a different Python: "
"{}.{}.{} not in '<2'".format(*sys.version_info[:3])
)
assert message in result.stderr
def test_new_resolver_prefers_installed_in_upgrade_if_latest(
script: PipTestEnvironment,
) -> None:
create_basic_wheel_for_package(script, "pkg", "1")
local_pkg = create_test_package_with_setup(script, name="pkg", version="2")
# Install the version that's not on the index.
script.pip(
"install",
"--no-build-isolation",
"--no-cache-dir",
"--no-index",
local_pkg,
)
# Now --upgrade should still pick the local version because it's "better".
script.pip(
"install",
"--no-build-isolation",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"--upgrade",
"pkg",
)
script.assert_installed(pkg="2")
@pytest.mark.parametrize("N", [2, 10, 20])
def test_new_resolver_presents_messages_when_backtracking_a_lot(
script: PipTestEnvironment, N: int
) -> None:
# Generate a set of wheels that will definitely cause backtracking.
for index in range(1, N + 1):
A_version = f"{index}.0.0"
B_version = f"{index}.0.0"
C_version = f"{index - 1}.0.0"
depends = ["B == " + B_version]
if index != 1:
depends.append("C == " + C_version)
print("A", A_version, "B", B_version, "C", C_version)
create_basic_wheel_for_package(script, "A", A_version, depends=depends)
for index in range(1, N + 1):
B_version = f"{index}.0.0"
C_version = f"{index}.0.0"
depends = ["C == " + C_version]
print("B", B_version, "C", C_version)
create_basic_wheel_for_package(script, "B", B_version, depends=depends)
for index in range(1, N + 1):
C_version = f"{index}.0.0"
print("C", C_version)
create_basic_wheel_for_package(script, "C", C_version)
# Install A
result = script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"A",
)
script.assert_installed(A="1.0.0", B="1.0.0", C="1.0.0")
# These numbers are hard-coded in the code.
if N >= 1:
assert "This could take a while." in result.stdout
if N >= 8:
assert result.stdout.count("This could take a while.") >= 2
if N >= 13:
assert "press Ctrl + C" in result.stdout
@pytest.mark.parametrize(
"metadata_version",
[
"0.1.0+local.1", # Normalized form.
"0.1.0+local_1", # Non-normalized form containing an underscore.
# Non-normalized form containing a dash. This is allowed, installation
# works correctly, but assert_installed() fails because pkg_resources
# cannot handle it correctly. Nobody is complaining about it right now,
# we're probably dropping it for importlib.metadata soon(tm), so let's
# ignore it for the time being.
pytest.param("0.1.0+local-1", marks=pytest.mark.xfail(strict=False)),
],
ids=["meta_dot", "meta_underscore", "meta_dash"],
)
@pytest.mark.parametrize(
"filename_version",
[
("0.1.0+local.1"), # Tools are encouraged to use this.
("0.1.0+local_1"), # But this is allowed (version not normalized).
],
ids=["file_dot", "file_underscore"],
)
def test_new_resolver_check_wheel_version_normalized(
script: PipTestEnvironment,
metadata_version: str,
filename_version: str,
) -> None:
filename = f"simple-{filename_version}-py2.py3-none-any.whl"
wheel_builder = make_wheel(name="simple", version=metadata_version)
wheel_builder.save_to(script.scratch_path / filename)
script.pip(
"install",
"--no-cache-dir",
"--no-index",
"--find-links",
script.scratch_path,
"simple",
)
script.assert_installed(simple="0.1.0+local.1")
def test_new_resolver_does_reinstall_local_sdists(script: PipTestEnvironment) -> None:
    """Installing the same local sdist archive twice reinstalls the package."""
    archive_path = create_basic_sdist_for_package(
        script,
        "pkg",
        "1.0",
    )
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        archive_path,
    )
    script.assert_installed(pkg="1.0")
    result = script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        archive_path,
        expect_stderr=True,
    )
    # A direct archive requirement is re-collected even though the same
    # version is already installed.
    assert "Installing collected packages: pkg" in result.stdout, str(result)
    script.assert_installed(pkg="1.0")
def test_new_resolver_does_reinstall_local_paths(script: PipTestEnvironment) -> None:
    """Installing the same local source directory twice reinstalls the package."""
    pkg = create_test_package_with_setup(script, name="pkg", version="1.0")
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        pkg,
    )
    script.assert_installed(pkg="1.0")
    result = script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        pkg,
    )
    # A local directory requirement is always rebuilt and re-collected.
    assert "Installing collected packages: pkg" in result.stdout, str(result)
    script.assert_installed(pkg="1.0")
def test_new_resolver_does_not_reinstall_when_from_a_local_index(
    script: PipTestEnvironment,
) -> None:
    """A name requirement already satisfied from a local index is not reinstalled."""
    create_basic_sdist_for_package(
        script,
        "simple",
        "0.1.0",
    )
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "simple",
    )
    script.assert_installed(simple="0.1.0")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "simple",
    )
    # Should not reinstall!
    assert "Installing collected packages: simple" not in result.stdout, str(result)
    assert "Requirement already satisfied: simple" in result.stdout, str(result)
    script.assert_installed(simple="0.1.0")
def test_new_resolver_skip_inconsistent_metadata(script: PipTestEnvironment) -> None:
    """A candidate whose filename version disagrees with its metadata version
    is rejected, and the resolver falls back to a consistent candidate."""
    create_basic_wheel_for_package(script, "A", "1")
    a_2 = create_basic_wheel_for_package(script, "A", "2")
    # Lie about the version in the filename: the wheel claims 3 but its
    # metadata still says 2.
    a_2.rename(a_2.parent.joinpath("a-3-py2.py3-none-any.whl"))
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "--verbose",
        "A",
        allow_stderr_warning=True,
    )
    assert (
        " inconsistent version: expected '3', but metadata has '2'"
    ) in result.stdout, str(result)
    script.assert_installed(a="1")
@pytest.mark.parametrize(
    "upgrade",
    [True, False],
    ids=["upgrade", "no-upgrade"],
)
def test_new_resolver_lazy_fetch_candidates(
    script: PipTestEnvironment, upgrade: bool
) -> None:
    """The resolver only fetches the candidates it actually needs: the
    intermediate version 2 should never be touched."""
    create_basic_wheel_for_package(script, "myuberpkg", "1")
    create_basic_wheel_for_package(script, "myuberpkg", "2")
    create_basic_wheel_for_package(script, "myuberpkg", "3")
    # Install an old version first.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "myuberpkg==1",
    )
    # Now install the same package again, maybe with the upgrade flag.
    if upgrade:
        pip_upgrade_args = ["--upgrade"]
    else:
        pip_upgrade_args = []
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "myuberpkg",
        *pip_upgrade_args,
    )
    # pip should install the version preferred by the strategy...
    if upgrade:
        script.assert_installed(myuberpkg="3")
    else:
        script.assert_installed(myuberpkg="1")
    # But should reach there in the best route possible, without trying
    # candidates it does not need to.
    assert "myuberpkg-2" not in result.stdout, str(result)
def test_new_resolver_no_fetch_no_satisfying(script: PipTestEnvironment) -> None:
    """Upgrading a package that is already at the latest version should not
    fetch (``Processing``) any distribution at all."""
    create_basic_wheel_for_package(script, "myuberpkg", "1")
    # Install the package. This should emit a "Processing" message for
    # fetching the distribution from the --find-links page.
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "myuberpkg",
    )
    assert "Processing " in result.stdout, str(result)
    # Try to upgrade the package. This should NOT emit the "Processing"
    # message because the currently installed version is latest.
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "--upgrade",
        "myuberpkg",
    )
    assert "Processing " not in result.stdout, str(result)
def test_new_resolver_does_not_install_unneeded_packages_with_url_constraint(
    script: PipTestEnvironment,
) -> None:
    """A URL constraint on a package nobody requires must not install it."""
    archive_path = create_basic_wheel_for_package(
        script,
        "installed",
        "0.1.0",
    )
    not_installed_path = create_basic_wheel_for_package(
        script,
        "not_installed",
        "0.1.0",
    )
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"not_installed @ {not_installed_path.as_uri()}")
    # Only "installed" is findable through the index directory.
    (script.scratch_path / "index").mkdir()
    archive_path.rename(script.scratch_path / "index" / archive_path.name)
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path / "index",
        "-c",
        constraints_file,
        "installed",
    )
    script.assert_installed(installed="0.1.0")
    script.assert_not_installed("not_installed")
def test_new_resolver_installs_packages_with_url_constraint(
    script: PipTestEnvironment,
) -> None:
    """A requested package whose only source is a URL constraint installs."""
    installed_path = create_basic_wheel_for_package(
        script,
        "installed",
        "0.1.0",
    )
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"installed @ {installed_path.as_uri()}")
    script.pip(
        "install", "--no-cache-dir", "--no-index", "-c", constraints_file, "installed"
    )
    script.assert_installed(installed="0.1.0")
def test_new_resolver_reinstall_link_requirement_with_constraint(
    script: PipTestEnvironment,
) -> None:
    """Installing a URL requirement that is also constrained to the same URL
    succeeds (the same file serves as both -r and -c)."""
    installed_path = create_basic_wheel_for_package(
        script,
        "installed",
        "0.1.0",
    )
    cr_file = script.scratch_path / "constraints.txt"
    cr_file.write_text(f"installed @ {installed_path.as_uri()}")
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-r",
        cr_file,
    )
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-c",
        cr_file,
        "-r",
        cr_file,
    )
    # TODO: strengthen assertion to "second invocation does no work"
    # I don't think this is true yet, but it should be in the future.
    script.assert_installed(installed="0.1.0")
def test_new_resolver_prefers_url_constraint(script: PipTestEnvironment) -> None:
    """A URL constraint wins over a newer version available from the index."""
    installed_path = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    not_installed_path = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"test_pkg @ {installed_path.as_uri()}")
    # Only the newer 0.2.0 is findable through the index directory.
    (script.scratch_path / "index").mkdir()
    not_installed_path.rename(script.scratch_path / "index" / not_installed_path.name)
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path / "index",
        "-c",
        constraints_file,
        "test_pkg",
    )
    script.assert_installed(test_pkg="0.1.0")
def test_new_resolver_prefers_url_constraint_on_update(
    script: PipTestEnvironment,
) -> None:
    """A URL constraint downgrades an already-installed newer version."""
    installed_path = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    not_installed_path = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"test_pkg @ {installed_path.as_uri()}")
    # Only the newer 0.2.0 is findable through the index directory.
    (script.scratch_path / "index").mkdir()
    not_installed_path.rename(script.scratch_path / "index" / not_installed_path.name)
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path / "index",
        "test_pkg",
    )
    script.assert_installed(test_pkg="0.2.0")
    # Re-running with the constraint replaces 0.2.0 with the pinned 0.1.0.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path / "index",
        "-c",
        constraints_file,
        "test_pkg",
    )
    script.assert_installed(test_pkg="0.1.0")
@pytest.mark.parametrize("version_option", ["--constraint", "--requirement"])
def test_new_resolver_fails_with_url_constraint_and_incompatible_version(
    script: PipTestEnvironment,
    version_option: str,
) -> None:
    """A URL constraint pinning an incompatible version must fail resolution.

    The constraint points at test_pkg 0.2.0 while a second file demands
    test_pkg<0.2.0, which is unsatisfiable. Without the constraint, the
    install succeeds (with 0.1.0).
    """
    # 0.1.0 is created only so the final no-constraint install can satisfy
    # "test_pkg<0.2.0" from --find-links; its path is not needed here.
    # (The original code bound it to not_installed_path, which was then
    # immediately shadowed by the 0.2.0 wheel below.)
    create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    not_installed_path = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    url_constraint = script.scratch_path / "constraints.txt"
    url_constraint.write_text(f"test_pkg @ {not_installed_path.as_uri()}")
    version_req = script.scratch_path / "requirements.txt"
    version_req.write_text("test_pkg<0.2.0")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "--constraint",
        url_constraint,
        version_option,
        version_req,
        "test_pkg",
        expect_error=True,
    )
    assert "Cannot install test_pkg" in result.stderr, str(result)
    assert (
        "because these package versions have conflicting dependencies."
    ) in result.stderr, str(result)
    script.assert_not_installed("test_pkg")
    # Assert that pip works properly in the absence of the constraints file.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        version_option,
        version_req,
        "test_pkg",
    )
def test_new_resolver_ignores_unneeded_conflicting_constraints(
    script: PipTestEnvironment,
) -> None:
    """Conflicting URL constraints on a package nobody requires are ignored."""
    version_1 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    version_2 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    create_basic_wheel_for_package(
        script,
        "installed",
        "0.1.0",
    )
    # Two constraints pin test_pkg to two different URLs -- unsatisfiable,
    # but irrelevant because test_pkg is never requested.
    constraints = [
        f"test_pkg @ {version_1.as_uri()}",
        f"test_pkg @ {version_2.as_uri()}",
    ]
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text("\n".join(constraints))
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "-c",
        constraints_file,
        "installed",
    )
    script.assert_not_installed("test_pkg")
    script.assert_installed(installed="0.1.0")
def test_new_resolver_fails_on_needed_conflicting_constraints(
    script: PipTestEnvironment,
) -> None:
    """Conflicting URL constraints on a requested package fail resolution."""
    version_1 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    version_2 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    # Two constraints pin test_pkg to two different URLs -- unsatisfiable.
    constraints = [
        f"test_pkg @ {version_1.as_uri()}",
        f"test_pkg @ {version_2.as_uri()}",
    ]
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text("\n".join(constraints))
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "-c",
        constraints_file,
        "test_pkg",
        expect_error=True,
    )
    assert (
        "Cannot install test_pkg because these package versions have conflicting "
        "dependencies."
    ) in result.stderr, str(result)
    script.assert_not_installed("test_pkg")
    # Assert that pip works properly in the absence of the constraints file.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "test_pkg",
    )
def test_new_resolver_fails_on_conflicting_constraint_and_requirement(
    script: PipTestEnvironment,
) -> None:
    """A URL requirement conflicting with a URL constraint fails resolution."""
    version_1 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.1.0",
    )
    version_2 = create_basic_wheel_for_package(
        script,
        "test_pkg",
        "0.2.0",
    )
    # Constraint pins the 0.1.0 URL while the requirement asks for 0.2.0.
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"test_pkg @ {version_1.as_uri()}")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "-c",
        constraints_file,
        f"test_pkg @ {version_2.as_uri()}",
        expect_error=True,
    )
    assert "Cannot install test-pkg 0.2.0" in result.stderr, str(result)
    assert (
        "because these package versions have conflicting dependencies."
    ) in result.stderr, str(result)
    script.assert_not_installed("test_pkg")
    # Assert that pip works properly in the absence of the constraints file.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        f"test_pkg @ {version_2.as_uri()}",
    )
@pytest.mark.parametrize("editable", [False, True])
def test_new_resolver_succeeds_on_matching_constraint_and_requirement(
    script: PipTestEnvironment, editable: bool
) -> None:
    """A URL requirement matching the same URL constraint installs fine,
    for both regular and editable installs."""
    if editable:
        source_dir = create_test_package_with_setup(
            script, name="test_pkg", version="0.1.0"
        )
    else:
        source_dir = create_basic_wheel_for_package(
            script,
            "test_pkg",
            "0.1.0",
        )
    req_line = f"test_pkg @ {source_dir.as_uri()}"
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(req_line)
    last_args: tuple[str, ...]
    if editable:
        last_args = ("-e", os.fspath(source_dir))
    else:
        last_args = (req_line,)
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        "-c",
        constraints_file,
        *last_args,
    )
    script.assert_installed(test_pkg="0.1.0")
    if editable:
        script.assert_installed_editable("test_pkg")
def test_new_resolver_applies_url_constraint_to_dep(script: PipTestEnvironment) -> None:
    """A URL constraint also pins transitive dependencies, overriding a newer
    index candidate."""
    version_1 = create_basic_wheel_for_package(
        script,
        "dep",
        "0.1.0",
    )
    version_2 = create_basic_wheel_for_package(
        script,
        "dep",
        "0.2.0",
    )
    base = create_basic_wheel_for_package(script, "base", "0.1.0", depends=["dep"])
    # The index only offers base and the newer dep 0.2.0.
    (script.scratch_path / "index").mkdir()
    base.rename(script.scratch_path / "index" / base.name)
    version_2.rename(script.scratch_path / "index" / version_2.name)
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"dep @ {version_1.as_uri()}")
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-c",
        constraints_file,
        "--find-links",
        script.scratch_path / "index",
        "base",
    )
    script.assert_installed(dep="0.1.0")
def test_new_resolver_handles_compatible_wheel_tags_in_constraint_url(
    script: PipTestEnvironment, make_fake_wheel: MakeFakeWheel
) -> None:
    """A URL-constrained wheel installs when its (fake) platform tags match
    the explicitly requested target environment."""
    initial_path = make_fake_wheel("base", "0.1.0", "fakepy1-fakeabi-fakeplat")
    constrained = script.scratch_path / "constrained"
    constrained.mkdir()
    final_path = constrained / initial_path.name
    initial_path.rename(final_path)
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"base @ {final_path.as_uri()}")
    # Request exactly the fake environment the wheel is tagged for.
    result = script.pip(
        "install",
        "--implementation",
        "fakepy",
        "--only-binary=:all:",
        "--python-version",
        "1",
        "--abi",
        "fakeabi",
        "--platform",
        "fakeplat",
        "--target",
        script.scratch_path / "target",
        "--no-cache-dir",
        "--no-index",
        "-c",
        constraints_file,
        "base",
    )
    dist_info = pathlib.Path("scratch", "target", "base-0.1.0.dist-info")
    result.did_create(dist_info)
def test_new_resolver_handles_incompatible_wheel_tags_in_constraint_url(
    script: PipTestEnvironment, make_fake_wheel: MakeFakeWheel
) -> None:
    """A URL-constrained wheel with tags incompatible with the current
    environment makes the requested package uninstallable."""
    initial_path = make_fake_wheel("base", "0.1.0", "fakepy1-fakeabi-fakeplat")
    constrained = script.scratch_path / "constrained"
    constrained.mkdir()
    final_path = constrained / initial_path.name
    initial_path.rename(final_path)
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"base @ {final_path.as_uri()}")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-c",
        constraints_file,
        "base",
        expect_error=True,
    )
    assert (
        "Cannot install base because these package versions have conflicting "
        "dependencies."
    ) in result.stderr, str(result)
    script.assert_not_installed("base")
def test_new_resolver_avoids_incompatible_wheel_tags_in_constraint_url(
    script: PipTestEnvironment, make_fake_wheel: MakeFakeWheel
) -> None:
    """The resolver backtracks to a base version without the dep rather than
    using a URL-constrained dep wheel with incompatible tags."""
    initial_path = make_fake_wheel("dep", "0.1.0", "fakepy1-fakeabi-fakeplat")
    constrained = script.scratch_path / "constrained"
    constrained.mkdir()
    final_path = constrained / initial_path.name
    initial_path.rename(final_path)
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text(f"dep @ {final_path.as_uri()}")
    index = script.scratch_path / "index"
    index.mkdir()
    index_dep = create_basic_wheel_for_package(script, "dep", "0.2.0")
    base = create_basic_wheel_for_package(script, "base", "0.1.0")
    # base 0.2.0 needs dep, but dep is constrained to the unusable wheel.
    base_2 = create_basic_wheel_for_package(script, "base", "0.2.0", depends=["dep"])
    index_dep.rename(index / index_dep.name)
    base.rename(index / base.name)
    base_2.rename(index / base_2.name)
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "-c",
        constraints_file,
        "--find-links",
        script.scratch_path / "index",
        "base",
    )
    script.assert_installed(base="0.1.0")
    script.assert_not_installed("dep")
@pytest.mark.parametrize(
    "suffixes_equivalent, depend_suffix, request_suffix",
    [
        pytest.param(
            True,
            "#egg=foo",
            "",
            id="drop-depend-egg",
        ),
        pytest.param(
            True,
            "",
            "#egg=foo",
            id="drop-request-egg",
        ),
        pytest.param(
            True,
            "#subdirectory=bar&egg=foo",
            "#subdirectory=bar&egg=bar",
            id="drop-egg-only",
        ),
        pytest.param(
            True,
            "#subdirectory=bar&egg=foo",
            "#egg=foo&subdirectory=bar",
            id="fragment-ordering",
        ),
        pytest.param(
            True,
            "?a=1&b=2",
            "?b=2&a=1",
            id="query-opordering",
        ),
        # NOTE: the two ids below were swapped in the original -- the first
        # case keeps the same hash key (sha512) with different values, the
        # second uses two different keys (sha512 vs md5).
        pytest.param(
            False,
            "#sha512=1234567890abcdef",
            "#sha512=abcdef1234567890",
            id="different-values",
        ),
        pytest.param(
            False,
            "#sha512=1234567890abcdef",
            "#md5=1234567890abcdef",
            id="different-keys",
        ),
        pytest.param(
            False,
            "#subdirectory=bar&egg=foo",
            "#subdirectory=rex",
            id="drop-egg-still-different",
        ),
    ],
)
def test_new_resolver_direct_url_equivalent(
    tmp_path: pathlib.Path,
    script: PipTestEnvironment,
    suffixes_equivalent: bool,
    depend_suffix: str,
    request_suffix: str,
) -> None:
    """Two direct URLs that differ only in semantically-irrelevant suffixes
    (egg fragments, fragment/query ordering) satisfy each other; URLs with
    genuinely different hashes or subdirectories do not."""
    pkga = create_basic_wheel_for_package(script, name="pkga", version="1")
    pkgb = create_basic_wheel_for_package(
        script,
        name="pkgb",
        version="1",
        depends=[f"pkga@{pkga.as_uri()}{depend_suffix}"],
    )
    # Make pkgb visible via --find-links, but not pkga.
    find_links = tmp_path.joinpath("find_links")
    find_links.mkdir()
    with open(pkgb, "rb") as f:
        find_links.joinpath(pkgb.name).write_bytes(f.read())
    # Install pkgb from --find-links, and pkga directly but from a different
    # URL suffix as specified in pkgb. This should work!
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        str(find_links),
        f"{pkga.as_uri()}{request_suffix}",
        "pkgb",
        expect_error=(not suffixes_equivalent),
    )
    if suffixes_equivalent:
        script.assert_installed(pkga="1", pkgb="1")
    else:
        script.assert_not_installed("pkga", "pkgb")
def test_new_resolver_direct_url_with_extras(
    tmp_path: pathlib.Path, script: PipTestEnvironment
) -> None:
    """A direct-URL requirement can satisfy another requirement on the same
    package with an extra (pkg2 URL provides pkg2[ext] for pkg3)."""
    pkg1 = create_basic_wheel_for_package(script, name="pkg1", version="1")
    pkg2 = create_basic_wheel_for_package(
        script,
        name="pkg2",
        version="1",
        extras={"ext": ["pkg1"]},
    )
    pkg3 = create_basic_wheel_for_package(
        script,
        name="pkg3",
        version="1",
        depends=["pkg2[ext]"],
    )
    # Make pkg1 and pkg3 visible via --find-links, but not pkg2.
    find_links = tmp_path.joinpath("find_links")
    find_links.mkdir()
    with open(pkg1, "rb") as f:
        find_links.joinpath(pkg1.name).write_bytes(f.read())
    with open(pkg3, "rb") as f:
        find_links.joinpath(pkg3.name).write_bytes(f.read())
    # Install with pkg2 only available with direct URL. The extra-ed direct
    # URL pkg2 should be able to provide pkg2[ext] required by pkg3.
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        str(find_links),
        pkg2,
        "pkg3",
    )
    script.assert_installed(pkg1="1", pkg2="1", pkg3="1")
    # Only the direct-URL install gets a direct_url.json record.
    assert not result.get_created_direct_url("pkg1")
    assert result.get_created_direct_url("pkg2")
    assert not result.get_created_direct_url("pkg3")
def test_new_resolver_modifies_installed_incompatible(
    script: PipTestEnvironment,
) -> None:
    """The resolver may discard an already-installed, initially-pinned
    candidate when it turns out to be incompatible with the request."""
    create_basic_wheel_for_package(script, name="a", version="1")
    create_basic_wheel_for_package(script, name="a", version="2")
    create_basic_wheel_for_package(script, name="a", version="3")
    create_basic_wheel_for_package(script, name="b", version="1", depends=["a==1"])
    create_basic_wheel_for_package(script, name="b", version="2", depends=["a==2"])
    create_basic_wheel_for_package(script, name="c", version="1", depends=["a!=1"])
    create_basic_wheel_for_package(script, name="c", version="2", depends=["a!=1"])
    create_basic_wheel_for_package(script, name="d", version="1", depends=["b", "c"])
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "b==1",
    )
    # d-1 depends on b and c. b-1 is already installed and therefore first
    # pinned, but later found to be incompatible since the "a==1" dependency
    # makes all c versions impossible to satisfy. The resolver should be able to
    # discard b-1 and backtrack, so b-2 is selected instead.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "d==1",
    )
    script.assert_installed(d="1", c="2", b="2", a="2")
def test_new_resolver_transitively_depends_on_unnamed_local(
    script: PipTestEnvironment,
) -> None:
    """A local directory requirement (initially unnamed) can satisfy another
    local package's version-pinned dependency on it."""
    create_basic_wheel_for_package(script, name="certbot-docs", version="1")
    certbot = create_test_package_with_setup(
        script,
        name="certbot",
        version="99.99.0.dev0",
        extras_require={"docs": ["certbot-docs"]},
    )
    # certbot-apache pins the dev version only the local directory provides.
    certbot_apache = create_test_package_with_setup(
        script,
        name="certbot-apache",
        version="99.99.0.dev0",
        install_requires=["certbot>=99.99.0.dev0"],
    )
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        f"{certbot}[docs]",
        certbot_apache,
    )
    script.assert_installed(
        certbot="99.99.0.dev0",
        certbot_apache="99.99.0.dev0",
        certbot_docs="1",
    )
def _to_localhost_uri(path: pathlib.Path) -> str:
# Something like file://localhost/path/to/package
return path.as_uri().replace("///", "//localhost/")
@pytest.mark.parametrize(
    "format_dep",
    [
        pytest.param(pathlib.Path.as_uri, id="emptyhost"),
        pytest.param(_to_localhost_uri, id="localhost"),
    ],
)
@pytest.mark.parametrize(
    "format_input",
    [
        pytest.param(pathlib.Path, id="path"),
        pytest.param(pathlib.Path.as_uri, id="emptyhost"),
        pytest.param(_to_localhost_uri, id="localhost"),
    ],
)
def test_new_resolver_file_url_normalize(
    script: PipTestEnvironment,
    format_dep: Callable[[pathlib.Path], str],
    format_input: Callable[[pathlib.Path], str],
) -> None:
    """file:// URLs spelled with and without a "localhost" authority (or as a
    plain path on the command line) are treated as the same location."""
    lib_a = create_test_package_with_setup(
        script,
        name="lib_a",
        version="1",
    )
    # lib_b depends on lib_a via a file:// URL in one of the spellings.
    lib_b = create_test_package_with_setup(
        script,
        name="lib_b",
        version="1",
        install_requires=[f"lib_a @ {format_dep(lib_a)}"],
    )
    script.pip(
        "install",
        "--no-build-isolation",
        "--no-cache-dir",
        "--no-index",
        format_input(lib_a),
        lib_b,
    )
    script.assert_installed(lib_a="1", lib_b="1")
def test_new_resolver_dont_backtrack_on_extra_if_base_constrained(
    script: PipTestEnvironment,
) -> None:
    """A constraint on the base package also constrains pkg[ext], so the
    resolver never even tries the newer version."""
    create_basic_wheel_for_package(script, "dep", "1.0")
    create_basic_wheel_for_package(script, "pkg", "1.0", extras={"ext": ["dep"]})
    create_basic_wheel_for_package(script, "pkg", "2.0", extras={"ext": ["dep"]})
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text("pkg==1.0")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "--constraint",
        constraints_file,
        "pkg[ext]",
    )
    assert "pkg-2.0" not in result.stdout, "Should not try 2.0 due to constraint"
    script.assert_installed(pkg="1.0", dep="1.0")
@pytest.mark.parametrize("swap_order", [True, False])
@pytest.mark.parametrize("two_extras", [True, False])
def test_new_resolver_dont_backtrack_on_extra_if_base_constrained_in_requirement(
    script: PipTestEnvironment, swap_order: bool, two_extras: bool
) -> None:
    """
    Verify that a requirement with a constraint on a package (either on the base
    or on the base with an extra) causes the resolver to infer the same constraint
    for any (other) extras with the same base.

    :param swap_order: swap the order the install specifiers appear in
    :param two_extras: also add an extra for the constrained specifier
    """
    create_basic_wheel_for_package(script, "dep", "1.0")
    create_basic_wheel_for_package(
        script, "pkg", "1.0", extras={"ext1": ["dep"], "ext2": ["dep"]}
    )
    create_basic_wheel_for_package(
        script, "pkg", "2.0", extras={"ext1": ["dep"], "ext2": ["dep"]}
    )
    to_install: tuple[str, str] = (
        "pkg[ext1]",
        "pkg[ext2]==1.0" if two_extras else "pkg==1.0",
    )
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        *(to_install if not swap_order else reversed(to_install)),
    )
    assert "pkg-2.0" not in result.stdout, "Should not try 2.0 due to constraint"
    script.assert_installed(pkg="1.0", dep="1.0")
@pytest.mark.parametrize("swap_order", [True, False])
@pytest.mark.parametrize("two_extras", [True, False])
def test_new_resolver_dont_backtrack_on_conflicting_constraints_on_extras(
    tmpdir: pathlib.Path,
    virtualenv: VirtualEnvironment,
    script_factory: ScriptFactory,
    swap_order: bool,
    two_extras: bool,
) -> None:
    """
    Verify that conflicting constraints on the same package with different
    extras cause the resolver to trivially reject the request rather than
    trying any candidates.

    :param swap_order: swap the order the install specifiers appear in
    :param two_extras: also add an extra for the second specifier
    """
    # A custom environment is needed to enable the resolver's debug reporter.
    script: PipTestEnvironment = script_factory(
        tmpdir.joinpath("workspace"),
        virtualenv,
        {**os.environ, "PIP_RESOLVER_DEBUG": "1"},
    )
    create_basic_wheel_for_package(script, "dep", "1.0")
    create_basic_wheel_for_package(
        script, "pkg", "1.0", extras={"ext1": ["dep"], "ext2": ["dep"]}
    )
    create_basic_wheel_for_package(
        script, "pkg", "2.0", extras={"ext1": ["dep"], "ext2": ["dep"]}
    )
    # The two specifiers are mutually exclusive (>1 vs ==1.0).
    to_install: tuple[str, str] = (
        "pkg[ext1]>1",
        "pkg[ext2]==1.0" if two_extras else "pkg==1.0",
    )
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        *(to_install if not swap_order else reversed(to_install)),
        expect_error=True,
    )
    assert (
        "pkg-2.0" not in result.stdout or "pkg-1.0" not in result.stdout
    ), "Should only try one of 1.0, 2.0 depending on order"
    assert "Reporter.starting()" in result.stdout, (
        "This should never fail unless the debug reporting format has changed,"
        " in which case the other assertions in this test need to be reviewed."
    )
    assert (
        "Reporter.rejecting_candidate" not in result.stdout
    ), "Should be able to conclude conflict before even selecting a candidate"
    assert (
        "conflict is caused by" in result.stdout
    ), "Resolver should be trivially able to find conflict cause"
def test_new_resolver_respect_user_requested_if_extra_is_installed(
    script: PipTestEnvironment,
) -> None:
    """Upgrading a user-requested base package upgrades it even though only
    its extra-ed form (pkg2[ext]) is depended upon."""
    create_basic_wheel_for_package(script, "pkg1", "1.0")
    create_basic_wheel_for_package(script, "pkg2", "1.0", extras={"ext": ["pkg1"]})
    create_basic_wheel_for_package(script, "pkg2", "2.0", extras={"ext": ["pkg1"]})
    create_basic_wheel_for_package(script, "pkg3", "1.0", depends=["pkg2[ext]"])
    # Install pkg3 with an older pkg2.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "pkg3",
        "pkg2==1.0",
    )
    script.assert_installed(pkg3="1.0", pkg2="1.0", pkg1="1.0")
    # Now upgrade both pkg3 and pkg2. pkg2 should be upgraded although pkg2[ext]
    # is not requested by the user.
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "--upgrade",
        "pkg3",
        "pkg2",
    )
    script.assert_installed(pkg3="1.0", pkg2="2.0", pkg1="1.0")
def test_new_resolver_constraint_on_link_with_extra(
    script: PipTestEnvironment,
) -> None:
    """
    Verify that installing works from a link with both an extra and a constraint.
    """
    wheel: pathlib.Path = create_basic_wheel_for_package(
        script, "pkg", "1.0", extras={"ext": []}
    )
    script.pip(
        "install",
        "--no-cache-dir",
        # no index, no --find-links: only the explicit path
        "--no-index",
        f"{wheel}[ext]",
        "pkg==1",
    )
    script.assert_installed(pkg="1.0")
def test_new_resolver_constraint_on_link_with_extra_indirect(
    script: PipTestEnvironment,
) -> None:
    """
    Verify that installing works from a link with an extra if there is an indirect
    dependency on that same package with the same extra (#12372).
    """
    wheel_one: pathlib.Path = create_basic_wheel_for_package(
        script, "pkg1", "1.0", extras={"ext": []}
    )
    wheel_two: pathlib.Path = create_basic_wheel_for_package(
        script, "pkg2", "1.0", depends=["pkg1[ext]==1.0"]
    )
    script.pip(
        "install",
        "--no-cache-dir",
        # no index, no --find-links: only the explicit path
        wheel_two,
        f"{wheel_one}[ext]",
    )
    script.assert_installed(pkg1="1.0", pkg2="1.0")
def test_new_resolver_do_not_backtrack_on_build_failure(
    script: PipTestEnvironment,
) -> None:
    """A failing sdist build aborts the install rather than triggering
    backtracking to the older working wheel."""
    create_basic_sdist_for_package(script, "pkg1", "2.0", fails_build=True)
    create_basic_wheel_for_package(script, "pkg1", "1.0")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "pkg1",
        expect_error=True,
    )
    assert "Failed to build 'pkg1'" in result.stderr
def test_new_resolver_works_when_failing_package_builds_are_disallowed(
    script: PipTestEnvironment,
) -> None:
    """A constraint excluding the broken sdist lets resolution fall back to
    the working older wheel instead of failing the build."""
    create_basic_wheel_for_package(script, "pkg2", "1.0", depends=["pkg1"])
    create_basic_sdist_for_package(script, "pkg1", "2.0", fails_build=True)
    create_basic_wheel_for_package(script, "pkg1", "1.0")
    constraints_file = script.scratch_path / "constraints.txt"
    constraints_file.write_text("pkg1 != 2.0")
    script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        "-c",
        constraints_file,
        "pkg2",
    )
    script.assert_installed(pkg2="1.0", pkg1="1.0")
@pytest.mark.parametrize("swap_order", [True, False])
def test_new_resolver_comes_from_with_extra(
    script: PipTestEnvironment, swap_order: bool
) -> None:
    """
    Verify that reporting where a dependency comes from is accurate when it comes
    from a package with an extra.

    :param swap_order: swap the order the install specifiers appear in
    """
    create_basic_wheel_for_package(script, "dep", "1.0")
    create_basic_wheel_for_package(script, "pkg", "1.0", extras={"ext": ["dep"]})
    to_install: tuple[str, str] = ("pkg", "pkg[ext]")
    result = script.pip(
        "install",
        "--no-cache-dir",
        "--no-index",
        "--find-links",
        script.scratch_path,
        *(to_install if not swap_order else reversed(to_install)),
    )
    # dep is pulled in by the extra, so it is attributed to pkg[ext].
    assert "(from pkg[ext])" in result.stdout
    assert "(from pkg)" not in result.stdout
    script.assert_installed(pkg="1.0", dep="1.0")
| TestExtraMerge |
python | spack__spack | lib/spack/spack/vendor/jsonschema/exceptions.py | {
"start": 3566,
"end": 3771
} | class ____(_Error):
"""
An instance was invalid under a provided schema.
"""
_word_for_schema_in_error_message = "schema"
_word_for_instance_in_error_message = "instance"
| ValidationError |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/conditional_languages/package.py | {
"start": 216,
"end": 725
} | class ____(Package):
"""Conditional depends on c/cxx/fortran with a variant for each"""
homepage = "https://dev.null"
version("1.0")
variant("c", default=False, description="depend on c")
variant("cxx", default=False, description="depend on cxx")
variant("fortran", default=False, description="depend on fortran")
depends_on("c", type="build", when="+c")
depends_on("cxx", type="build", when="+cxx")
depends_on("fortran", type="build", when="+fortran")
| ConditionalLanguages |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.