language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/issues/endpoints/project_codeowners_index.py | {
"start": 865,
"end": 3974
} | class ____(ProjectCodeOwnersBase):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, project: Project) -> Response:
"""
Retrieve the list of CODEOWNERS configurations for a project
````````````````````````````````````````````
Return a list of a project's CODEOWNERS configuration.
:auth: required
"""
if not self.has_feature(request, project):
raise PermissionDenied
expand = request.GET.getlist("expand", [])
expand.extend(["errors", "renameIdentifier", "hasTargetingContext"])
codeowners: list[ProjectCodeOwners] = list(
ProjectCodeOwners.objects.filter(project=project).order_by("-date_added")
)
return Response(
serialize(
codeowners,
request.user,
serializer=projectcodeowners_serializers.ProjectCodeOwnersSerializer(expand=expand),
),
status.HTTP_200_OK,
)
def post(self, request: Request, project: Project) -> Response:
"""
Upload a CODEOWNERS for a project
`````````````
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string project_id_or_slug: the id or slug of the project to get.
:param string raw: the raw CODEOWNERS text
:param string codeMappingId: id of the RepositoryProjectPathConfig object
:auth: required
"""
if not self.has_feature(request, project):
self.track_response_code("create", PermissionDenied.status_code)
raise PermissionDenied
serializer = ProjectCodeOwnerSerializer(context={"project": project}, data=request.data)
if serializer.is_valid():
project_codeowners = serializer.save()
self.track_response_code("create", status.HTTP_201_CREATED)
user_id = getattr(request.user, "id", None) or None
try:
analytics.record(
CodeOwnersCreated(
user_id=user_id,
organization_id=project.organization_id,
project_id=project.id,
codeowners_id=project_codeowners.id,
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
expand = ["ownershipSyntax", "errors", "hasTargetingContext"]
return Response(
serialize(
project_codeowners,
request.user,
serializer=projectcodeowners_serializers.ProjectCodeOwnersSerializer(
expand=expand
),
),
status=status.HTTP_201_CREATED,
)
self.track_response_code("create", status.HTTP_400_BAD_REQUEST)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| ProjectCodeOwnersEndpoint |
python | ray-project__ray | python/ray/train/v2/_internal/execution/training_report.py | {
"start": 122,
"end": 552
} | class ____:
"""A specification for validation."""
def __init__(
self,
validate_fn: Callable[["Checkpoint", Optional[Dict]], Dict],
validate_config: Dict,
):
self.validate_fn = validate_fn
self.validate_config = validate_config
def __repr__(self) -> str:
return f"ValidationSpec(validate_fn={self.validate_fn}, validate_config={self.validate_config})"
| _ValidationSpec |
python | kubernetes-client__python | kubernetes/client/models/v1_namespace.py | {
"start": 383,
"end": 7166
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1NamespaceSpec',
'status': 'V1NamespaceStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Namespace - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Namespace. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Namespace. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Namespace.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Namespace. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Namespace. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Namespace. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Namespace.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Namespace. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Namespace. # noqa: E501
:return: The metadata of this V1Namespace. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Namespace.
:param metadata: The metadata of this V1Namespace. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Namespace. # noqa: E501
:return: The spec of this V1Namespace. # noqa: E501
:rtype: V1NamespaceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Namespace.
:param spec: The spec of this V1Namespace. # noqa: E501
:type: V1NamespaceSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Namespace. # noqa: E501
:return: The status of this V1Namespace. # noqa: E501
:rtype: V1NamespaceStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Namespace.
:param status: The status of this V1Namespace. # noqa: E501
:type: V1NamespaceStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Namespace):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Namespace):
return True
return self.to_dict() != other.to_dict()
| V1Namespace |
python | fluentpython__example-code | attic/dicts/test_transformdict.py | {
"start": 7288,
"end": 9317
} | class ____(TransformDictTestBase,
mapping_tests.BasicTestMappingProtocol):
TransformDict = TransformDict
type2test = partial(TransformDict, str.lower)
def check_shallow_copy(self, copy_func):
d = self.TransformDict(str_lower, {'Foo': []})
e = copy_func(d)
self.assertIs(e.__class__, self.TransformDict)
self.assertIs(e._transform, str_lower)
self.check_underlying_dict(e, {'foo': []})
e['Bar'] = 6
self.assertEqual(e['bar'], 6)
with self.assertRaises(KeyError):
d['bar']
e['foo'].append(5)
self.assertEqual(d['foo'], [5])
self.assertEqual(set(e), {'Foo', 'Bar'})
def check_deep_copy(self, copy_func):
d = self.TransformDict(str_lower, {'Foo': []})
e = copy_func(d)
self.assertIs(e.__class__, self.TransformDict)
self.assertIs(e._transform, str_lower)
self.check_underlying_dict(e, {'foo': []})
e['Bar'] = 6
self.assertEqual(e['bar'], 6)
with self.assertRaises(KeyError):
d['bar']
e['foo'].append(5)
self.assertEqual(d['foo'], [])
self.check_underlying_dict(e, {'foo': [5], 'bar': 6})
self.assertEqual(set(e), {'Foo', 'Bar'})
def test_copy(self):
self.check_shallow_copy(lambda d: d.copy())
def test_copy_copy(self):
self.check_shallow_copy(copy.copy)
def test_cast_as_dict(self):
d = self.TransformDict(str.lower, {'Foo': 5})
e = dict(d)
self.assertEqual(e, {'Foo': 5})
def test_copy_deepcopy(self):
self.check_deep_copy(copy.deepcopy)
def test_pickling(self):
def pickle_unpickle(obj, proto):
data = pickle.dumps(obj, proto)
return pickle.loads(data)
for proto in range(0, pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
self.check_deep_copy(partial(pickle_unpickle, proto=proto))
| TransformDictMappingTests |
python | doocs__leetcode | solution/1000-1099/1087.Brace Expansion/Solution.py | {
"start": 0,
"end": 849
} | class ____:
def expand(self, s: str) -> List[str]:
def convert(s):
if not s:
return
if s[0] == '{':
j = s.find('}')
items.append(s[1:j].split(','))
convert(s[j + 1 :])
else:
j = s.find('{')
if j != -1:
items.append(s[:j].split(','))
convert(s[j:])
else:
items.append(s.split(','))
def dfs(i, t):
if i == len(items):
ans.append(''.join(t))
return
for c in items[i]:
t.append(c)
dfs(i + 1, t)
t.pop()
items = []
convert(s)
ans = []
dfs(0, [])
ans.sort()
return ans
| Solution |
python | ansible__ansible | lib/ansible/plugins/test/files.py | {
"start": 803,
"end": 1407
} | class ____(object):
""" Ansible file jinja2 tests """
def tests(self):
return {
# file testing
'directory': isdir,
'is_dir': isdir,
'file': isfile,
'is_file': isfile,
'link': islink,
'is_link': islink,
'exists': exists,
'link_exists': lexists,
# path testing
'abs': isabs,
'is_abs': isabs,
'same_file': samefile,
'is_same_file': samefile,
'mount': ismount,
'is_mount': ismount,
}
| TestModule |
python | graphql-python__graphene | graphene/types/uuid.py | {
"start": 181,
"end": 1021
} | class ____(Scalar):
"""
Leverages the internal Python implementation of UUID (uuid.UUID) to provide native UUID objects
in fields, resolvers and input.
"""
@staticmethod
def serialize(uuid):
if isinstance(uuid, str):
uuid = _UUID(uuid)
assert isinstance(uuid, _UUID), f"Expected UUID instance, received {uuid}"
return str(uuid)
@staticmethod
def parse_literal(node, _variables=None):
if isinstance(node, StringValueNode):
return _UUID(node.value)
return Undefined
@staticmethod
def parse_value(value):
if isinstance(value, _UUID):
return value
try:
return _UUID(value)
except (ValueError, AttributeError):
raise GraphQLError(f"UUID cannot represent value: {repr(value)}")
| UUID |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/branch.py | {
"start": 778,
"end": 15777
} | class ____(RunnableSerializable[Input, Output]):
"""`Runnable` that selects which branch to run based on a condition.
The `Runnable` is initialized with a list of `(condition, Runnable)` pairs and
a default branch.
When operating on an input, the first condition that evaluates to True is
selected, and the corresponding `Runnable` is run on the input.
If no condition evaluates to `True`, the default branch is run on the input.
Examples:
```python
from langchain_core.runnables import RunnableBranch
branch = RunnableBranch(
(lambda x: isinstance(x, str), lambda x: x.upper()),
(lambda x: isinstance(x, int), lambda x: x + 1),
(lambda x: isinstance(x, float), lambda x: x * 2),
lambda x: "goodbye",
)
branch.invoke("hello") # "HELLO"
branch.invoke(None) # "goodbye"
```
"""
branches: Sequence[tuple[Runnable[Input, bool], Runnable[Input, Output]]]
"""A list of `(condition, Runnable)` pairs."""
default: Runnable[Input, Output]
"""A `Runnable` to run if no condition is met."""
def __init__(
self,
*branches: tuple[
Runnable[Input, bool]
| Callable[[Input], bool]
| Callable[[Input], Awaitable[bool]],
RunnableLike,
]
| RunnableLike,
) -> None:
"""A `Runnable` that runs one of two branches based on a condition.
Args:
*branches: A list of `(condition, Runnable)` pairs.
Defaults a `Runnable` to run if no condition is met.
Raises:
ValueError: If the number of branches is less than `2`.
TypeError: If the default branch is not `Runnable`, `Callable` or `Mapping`.
TypeError: If a branch is not a `tuple` or `list`.
ValueError: If a branch is not of length `2`.
"""
if len(branches) < _MIN_BRANCHES:
msg = "RunnableBranch requires at least two branches"
raise ValueError(msg)
default = branches[-1]
if not isinstance(
default,
(Runnable, Callable, Mapping), # type: ignore[arg-type]
):
msg = "RunnableBranch default must be Runnable, callable or mapping."
raise TypeError(msg)
default_ = cast(
"Runnable[Input, Output]", coerce_to_runnable(cast("RunnableLike", default))
)
branches_ = []
for branch in branches[:-1]:
if not isinstance(branch, (tuple, list)):
msg = (
f"RunnableBranch branches must be "
f"tuples or lists, not {type(branch)}"
)
raise TypeError(msg)
if len(branch) != _MIN_BRANCHES:
msg = (
f"RunnableBranch branches must be "
f"tuples or lists of length 2, not {len(branch)}"
)
raise ValueError(msg)
condition, runnable = branch
condition = cast("Runnable[Input, bool]", coerce_to_runnable(condition))
runnable = coerce_to_runnable(runnable)
branches_.append((condition, runnable))
super().__init__(
branches=branches_,
default=default_,
)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
runnables = (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for runnable in runnables:
if (
runnable.get_input_schema(config).model_json_schema().get("type")
is not None
):
return runnable.get_input_schema(config)
return super().get_input_schema(config)
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec
for step in (
[self.default]
+ [r for _, r in self.branches]
+ [r for r, _ in self.branches]
)
for spec in step.config_specs
)
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
"""First evaluates the condition, then delegate to `True` or `False` branch.
Args:
input: The input to the `Runnable`.
config: The configuration for the `Runnable`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
Returns:
The output of the branch that was run.
"""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = condition.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = runnable.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = self.default.invoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except BaseException as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(output)
return output
@override
async def ainvoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> Output:
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = await condition.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
output = await runnable.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
)
break
else:
output = await self.default.ainvoke(
input,
config=patch_config(
config, callbacks=run_manager.get_child(tag="branch:default")
),
**kwargs,
)
except BaseException as e:
await run_manager.on_chain_error(e)
raise
await run_manager.on_chain_end(output)
return output
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Output]:
"""First evaluates the condition, then delegate to `True` or `False` branch.
Args:
input: The input to the `Runnable`.
config: The configuration for the `Runnable`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
Yields:
The output of the branch that was run.
"""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
final_output: Output | None = None
final_output_supported = True
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = condition.invoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
for chunk in runnable.stream(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
):
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore[operator]
except TypeError:
final_output = None
final_output_supported = False
break
else:
for chunk in self.default.stream(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag="branch:default"),
),
**kwargs,
):
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore[operator]
except TypeError:
final_output = None
final_output_supported = False
except BaseException as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(final_output)
@override
async def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Output]:
"""First evaluates the condition, then delegate to `True` or `False` branch.
Args:
input: The input to the `Runnable`.
config: The configuration for the `Runnable`.
**kwargs: Additional keyword arguments to pass to the `Runnable`.
Yields:
The output of the branch that was run.
"""
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
final_output: Output | None = None
final_output_supported = True
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = await condition.ainvoke(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"condition:{idx + 1}"),
),
)
if expression_value:
async for chunk in runnable.astream(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag=f"branch:{idx + 1}"),
),
**kwargs,
):
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore[operator]
except TypeError:
final_output = None
final_output_supported = False
break
else:
async for chunk in self.default.astream(
input,
config=patch_config(
config,
callbacks=run_manager.get_child(tag="branch:default"),
),
**kwargs,
):
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore[operator]
except TypeError:
final_output = None
final_output_supported = False
except BaseException as e:
await run_manager.on_chain_error(e)
raise
await run_manager.on_chain_end(final_output)
| RunnableBranch |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 34454,
"end": 35059
} | class ____:
params = [get_benchmark_shapes("TimeDropDuplicatesSeries")]
param_names = ["shape"]
def setup(self, shape):
rows = shape[0]
self.series = IMPL.Series(
np.tile(
IMPL.Index([f"i-{i}" for i in range(rows // 10)], dtype=object).values,
10,
)
)
execute(self.series)
def time_drop_dups(self, shape):
execute(self.series.drop_duplicates())
def time_drop_dups_string(self, shape):
self.series.drop_duplicates(inplace=True)
execute(self.series)
| TimeDropDuplicatesSeries |
python | pandas-dev__pandas | setup.py | {
"start": 8577,
"end": 8869
} | class ____(build_ext):
"""
Custom command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op.
"""
def build_extension(self, ext) -> None:
pass
| CythonCommand |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_helpers/test_execution/test_run_steps.py | {
"start": 14807,
"end": 17726
} | class ____:
def test_init(self):
options = RunStepOptions()
assert options.fail_fast is True
assert options.concurrency == 10
assert options.skip_steps == []
assert options.step_params == {}
options = RunStepOptions(fail_fast=False, concurrency=1, skip_steps=["step1"], step_params={"step1": {"--param1": ["value1"]}})
assert options.fail_fast is False
assert options.concurrency == 1
assert options.skip_steps == ["step1"]
assert options.step_params == {"step1": {"--param1": ["value1"]}}
with pytest.raises(ValueError):
RunStepOptions(skip_steps=["step1"], keep_steps=["step2"])
@pytest.mark.parametrize(
"step_tree, options, expected_skipped_ids",
[
(
[
[StepToRun(id="step1", step=TestStep(test_context)), StepToRun(id="step2", step=TestStep(test_context))],
StepToRun(id="step3", step=TestStep(test_context)),
StepToRun(id="step4", step=TestStep(test_context), depends_on=["step3", "step1"]),
StepToRun(id="step5", step=TestStep(test_context)),
],
RunStepOptions(keep_steps=["step4"]),
{"step2", "step5"},
),
(
[
[StepToRun(id="step1", step=TestStep(test_context)), StepToRun(id="step2", step=TestStep(test_context))],
StepToRun(id="step3", step=TestStep(test_context)),
[
StepToRun(id="step4", step=TestStep(test_context), depends_on=["step1"]),
StepToRun(id="step6", step=TestStep(test_context), depends_on=["step4", "step5"]),
],
StepToRun(id="step5", step=TestStep(test_context), depends_on=["step3"]),
],
RunStepOptions(keep_steps=["step6"]),
{"step2"},
),
(
[
[StepToRun(id="step1", step=TestStep(test_context)), StepToRun(id="step2", step=TestStep(test_context))],
StepToRun(id="step3", step=TestStep(test_context)),
[
StepToRun(id="step4", step=TestStep(test_context), depends_on=["step1"]),
StepToRun(id="step6", step=TestStep(test_context), depends_on=["step4", "step5"]),
],
StepToRun(id="step5", step=TestStep(test_context), depends_on=["step3"]),
],
RunStepOptions(skip_steps=["step1"]),
{"step1"},
),
],
)
def test_get_step_ids_to_skip(self, step_tree, options, expected_skipped_ids):
skipped_ids = options.get_step_ids_to_skip(step_tree)
assert set(skipped_ids) == expected_skipped_ids
| TestRunStepOptions |
python | gevent__gevent | src/gevent/tests/test__refcount.py | {
"start": 2386,
"end": 3457
} | class ____(object):
listening = False
client_data = None
server_port = None
def __init__(self, raise_on_timeout):
self.raise_on_timeout = raise_on_timeout
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.server_port = support.bind_port(self.socket, params.DEFAULT_BIND_ADDR)
except:
self.close()
raise
def close(self):
self.socket.close()
self.socket = None
def handle_request(self):
try:
self.socket.settimeout(SOCKET_TIMEOUT)
self.socket.listen(5)
self.listening = True
try:
conn, _ = self.socket.accept() # pylint:disable=no-member
except socket.timeout:
if self.raise_on_timeout:
raise
return
try:
self.client_data = conn.recv(100)
conn.send(b'bye')
finally:
conn.close()
finally:
self.close()
| Server |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/global_shuffle_test.py | {
"start": 7243,
"end": 8617
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
dataset_range=[1, 10],
reshuffle_each_iteration=[True, False],
prefetch=[True, False],
symbolic_checkpoint=[True, False])))
def testRange(
self,
verify_fn: Callable[..., None],
dataset_range: int,
reshuffle_each_iteration: bool,
prefetch: bool,
symbolic_checkpoint: bool):
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.range(dataset_range)
if prefetch:
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
if symbolic_checkpoint:
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
dataset = dataset.with_options(options)
return dataset
verify_fn(
self,
_build_dataset,
num_outputs=dataset_range,
assert_items_equal=reshuffle_each_iteration)
| GlobalShuffleCheckpointTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/document.py | {
"start": 2022,
"end": 2307
} | class ____:
def __init__(self) -> None:
#: List of lines for the Document text.
self.lines: _ImmutableLineList | None = None
#: List of index positions, pointing to the start of all the lines.
self.line_indexes: list[int] | None = None
| _DocumentCache |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 30711,
"end": 31139
} | class ____(HyperRep):
""" Represent -z*hyper([1, 1], [2], z) == log(1 - z). """
@classmethod
def _expr_small(cls, x):
return log(1 - x)
@classmethod
def _expr_small_minus(cls, x):
return log(1 + x)
@classmethod
def _expr_big(cls, x, n):
return log(x - 1) + (2*n - 1)*pi*I
@classmethod
def _expr_big_minus(cls, x, n):
return log(1 + x) + 2*n*pi*I
| HyperRep_log1 |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 35087,
"end": 39124
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
[`GroupViTEncoderLayer`].
Args:
config: GroupViTTextConfig
"""
def __init__(self, config: GroupViTTextConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| GroupViTTextEncoder |
python | allegroai__clearml | clearml/router/route.py | {
"start": 117,
"end": 3888
} | class ____:
def __init__(
self,
target_url: str,
request_callback: Optional[Callable[[Any, Dict[str, Any]], Any]] = None,
response_callback: Optional[Callable[[Any, Any, Dict[str, Any]], Any]] = None,
session: Optional[Any] = None,
error_callback: Optional[Callable[[Any, Any, Dict[str, Any]], Any]] = None,
) -> None:
self.target_url = target_url
self.request_callback = request_callback
self.response_callback = response_callback
self.error_callback = error_callback
self.session = session
self.persistent_state = {}
self._endpoint_telemetry = None
self._endpoint_telemetry_args = None
def set_endpoint_telemetry_args(
self,
endpoint_name: str = "endpoint",
model_name: str = "model",
model: Any = None,
model_url: Any = None,
model_source: Any = None,
model_version: Any = None,
app_id: Any = None,
app_instance: Any = None,
tags: Any = None,
system_tags: Any = None,
container_id: Any = None,
input_size: Any = None,
input_type: str = "str",
report_statistics: bool = True,
endpoint_url: Any = None,
preprocess_artifact: Any = None,
force_register: bool = False,
) -> None:
self._endpoint_telemetry_args = dict(
endpoint_name=endpoint_name,
model_name=model_name,
model=model,
model_url=model_url,
model_source=model_source,
model_version=model_version,
app_id=app_id,
app_instance=app_instance,
tags=tags,
system_tags=system_tags,
container_id=container_id,
input_size=input_size,
input_type=input_type,
report_statistics=report_statistics,
endpoint_url=endpoint_url,
preprocess_artifact=preprocess_artifact,
force_register=force_register,
)
def start_endpoint_telemetry(self) -> None:
if self._endpoint_telemetry is not None or self._endpoint_telemetry_args is None:
return
self._endpoint_telemetry = EndpointTelemetry(**self._endpoint_telemetry_args)
def stop_endpoint_telemetry(self) -> None:
if self._endpoint_telemetry is None:
return
self._endpoint_telemetry.stop()
self._endpoint_telemetry = None
async def on_request(self, request: Any) -> Any:
new_request = request
if self.request_callback:
new_request = self.request_callback(request, persistent_state=self.persistent_state) or request
if inspect.isawaitable(new_request):
new_request = (await new_request) or request
if self._endpoint_telemetry:
self._endpoint_telemetry.on_request()
return new_request
async def on_response(self, response: Any, request: Any) -> Any:
new_response = response
if self.response_callback:
new_response = self.response_callback(response, request, persistent_state=self.persistent_state) or response
if inspect.isawaitable(new_response):
new_response = (await new_response) or response
if self._endpoint_telemetry:
self._endpoint_telemetry.on_response()
return new_response
async def on_error(self, request: Any, error: Any) -> None:
on_error_result = None
if self.error_callback:
on_error_result = self.error_callback(request, error, persistent_state=self.persistent_state)
if inspect.isawaitable(on_error_result):
await on_error_result
return on_error_result
| Route |
python | pytorch__pytorch | test/inductor/test_mps_basic.py | {
"start": 4475,
"end": 8627
} | class ____(TestCase):
def check_model(self, m, inp, dynamic_shapes=None):
res2 = m(*inp)
ep = torch.export.export(m, inp, dynamic_shapes=dynamic_shapes)
path = torch._inductor.aoti_compile_and_package(ep)
m = torch._inductor.aoti_load_package(path)
res = m(*inp)
assert torch.allclose(res, res2)
def test_add_mps(self):
class M(torch.nn.Module):
def forward(self, x, y):
return x + y
inp = (torch.ones(3, 3, device="mps"), torch.ones(3, 3, device="mps"))
m = M().to("mps")
self.check_model(m, inp)
def test_fallback_mps(self):
class M(torch.nn.Module):
def forward(self, x, y):
return torch.nn.functional.linear(x, y)
inp = (
torch.randn(10, 10, device="mps"),
torch.randn(10, 10, device="mps"),
)
m = M().to("mps")
self.check_model(m, inp)
def test_c10(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return torch.cat(tensors=torch.split(x, 4, dim=1), dim=-2)
inp = (torch.randn(2, 8, device="mps"),)
m = M().to("mps")
self.check_model(m, inp)
def test_two_const(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.y = torch.ones(3, 3, device="mps")
self.z = torch.full((3, 3), 2, device="mps")
def forward(self, x):
return x + self.y + self.z
inp = (torch.ones(3, 3, device="mps"),)
m = Model().to(device="mps")
self.check_model(m, inp)
def test_simple_dynamic(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
add_0 = x + y
return torch.nn.functional.relu(input=add_0, inplace=False)
x = torch.randn(128, 2048, device="mps")
y = torch.randn(128, 2048, device="mps")
inp = (x, y)
m = Model().to(device="mps")
dim0_x = torch.export.Dim("dim0_x", min=1, max=2048)
dynamic_shapes = {"x": {0: dim0_x}, "y": {0: dim0_x}}
self.check_model(m, inp, dynamic_shapes)
def test_dynamic_cat(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, a, b):
return torch.cat([a, b], dim=0)
a = torch.randn(2, 4, device="mps")
b = torch.randn(3, 4, device="mps")
inp = (a, b)
m = Model().to(device="mps")
dim0_a = torch.export.Dim("dim0_a", min=1, max=10)
dim0_b = torch.export.Dim("dim0_b", min=1, max=20)
dynamic_shapes = {"a": {0: dim0_a}, "b": {0: dim0_b}}
self.check_model(m, inp, dynamic_shapes)
def test_reuse_kernel(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
a = torch.sin(x)
b = torch.mm(a, y)
c = torch.sin(b)
d = torch.mm(b, c)
return d
example_inputs = (
torch.randn(87, 87, device="mps"),
torch.randn(87, 87, device="mps"),
)
model = Model()
ep = torch.export.export(model, example_inputs)
package_path = torch._export.aot_compile(ep.module(), example_inputs)
target_str = "aoti_torch_mps_get_kernel_function("
target_count = 1
with open(os.path.splitext(package_path)[0] + ".cpp") as cpp:
src_code = cpp.read()
FileCheck().check_count(
target_str,
target_count,
exactly=True,
).run(src_code)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
if torch.backends.mps.is_available():
run_tests(needs="filelock")
| MPSBasicTestsAOTI |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/testpkg-edgedata/script.py | {
"start": 55,
"end": 1384
} | class ____:
import toplevel_class_existing
import toplevel_class_nonexisting
if a == b:
import toplevel_conditional_existing
import toplevel_conditional_nonexisting
try:
import toplevel_conditional_import_existing
import toplevel_conditional_import_nonexisting
except:
import toplevel_conditional_import2_existing
import toplevel_conditional_import2_nonexisting
try:
import toplevel_import_existing
import toplevel_import_nonexisting
except:
import toplevel_import2_existing
import toplevel_import2_nonexisting
def function():
import function_existing
import function_nonexisting
class MyClass:
import function_class_existing
import function_class_nonexisting
if a == b:
import function_conditional_existing
import function_conditional_nonexisting
try:
import function_conditional_import_existing
import function_conditional_import_nonexisting
except:
import function_conditional_import2_existing
import function_conditional_import2_nonexisting
try:
import function_import_existing
import function_import_nonexisting
except:
import function_import2_existing
import function_import2_nonexisting
| MyClass |
python | getsentry__sentry | tests/sentry/grouping/seer_similarity/test_training_mode.py | {
"start": 416,
"end": 7494
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.event = save_new_event({"message": "Dogs are great!"}, self.project)
self.variants = self.event.get_grouping_variants()
# save_new_event already creates a grouphash, so retrieve it
self.grouphash = GroupHash.objects.get(
hash=self.event.get_primary_hash(), project_id=self.project.id
)
def test_does_nothing_when_feature_not_enabled(self) -> None:
"""Should not send request when feature flag is not enabled"""
with patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues:
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
mock_get_seer_similar_issues.assert_not_called()
def test_does_nothing_when_no_rollout(self) -> None:
"""Should not send request when no new version is being rolled out"""
with (
patch("sentry.seer.similarity.config.SEER_GROUPING_NEW_VERSION", None),
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues,
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
mock_get_seer_similar_issues.assert_not_called()
def test_does_nothing_when_already_sent_to_new_version(self) -> None:
"""Should not send request when grouphash already has new version embedding"""
with (
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues,
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
# Set the metadata to indicate already sent to v2
metadata, _ = GroupHashMetadata.objects.get_or_create(grouphash=self.grouphash)
metadata.seer_model = "v2"
metadata.save()
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
mock_get_seer_similar_issues.assert_not_called()
def test_sends_request_when_never_sent_to_seer(self) -> None:
"""Should send training request when grouphash has no seer_model"""
with (
patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True),
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues,
patch("sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=[]),
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
# Clear the seer_model to simulate never sent to Seer
metadata, _ = GroupHashMetadata.objects.get_or_create(grouphash=self.grouphash)
metadata.seer_model = None
metadata.save()
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
# Should be called with training_mode=True
mock_get_seer_similar_issues.assert_called_once()
call_args = mock_get_seer_similar_issues.call_args
assert call_args[1]["training_mode"] is True
def test_sends_request_when_sent_to_old_version(self) -> None:
"""Should send training request when grouphash was sent to old version (v0 or v1)"""
with (
patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True),
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues,
patch("sentry.grouping.ingest.seer.get_similarity_data_from_seer", return_value=[]),
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
# Test both v0 and v1 behave the same way
for old_version in ["v0", "v1"]:
mock_get_seer_similar_issues.reset_mock()
# Set metadata to old version
metadata, _ = GroupHashMetadata.objects.get_or_create(grouphash=self.grouphash)
metadata.seer_model = old_version
metadata.save()
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
# Should be called with training_mode=True for both versions
mock_get_seer_similar_issues.assert_called_once()
call_args = mock_get_seer_similar_issues.call_args
assert call_args[1]["training_mode"] is True
def test_does_not_send_when_should_call_seer_returns_false(self) -> None:
"""Should not send request when should_call_seer_for_grouping returns False"""
with (
patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=False),
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues"
) as mock_get_seer_similar_issues,
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
# Clear seer_model to make should_send_new_model_embeddings return True
metadata, _ = GroupHashMetadata.objects.get_or_create(grouphash=self.grouphash)
metadata.seer_model = None
metadata.save()
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
# Should not be called because should_call_seer_for_grouping returned False
mock_get_seer_similar_issues.assert_not_called()
def test_captures_exception_without_failing(self) -> None:
"""Should capture exceptions from Seer calls without failing the process"""
test_exception = Exception("Seer service unavailable")
with (
patch("sentry.grouping.ingest.seer.should_call_seer_for_grouping", return_value=True),
patch(
"sentry.grouping.ingest.seer.get_seer_similar_issues",
side_effect=test_exception,
),
patch("sentry.grouping.ingest.seer.sentry_sdk.capture_exception") as mock_capture,
self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE),
):
# Clear seer_model to trigger sending to new model
metadata, _ = GroupHashMetadata.objects.get_or_create(grouphash=self.grouphash)
metadata.seer_model = None
metadata.save()
# Should not raise, exception is caught and handled
maybe_send_seer_for_new_model_training(self.event, self.grouphash, self.variants)
# Should capture the exception with proper tags
mock_capture.assert_called_once_with(
test_exception,
tags={
"event": self.event.event_id,
"project": self.event.project.id,
"grouphash": self.grouphash.hash,
},
)
| MaybeSendSeerForNewModelTrainingTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/defs.py | {
"start": 1780,
"end": 10783
} | class ____(DgClickGroup):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._commands_defined = False
def get_command(self, ctx: click.Context, cmd_name: str) -> Optional[click.Command]:
if not self._commands_defined and cmd_name not in HARDCODED_COMMANDS:
self._define_commands(ctx)
# First try exact match
cmd = super().get_command(ctx, cmd_name)
return cmd or self._get_matching_command(ctx, cmd_name)
def list_commands(self, ctx: click.Context) -> list[str]:
if not self._commands_defined:
self._define_commands(ctx)
return super().list_commands(ctx)
def _define_commands(self, cli_context: click.Context) -> None:
"""Dynamically define a command for each registered component type."""
if not has_config_on_cli_context(cli_context):
cli_context.invoke(not_none(self.callback), **cli_context.params)
config = get_config_from_cli_context(cli_context)
dg_context = DgContext.from_file_discovery_and_command_line_config(Path.cwd(), config)
registry = EnvRegistry.from_dg_context(dg_context)
# Keys where the actual class name is not shared with any other key will use the class name
# as a command alias.
keys_by_name: dict[str, set[EnvRegistryKey]] = {}
for key in registry.keys():
keys_by_name.setdefault(key.name, set()).add(key)
for key, component_type in registry.items():
self._create_subcommand(
key, component_type, use_typename_alias=len(keys_by_name[key.name]) == 1
)
self._commands_defined = True
def _create_subcommand(
self,
key: EnvRegistryKey,
obj: EnvRegistryObjectSnap,
use_typename_alias: bool,
) -> None:
# We need to "reset" the help option names to the default ones because we inherit the parent
# value of context settings from the parent group, which has been customized.
aliases = [
*[alias.to_typename() for alias in obj.aliases],
*([key.name] if use_typename_alias else []),
]
@self.command(
cls=ScaffoldDefsSubCommand,
name=key.to_typename(),
context_settings={"help_option_names": ["-h", "--help"]},
aliases=aliases,
help=obj.description or obj.summary,
)
@click.argument("defs_path", type=str)
@click.pass_context
@cli_telemetry_wrapper
def scaffold_command(
cli_context: click.Context,
defs_path: str,
**other_opts: Any,
) -> None:
f"""Scaffold a {key.name} object.
This command must be run inside a Dagster project directory. The component scaffold will be
placed in submodule `<project_name>.defs.<INSTANCE_NAME>`.
Objects can optionally be passed scaffold parameters. There are two ways to do this:
(1) Passing a single --json-params option with a JSON string of parameters. For example:
dg scaffold foo.bar my_object --json-params '{{"param1": "value", "param2": "value"}}'`.
(2) Passing each parameter as an option. For example:
dg scaffold foo.bar my_object --param1 value1 --param2 value2`
It is an error to pass both --json-params and key-value pairs as options.
"""
cli_config = get_config_from_cli_context(cli_context)
dg_context = DgContext.for_project_environment(Path.cwd(), cli_config)
# json_params will not be present in the key_value_params if no scaffold properties
# are defined.
json_scaffolder_params = other_opts.pop("json_params", None)
# format option is only present if we are dealing with a component. Otherewise we
# default to python for decorator scaffolding. Default is YAML (set by option) for
# components.
scaffolder_format = cast("ScaffoldFormatOptions", other_opts.pop("format", "python"))
# Remanining options are scaffolder params
key_value_scaffolder_params = other_opts
check.invariant(
scaffolder_format in ["yaml", "python"],
"format must be either 'yaml' or 'python'",
)
_core_scaffold(
dg_context,
cli_context,
key,
defs_path,
key_value_scaffolder_params,
scaffolder_format,
json_scaffolder_params,
)
if obj.is_component:
scaffold_command.params.append(
click.Option(
["--format"],
type=click.Choice(["yaml", "python"], case_sensitive=False),
default="yaml",
help="Format of the component configuration (yaml or python)",
)
)
# If there are defined scaffold properties, add them to the command. Also only add
# `--json-params` if there are defined scaffold properties.
if obj.scaffolder_schema and obj.scaffolder_schema.get("properties"):
scaffold_command.params.append(
click.Option(
["--json-params"],
type=str,
default=None,
help="JSON string of scaffolder parameters. Mutually exclusive with passing individual parameters as options.",
callback=parse_json_option,
)
)
for name, field_info in obj.scaffolder_schema["properties"].items():
# All fields are currently optional because they can also be passed under
# `--json-params`
option = json_schema_property_to_click_option(name, field_info, required=False)
scaffold_command.params.append(option)
def _get_matching_command(self, ctx: click.Context, input_cmd: str) -> click.Command:
commands = self.list_commands(ctx)
cmd_query = input_cmd.lower()
matches = sorted([name for name in commands if cmd_query in name.lower()])
# if input is not a substring match for any registered command, try to interpret it as a
# Python reference, load the corresponding registry object, and generate a command on the
# fly
if len(matches) == 0:
snap = self._try_load_input_as_registry_object(input_cmd)
if snap:
self._create_subcommand(snap.key, snap, use_typename_alias=False)
return check.not_none(super().get_command(ctx, snap.key.to_typename()))
else:
exit_with_error(generate_missing_registry_object_error_message(input_cmd))
if len(matches) == 1:
click.echo(f"No exact match found for '{input_cmd}'. Did you mean this one?")
click.echo(f" {matches[0]}")
selection = click.prompt("Choose (y/n)", type=str, default="y")
if selection == "y":
index = 1
elif selection == "n":
click.echo("Exiting.")
ctx.exit(0)
else:
exit_with_error(f"Invalid selection: {selection}. Please choose 'y' or 'n'.")
else:
# Present a menu of options for the user to choose from
click.echo(f"No exact match found for '{input_cmd}'. Did you mean one of these?")
for i, match in enumerate(matches, 1):
click.echo(f"({i}) {match}")
click.echo("(n) quit")
# Get user selection
selection = click.prompt("Select an option (number)", type=str, default="1")
if selection == "n":
click.echo("Exiting.")
ctx.exit(0)
invalid_selection_msg = f"Invalid selection: {selection}. Please choose a number between 1 and {len(matches)}."
if not selection.isdigit():
exit_with_error(invalid_selection_msg)
index = int(selection)
if index < 1 or index > len(matches):
exit_with_error(invalid_selection_msg)
selected_cmd = matches[index - 1]
click.echo(f"Using defs scaffolder: {selected_cmd}")
return check.not_none(super().get_command(ctx, selected_cmd))
def _try_load_input_as_registry_object(self, input_str: str) -> Optional[EnvRegistryObjectSnap]:
from dagster.components.core.snapshot import get_package_entry_snap
if not EnvRegistryKey.is_valid_typename(input_str):
return None
key = EnvRegistryKey.from_typename(input_str)
try:
obj = load_module_object(key.namespace, key.name)
return get_package_entry_snap(key, obj)
except DagsterUnresolvableSymbolError:
return None
| ScaffoldDefsGroup |
python | openai__openai-python | tests/api_resources/beta/threads/test_messages.py | {
"start": 12498,
"end": 25287
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.create(
thread_id="thread_id",
content="string",
role="user",
)
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.create(
thread_id="thread_id",
content="string",
role="user",
attachments=[
{
"file_id": "file_id",
"tools": [{"type": "code_interpreter"}],
}
],
metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.messages.with_raw_response.create(
thread_id="thread_id",
content="string",
role="user",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.messages.with_streaming_response.create(
thread_id="thread_id",
content="string",
role="user",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.create(
thread_id="",
content="string",
role="user",
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.retrieve(
message_id="message_id",
thread_id="thread_id",
)
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.messages.with_raw_response.retrieve(
message_id="message_id",
thread_id="thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.messages.with_streaming_response.retrieve(
message_id="message_id",
thread_id="thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.retrieve(
message_id="message_id",
thread_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.retrieve(
message_id="",
thread_id="thread_id",
)
@parametrize
async def test_method_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.update(
message_id="message_id",
thread_id="thread_id",
)
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.update(
message_id="message_id",
thread_id="thread_id",
metadata={"foo": "string"},
)
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.messages.with_raw_response.update(
message_id="message_id",
thread_id="thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(Message, message, path=["response"])
@parametrize
async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.messages.with_streaming_response.update(
message_id="message_id",
thread_id="thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(Message, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.update(
message_id="message_id",
thread_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.update(
message_id="",
thread_id="thread_id",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.list(
thread_id="thread_id",
)
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.list(
thread_id="thread_id",
after="after",
before="before",
limit=0,
order="asc",
run_id="run_id",
)
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.messages.with_raw_response.list(
thread_id="thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.messages.with_streaming_response.list(
thread_id="thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(AsyncCursorPage[Message], message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.list(
thread_id="",
)
@parametrize
async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
message = await async_client.beta.threads.messages.delete(
message_id="message_id",
thread_id="thread_id",
)
assert_matches_type(MessageDeleted, message, path=["response"])
@parametrize
async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
response = await async_client.beta.threads.messages.with_raw_response.delete(
message_id="message_id",
thread_id="thread_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(MessageDeleted, message, path=["response"])
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
async with async_client.beta.threads.messages.with_streaming_response.delete(
message_id="message_id",
thread_id="thread_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = await response.parse()
assert_matches_type(MessageDeleted, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
with pytest.warns(DeprecationWarning):
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.delete(
message_id="message_id",
thread_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"):
await async_client.beta.threads.messages.with_raw_response.delete(
message_id="",
thread_id="thread_id",
)
| TestAsyncMessages |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/context.py | {
"start": 110489,
"end": 112806
} | class ____(_ColumnEntity):
entity_zero = None
mapper = None
supports_single_entity = False
__slots__ = (
"expr",
"column",
"_label_name",
"entity_zero_or_selectable",
"_extra_entities",
)
def __init__(
self,
compile_state,
column,
entities_collection,
raw_column_index,
is_current_entities,
parent_bundle=None,
):
self.expr = column
self.raw_column_index = raw_column_index
self.translate_raw_column = raw_column_index is not None
if column._is_star:
compile_state.compile_options += {"_is_star": True}
if not is_current_entities or column._is_text_clause:
self._label_name = None
else:
if parent_bundle:
self._label_name = column._proxy_key
else:
self._label_name = compile_state._label_convention(column)
if parent_bundle:
parent_bundle._entities.append(self)
else:
entities_collection.append(self)
self.column = column
self.entity_zero_or_selectable = (
self.column._from_objects[0] if self.column._from_objects else None
)
self._extra_entities = (self.expr, self.column)
self._fetch_column = self._row_processor = None
def corresponds_to(self, entity):
return False
def setup_dml_returning_compile_state(
self,
compile_state: _ORMCompileState,
adapter: Optional[_DMLReturningColFilter],
) -> None:
return self.setup_compile_state(compile_state)
def setup_compile_state(self, compile_state):
current_adapter = compile_state._get_current_adapter()
if current_adapter:
column = current_adapter(self.column, False)
if column is None:
return
else:
column = self.column
if column._annotations:
# annotated columns perform more slowly in compiler and
# result due to the __eq__() method, so use deannotated
column = column._deannotate()
compile_state.dedupe_columns.add(column)
compile_state.primary_columns.append(column)
self._fetch_column = column
| _RawColumnEntity |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 27465,
"end": 27555
} | class ____(TestKill):
def _start_greenlet(self, g):
g.start()
| TestKillAfterStart |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_less_than_or_equal_to_threshold.py | {
"start": 964,
"end": 7743
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = (
"data_profiler.profile_numeric_columns_percent_diff_less_than_or_equal_to_threshold"
)
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - 22
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_percent_diff = metrics.get("data_profiler.profile_percent_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_percent_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_percent_diff["data_stats"]
requested_columns = {}
unavailable_stats = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
unavailable_stats[col] = {}
for stat, threshold in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "ERR_divide_by_zero" or diff_val == "ERR_no_original_value":
unavailable_stats[col][stat] = diff_val
continue
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
below_threshold = is_value_less_than_or_equal_to_threshold(diff_val, threshold)
if not below_threshold:
requested_columns[col][stat] = {
"threshold": threshold,
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
for column in list(unavailable_stats.keys()):
if unavailable_stats[column] == {}:
unavailable_stats.pop(column, None)
if unavailable_stats != {}:
div_by_zero_stats = []
no_original_value = []
for column, stats in unavailable_stats.items():
current_col = copy.deepcopy(limit_check_report_keys_copy[column])
for stat, val in stats.items():
if val == "ERR_divide_by_zero":
div_by_zero_stats.append(column + ": " + stat)
current_col.pop(stat, None)
elif val == "ERR_no_original_value":
no_original_value.append(column + ": " + stat)
current_col.pop(stat, None)
limit_check_report_keys_copy[column] = current_col
warning = "\nWARNING:\n"
if len(div_by_zero_stats) > 0:
warning += "Div By Zero ERROR:\nValue in profile report was 0 for the following column: stat\n"
for div_by_zero_stat in div_by_zero_stats:
warning += " " + div_by_zero_stat + "\n"
if len(no_original_value) > 0:
warning += "Value not Found ERROR:\nStatistic was not found in profile report for the following column: stat\n"
for no_original_value_string in no_original_value:
warning += " " + no_original_value_string + "\n"
warning += "\nTo avoid these errors, you should use the replace 'limit_check_report_keys' with the following:\n"
warning += r"" + json.dumps(limit_check_report_keys_copy, indent=2)
warning += "\n"
warnings.warn(warning)
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_percent_diff_less_than_or_equal_to_threshold"
):
dependencies["data_profiler.profile_percent_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_percent_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsPercentDiffLessThanOrEqualToThreshold |
python | getsentry__sentry | src/sentry/rules/history/backends/postgres.py | {
"start": 801,
"end": 1341
} | class ____(TypedDict):
group: int
count: int
last_triggered: datetime
event_id: str
def convert_results(results: Sequence[_Result]) -> Sequence[RuleGroupHistory]:
group_lookup = {g.id: g for g in Group.objects.filter(id__in=[r["group"] for r in results])}
return [
RuleGroupHistory(group_lookup[r["group"]], r["count"], r["last_triggered"], r["event_id"])
for r in results
]
# temporary hack for removing unnecessary subqueries from group by list
# TODO: remove when upgrade to django 3.0
| _Result |
python | pola-rs__polars | py-polars/tests/unit/io/database/test_read.py | {
"start": 3598,
"end": 3921
} | class ____(NamedTuple):
"""Clarify read test params."""
read_method: Literal["read_database", "read_database_uri"]
connect_using: Any
expected_dtypes: SchemaDefinition
expected_dates: list[date | str]
schema_overrides: SchemaDict | None = None
batch_size: int | None = None
| DatabaseReadTestParams |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 30575,
"end": 32373
} | class ____(forms.ModelForm):
"""Webhook form."""
project = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = WebHook
fields = ["project", "url", "events", "payload", "secret"]
widgets = {
"events": forms.CheckboxSelectMultiple,
}
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project", None)
super().__init__(*args, **kwargs)
if self.instance and self.instance.pk:
# Show secret in the detail form, but as readonly.
self.fields["secret"].disabled = True
else:
# Don't show the secret in the creation form.
self.fields.pop("secret")
self.fields["payload"].initial = json.dumps(
{
"event": "{{ event }}",
"name": "{{ project.name }}",
"slug": "{{ project.slug }}",
"version": "{{ version.slug }}",
"commit": "{{ build.commit }}",
"build": "{{ build.id }}",
"start_date": "{{ build.start_date }}",
"build_url": "{{ build.url }}",
"docs_url": "{{ build.docs_url }}",
},
indent=2,
)
def clean_project(self):
return self.project
def clean_payload(self):
"""Check if the payload is a valid json object and format it."""
payload = self.cleaned_data["payload"]
try:
payload = json.loads(payload)
payload = json.dumps(payload, indent=2)
except Exception as exc:
raise forms.ValidationError(_("The payload must be a valid JSON object.")) from exc
return payload
| WebHookForm |
python | facebook__pyre-check | scripts/explore_pysa_models.py | {
"start": 24124,
"end": 43130
} | class ____(NamedTuple):
condition_kind: ConditionKind
caller: str
caller_port: str
callee: Optional[str]
callee_port: Optional[str]
taint_kind: str
distance: Optional[int] # None for subtraces.
location: SourceLocationWithFilename
shared_local_features: List[Dict[str, str]]
local_features: List[Dict[str, str]]
type_interval: Dict[str, Any]
def key(
self,
) -> Tuple[
ConditionKind,
str,
str,
Optional[str],
Optional[str],
str,
SourceLocationWithFilename,
str,
]:
return (
self.condition_kind,
self.caller,
self.caller_port,
self.callee,
self.callee_port,
self.taint_kind,
self.location,
str(self.type_interval),
)
def get_frames_from_extra_traces(
caller: str, filename: str, path: Optional[str], extra_traces: List[Dict[str, Any]]
) -> Iterable[TaintFrame]:
for extra_trace in extra_traces:
if extra_trace["trace_kind"] == "source":
condition_kind = ConditionKind.SOURCE
elif extra_trace["trace_kind"] == "sink":
condition_kind = ConditionKind.SINK
else:
raise AssertionError(f'unexpected trace_kind: {extra_trace["trace_kind"]}')
if "call" in extra_trace:
call = extra_trace["call"]
for resolved in call["resolves_to"]:
yield TaintFrame(
condition_kind=condition_kind,
caller=caller,
caller_port="subtrace",
callee=resolved,
callee_port=call["port"],
taint_kind=extra_trace["leaf_kind"],
distance=None,
location=SourceLocation.from_json(call["position"]).add_filename(
filename, path
),
shared_local_features=[],
local_features=[],
type_interval={},
)
def get_frames_from_local_taints(
caller: str,
filename: str,
path: Optional[str],
condition_kind: ConditionKind,
port: str,
local_taints: List[Dict[str, Any]],
include_subtraces: bool = False,
deduplicate: bool = True,
) -> Iterable[TaintFrame]:
for local_taint in local_taints:
if include_subtraces and deduplicate:
yield from get_frames_from_extra_traces(
caller, filename, path, local_taint.get("extra_traces", [])
)
if "origin" in local_taint:
for flow_details in local_taint.get("kinds", []):
if include_subtraces and deduplicate:
yield from get_frames_from_extra_traces(
caller, filename, path, flow_details.get("extra_traces", [])
)
for leaf in flow_details.get("leaves", [{}]):
if include_subtraces and not deduplicate:
# subtraces are attached to a taint frame, so those will be duplicated
yield from get_frames_from_extra_traces(
caller, filename, path, local_taint.get("extra_traces", [])
)
yield from get_frames_from_extra_traces(
caller, filename, path, flow_details.get("extra_traces", [])
)
yield TaintFrame(
condition_kind=condition_kind,
caller=caller,
caller_port=port,
callee=leaf.get("name", None),
callee_port=leaf.get("port", None),
taint_kind=flow_details["kind"],
distance=flow_details.get("length", 0),
location=SourceLocation.from_json(
local_taint["origin"]
).add_filename(filename, path),
shared_local_features=local_taint.get("local_features", []),
local_features=flow_details.get("local_features", []),
type_interval=local_taint.get("type_interval", {}),
)
elif "call" in local_taint:
call = local_taint["call"]
for flow_details in local_taint.get("kinds", []):
if include_subtraces and deduplicate:
yield from get_frames_from_extra_traces(
caller, filename, path, flow_details.get("extra_traces", [])
)
for resolved in call.get("resolves_to", []):
if include_subtraces and not deduplicate:
# subtraces are attached to a taint frame, so those will be duplicated
yield from get_frames_from_extra_traces(
caller, filename, path, local_taint.get("extra_traces", [])
)
yield from get_frames_from_extra_traces(
caller, filename, path, flow_details.get("extra_traces", [])
)
yield TaintFrame(
condition_kind=condition_kind,
caller=caller,
caller_port=port,
callee=resolved,
callee_port=call["port"],
taint_kind=flow_details["kind"],
distance=flow_details.get("length", 0),
location=SourceLocation.from_json(
call["position"]
).add_filename(filename, path),
shared_local_features=local_taint.get("local_features", []),
local_features=flow_details.get("local_features", []),
type_interval=local_taint.get("type_interval", {}),
)
elif "declaration" in local_taint:
pass # User-declared fragment.
else:
raise AssertionError("Unexpected trace fragment.")
def get_frames_from_taint_conditions(
caller: str,
filename: str,
path: Optional[str],
condition_kind: ConditionKind,
conditions: List[Dict[str, Any]],
include_subtraces: bool = False,
deduplicate: bool = True,
) -> Iterable[TaintFrame]:
for taint in conditions:
yield from get_frames_from_local_taints(
caller,
filename,
path,
condition_kind,
taint["port"],
taint["taint"],
include_subtraces=include_subtraces,
deduplicate=deduplicate,
)
def print_model_size_stats(callable: str) -> None:
"""Print statistics about a model size (number of frames, etc.)"""
model = get_raw_model(callable)
trace_frames = 0
trace_frames_for_subtraces = 0
source_trace_frames = 0
sink_trace_frames = 0
trace_frames_per_kind = collections.defaultdict(int)
trace_frames_per_callee = collections.defaultdict(int)
for frame in get_frames_from_taint_conditions(
callable,
model["filename"],
model.get("path"),
ConditionKind.SOURCE,
model.get("sources", []),
include_subtraces=True,
deduplicate=False,
):
trace_frames += 1
if frame.caller_port == "subtrace":
trace_frames_for_subtraces += 1
else:
source_trace_frames += 1
trace_frames_per_kind[frame.taint_kind] += 1
trace_frames_per_callee[frame.callee] += 1
for frame in get_frames_from_taint_conditions(
callable,
model["filename"],
model.get("path"),
ConditionKind.SINK,
model.get("sinks", []),
include_subtraces=True,
deduplicate=False,
):
trace_frames += 1
if frame.caller_port == "subtrace":
trace_frames_for_subtraces += 1
else:
sink_trace_frames += 1
trace_frames_per_kind[frame.taint_kind] += 1
trace_frames_per_callee[frame.callee] += 1
print(f"Statistics of model for callable `{callable}`:")
print(f"Trace frames: {trace_frames}")
print(f"Trace frames for subtraces: {trace_frames_for_subtraces}")
print(f"Source trace frames: {source_trace_frames}")
print(f"Sink trace frames: {sink_trace_frames}")
print()
print("Most common taint kinds:")
for taint_kind, count in sorted(
trace_frames_per_kind.items(), key=lambda p: p[1], reverse=True
)[:20]:
print(f"{taint_kind}: {count} trace frames ({count/trace_frames*100.0:.2f}%)")
print()
print("Most common callees:")
for callee, count in sorted(
trace_frames_per_callee.items(), key=lambda p: p[1], reverse=True
)[:20]:
print(f"{callee}: {count} trace frames ({count/trace_frames*100.0:.2f}%)")
def get_issues(
callable: Optional[str] = None, **kwargs: Union[str, bool]
) -> List[Dict[str, Any]]:
"""
Get all issues.
If a callable is provided, only return issues within it.
"""
directory = _assert_loaded()
if callable is None:
callables = directory.index_.issues.items()
else:
positions = directory.index_.issues.get(callable, [])
callables = [(callable, positions)]
issues = []
for _, issue_positions in callables:
for issue_position in issue_positions:
message = json.loads(_read(issue_position))
assert message["kind"] == "issue"
issues.append(message["data"])
options = __default_formatting_options.apply_options(**kwargs)
for index in range(len(issues)):
# TODO(T138283233): implement filtering by kind on issues.
if not options.show_tito_positions:
issues[index] = issue_remove_tito_positions(issues[index])
if not options.show_class_intervals:
issues[index] = issue_remove_class_intervals(issues[index])
if not options.show_features:
issues[index] = issue_remove_features(issues[index])
del issues[index]["features"]
if not options.show_leaf_names:
issues[index] = issue_remove_leaf_names(issues[index])
return issues
def print_issue_trace(trace: Dict[str, Any]) -> None:
for local_taint in trace["roots"]:
print_call_info(local_taint, indent=" " * 4)
print_local_taint(local_taint, indent=" " * 4)
for frame in local_taint["kinds"]:
print_frame(frame, indent=" " * 6)
def print_issues(callable: str, **kwargs: Union[str, bool]) -> None:
"""Pretty print the issues within the given callable."""
issues = get_issues(callable, **kwargs)
options = __default_formatting_options.apply_options(**kwargs)
if options.format == "json":
print_json(issues)
elif options.format == "text":
print(f"Issues for {green(callable)}")
for issue in issues:
print("Issue:")
print(f' Code: {issue["code"]}')
# pyre-ignore: issue contains a location
print_json_location(issue, "Location: ", indent=" " * 2)
print(f' Message: {blue(issue["message"])}')
print(f' Handle: {green(issue["master_handle"])}')
for trace in issue["traces"]:
print(f' {trace["name"].capitalize()}:')
print_issue_trace(trace)
else:
raise AssertionError(f"Unexpected format `{options.format}`")
def get_call_graph(callable: str, **kwargs: Union[str, bool]) -> Dict[str, Any]:
"""Get the call graph for the given callable."""
directory = _assert_loaded()
if callable not in directory.index_.call_graphs:
raise AssertionError(f"no call graph for callable `{callable}`.")
message = json.loads(_read(directory.index_.call_graphs[callable]))
assert message["kind"] == "call_graph"
return message["data"]
def print_call_graph(callable: str, **kwargs: Union[str, bool]) -> None:
"""Pretty print the call graph for the given callable."""
call_graph = get_call_graph(callable, **kwargs)
# TODO(T138283233): Support format=text
print_json(call_graph)
def taint_kind_match(a: str, b: str) -> bool:
return len(a) == len(b) and a.replace("@", ":") == b.replace("@", ":")
def taint_kind_next_hop(kind: str) -> str:
parts = kind.split("@", 1)
if len(parts) == 1:
return kind
else:
return parts[1]
def get_closest_next_frame(
condition_kind: ConditionKind,
callee: str,
port: str,
taint_kind: str,
seen: Set[TaintFrame] = set(),
) -> Optional[TaintFrame]:
model = get_raw_model(callee)
shortest_frame = None
for frame in get_frames_from_taint_conditions(
caller=callee,
filename=model["filename"],
path=model.get("path"),
condition_kind=condition_kind,
conditions=model.get(condition_kind.model_key(), []),
include_subtraces=False,
deduplicate=True,
):
if frame.caller_port != port:
continue
if not taint_kind_match(frame.taint_kind, taint_kind):
continue
if frame.key() in seen:
continue
# TODO: match on type interval
# pyre-ignore: distance is not None
if shortest_frame is None or (shortest_frame.distance > frame.distance):
shortest_frame = frame
return shortest_frame
def print_shortest_trace(
condition_kind_string: str, callee: str, port: str, taint_kind: str
) -> None:
"""Print the shortest trace starting from the given callable, port, kind"""
condition_kind = ConditionKind.from_string(condition_kind_string)
if condition_kind is None:
print(f"error: expected source or sink as condition kind")
return
seen = set()
while True:
frame = get_closest_next_frame(condition_kind, callee, port, taint_kind, seen)
if frame is None:
print(
f"error: could not find next frame for callee `{callee}` port `{port}` kind `{taint_kind}`"
)
return
print()
print(
f"Callee: {blue(frame.callee or '')} Port: {blue(frame.callee_port or '')} Distance: {frame.distance}"
)
frame.location.print(prefix="Location: ", indent="")
if frame.distance == 0: # leaf
return
seen.add(frame)
callee = frame.callee or ""
port = frame.callee_port or ""
taint_kind = taint_kind_next_hop(frame.taint_kind)
def print_reachable_leaves(
condition_kind_string: str,
callable: str,
taint_kind: str,
include_subtraces: bool = False,
) -> None:
condition_kind = ConditionKind.from_string(condition_kind_string)
if condition_kind is None:
print(f"error: expected source or sink as condition kind")
return
# Find all initial frames
cache = {}
stack = []
model = get_raw_model(callable, cache=cache)
# we need to iterate on both sources and sinks if `include_subtraces=True`
for frame in itertools.chain(
get_frames_from_taint_conditions(
caller=callable,
filename=model["filename"],
path=model.get("path"),
condition_kind=ConditionKind.SOURCE,
conditions=model.get("sources", []),
include_subtraces=include_subtraces,
deduplicate=True,
),
get_frames_from_taint_conditions(
caller=callable,
filename=model["filename"],
path=model.get("path"),
condition_kind=ConditionKind.SINK,
conditions=model.get("sinks", []),
include_subtraces=include_subtraces,
deduplicate=True,
),
):
if frame.condition_kind != condition_kind:
continue
if not taint_kind_match(frame.taint_kind, taint_kind):
continue
stack.append(frame)
seen = set()
while len(stack) > 0:
frame = stack.pop()
if frame.key() in seen:
continue
seen.add(frame.key())
if frame.distance == 0: # leaf
print()
print(
f"Caller: {blue(frame.caller or '')} Port: {blue(frame.caller_port or '')}"
)
print(
f"Leaf: {blue(frame.callee or '')} Port: {blue(frame.callee_port or '')}"
)
frame.location.print(prefix="Location: ", indent="")
continue
model = get_raw_model(frame.callee, cache=cache)
for next_frame in get_frames_from_taint_conditions(
caller=frame.callee,
filename=model["filename"],
path=model.get("path"),
condition_kind=condition_kind,
conditions=model.get(condition_kind.model_key(), []),
include_subtraces=False,
deduplicate=True,
):
if next_frame.caller_port != frame.callee_port:
continue
if not taint_kind_match(
next_frame.taint_kind, taint_kind_next_hop(frame.taint_kind)
):
continue
# TODO: match on type interval
if next_frame.key() in seen:
continue
stack.append(next_frame)
def print_help() -> None:
"""Print this help message."""
print("# Pysa Model Explorer")
print("Available commands:")
commands = [
(index, "index('/path/to/results-directory')"),
(callables_containing, "callables_containing('foo.bar')"),
(callables_matching, "callables_matching(r'foo\\..*')"),
(get_model, "get_model('foo.bar')"),
(print_model, "print_model('foo.bar')"),
(get_issues, "get_issues('foo.bar')"),
(print_issues, "print_issues('foo.bar')"),
(get_call_graph, "get_call_graph('foo.bar')"),
(print_call_graph, "print_call_graph('foo.bar')"),
(print_model_size_stats, "print_model_size_stats('foo.bar')"),
(
print_shortest_trace,
"print_shortest_trace('source', 'foo.bar', 'result', 'UserControlled')",
),
(
print_reachable_leaves,
"print_reachable_leaves('source', 'foo.bar', 'UserControlled')",
),
(set_formatting, "set_formatting(show_sources=False)"),
(show_formatting, "show_formatting()"),
(print_json, "print_json({'a': 'b'})"),
(print_help, "print_help()"),
]
max_width = max(len(command[1]) for command in commands)
for command, example in commands:
doc = textwrap.dedent(command.__doc__ or "")
doc = textwrap.indent(doc, prefix=" " * (max_width + 3)).strip()
print(f" {example:<{max_width}} {doc}")
if __name__ == "__main__":
print_help()
| TaintFrame |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_hash_returned.py | {
"start": 507,
"end": 568
} | class ____:
"""Hash through the metaclass."""
| ThirdGoodHash |
python | falconry__falcon | falcon/testing/client.py | {
"start": 12149,
"end": 36279
} | class ____(_ResultBase):
"""Encapsulates the streamed result of an ASGI request.
Args:
body_chunks (list): A list of body chunks. This list may be
appended to after a result object has been instantiated.
status (str): An HTTP status string, including status code and
reason string
headers (list): A list of (header_name, header_value) tuples,
per PEP-3333
task (asyncio.Task): The scheduled simulated request which may or
may not have already finished. :meth:`~.finalize`
will await the task before returning.
req_event_emitter (~falcon.testing.ASGIRequestEventEmitter): A reference
to the event emitter used to simulate events sent to the ASGI
application via its receive() method.
:meth:`~.finalize` will cause the event emitter to
simulate an ``'http.disconnect'`` event before returning.
"""
def __init__(
self,
body_chunks: Sequence[bytes],
status: str,
headers: HeaderIter,
task: asyncio.Task,
req_event_emitter: helpers.ASGIRequestEventEmitter,
):
super().__init__(status, headers)
self._task = task
self._stream = ResultBodyStream(body_chunks)
self._req_event_emitter = req_event_emitter
@property
def stream(self) -> ResultBodyStream:
"""Raw response body, as a byte stream."""
return self._stream
async def finalize(self) -> None:
"""Finalize the encapsulated simulated request.
This method causes the request event emitter to begin emitting
``'http.disconnect'`` events and then awaits the completion of the
asyncio task that is running the simulated ASGI request.
"""
self._req_event_emitter.disconnect()
await self._task
# NOTE(kgriffs): The default of asgi_disconnect_ttl was chosen to be
# relatively long (5 minutes) to help testers notice when something
# appears to be "hanging", which might indicates that the app is
# not handling the reception of events correctly.
def simulate_request(
app: Callable[..., Any], # accept any asgi/wsgi app
method: str = 'GET',
path: str = '/',
query_string: str | None = None,
headers: HeaderArg | None = None,
content_type: str | None = None,
body: str | bytes | None = None,
json: Any | None = None,
file_wrapper: Callable[..., Any] | None = None,
wsgierrors: TextIO | None = None,
params: Mapping[str, Any] | None = None,
params_csv: bool = False,
protocol: str = 'http',
host: str = helpers.DEFAULT_HOST,
remote_addr: str | None = None,
extras: Mapping[str, Any] | None = None,
http_version: str = '1.1',
port: int | None = None,
root_path: str | None = None,
cookies: CookieArg | None = None,
asgi_chunk_size: int = 4096,
asgi_disconnect_ttl: int = 300,
) -> Result:
"""Simulate a request to a WSGI or ASGI application.
Performs a request against a WSGI or ASGI application. In the case of
WSGI, uses :any:`wsgiref.validate` to ensure the response is valid.
Note:
In the case of an ASGI request, this method will simulate the entire
app lifecycle in a single shot, including lifespan and client
disconnect events. In order to simulate multiple interleaved
requests, or to test a streaming endpoint (such as one that emits
server-sent events), :class:`~falcon.testing.ASGIConductor` can be
used to more precisely control the app lifecycle.
Keyword Args:
app (callable): The WSGI or ASGI application to call
method (str): An HTTP method to use in the request
(default: 'GET')
path (str): The URL path to request (default: '/').
Note:
The path may contain a query string. However, neither
`query_string` nor `params` may be specified in this case.
root_path (str): The initial portion of the request URL's "path" that
corresponds to the application object, so that the application
knows its virtual "location". This defaults to the empty string,
indicating that the application corresponds to the "root" of the
server.
protocol: The protocol to use for the URL scheme
(default: 'http')
port (int): The TCP port to simulate. Defaults to
the standard port used by the given scheme (i.e., 80 for 'http'
and 443 for 'https'). A string may also be passed, as long as
it can be parsed as an int.
params (dict): A dictionary of query string parameters,
where each key is a parameter name, and each value is
either a ``str`` or something that can be converted
into a ``str``, or a list of such values. If a ``list``,
the value will be converted to a comma-delimited string
of values (e.g., 'thing=1,2,3').
params_csv (bool): Set to ``True`` to encode list values
in query string params as comma-separated values
(e.g., 'thing=1,2,3'). Otherwise, parameters will be encoded by
specifying multiple instances of the parameter
(e.g., 'thing=1&thing=2&thing=3'). Defaults to ``False``.
query_string (str): A raw query string to include in the
request (default: ``None``). If specified, overrides
`params`.
content_type (str): The value to use for the Content-Type header in
the request. If specified, this value will take precedence over
any value set for the Content-Type header in the
`headers` keyword argument. The ``falcon`` module provides a number
of :ref:`constants for common media types <media_type_constants>`.
headers (dict): Extra headers as a dict-like (Mapping) object, or an
iterable yielding a series of two-member (*name*, *value*)
iterables. Each pair of strings provides the name and value
for an HTTP header. If desired, multiple header values may be
combined into a single (*name*, *value*) pair by joining the values
with a comma when the header in question supports the list
format (see also RFC 7230 and RFC 7231). Header names are not
case-sensitive.
Note:
If a User-Agent header is not provided, it will default to::
f'falcon-client/{falcon.__version__}'
body (str): The body of the request (default ''). The value will be
encoded as UTF-8 in the WSGI environ. Alternatively, a byte string
may be passed, in which case it will be used as-is.
json(JSON serializable): A JSON document to serialize as the
body of the request (default: ``None``). If specified,
overrides `body` and sets the Content-Type header to
``'application/json'``, overriding any value specified by either
the `content_type` or `headers` arguments.
file_wrapper (callable): Callable that returns an iterable,
to be used as the value for *wsgi.file_wrapper* in the
WSGI environ (default: ``None``). This can be used to test
high-performance file transmission when `resp.stream` is
set to a file-like object.
host(str): A string to use for the hostname part of the fully
qualified request URL (default: 'falconframework.org')
remote_addr (str): A string to use as the remote IP address for the
request (default: '127.0.0.1'). For WSGI, this corresponds to
the 'REMOTE_ADDR' environ variable. For ASGI, this corresponds
to the IP address used for the 'client' field in the connection
scope.
http_version (str): The HTTP version to simulate. Must be either
'2', '2.0', 1.1', '1.0', or '1' (default '1.1'). If set to '1.0',
the Host header will not be added to the scope.
wsgierrors (io): The stream to use as *wsgierrors* in the WSGI
environ (default ``sys.stderr``)
asgi_chunk_size (int): The maximum number of bytes that will be
sent to the ASGI app in a single ``'http.request'`` event (default
4096).
asgi_disconnect_ttl (int): The maximum number of seconds to wait
since the request was initiated, before emitting an
``'http.disconnect'`` event when the app calls the
receive() function (default 300).
extras (dict): Additional values to add to the WSGI
``environ`` dictionary or the ASGI scope for the request
(default: ``None``)
cookies (dict): Cookies as a dict-like (Mapping) object, or an
iterable yielding a series of two-member (*name*, *value*)
iterables. Each pair of items provides the name and value
for the 'Set-Cookie' header.
Returns:
:class:`~.Result`: The result of the request
"""
if _is_asgi_app(app):
return async_to_sync( # type: ignore[return-value]
_simulate_request_asgi,
app,
method=method,
path=path,
query_string=query_string,
headers=headers,
content_type=content_type,
body=body,
json=json,
params=params,
params_csv=params_csv,
protocol=protocol,
host=host,
remote_addr=remote_addr,
extras=extras,
http_version=http_version,
port=port,
root_path=root_path,
asgi_chunk_size=asgi_chunk_size,
asgi_disconnect_ttl=asgi_disconnect_ttl,
cookies=cookies,
)
path, query_string, headers, body, extras = _prepare_sim_args(
path,
query_string,
params,
params_csv,
content_type,
headers,
body,
json,
extras,
)
env = helpers.create_environ(
method=method,
scheme=protocol,
path=path,
query_string=(query_string or ''),
headers=headers,
body=body or b'',
file_wrapper=file_wrapper,
host=host,
remote_addr=remote_addr,
wsgierrors=wsgierrors,
http_version=http_version,
port=port,
root_path=root_path,
cookies=cookies,
)
if 'REQUEST_METHOD' in extras and extras['REQUEST_METHOD'] != method:
# NOTE(vytas): Even given the duct tape nature of overriding
# arbitrary environ variables, changing the method can potentially
# be very confusing, particularly when using specialized
# simulate_get/post/patch etc methods.
raise ValueError(
'WSGI environ extras may not override the request method. '
'Please use the method parameter.'
)
env.update(extras)
srmock = StartResponseMock()
validator = wsgiref.validate.validator(app)
iterable = validator(env, srmock)
data = helpers.closed_wsgi_iterable(iterable)
assert srmock.status is not None and srmock.headers is not None
return Result(data, srmock.status, srmock.headers)
@overload
async def _simulate_request_asgi(
app: Callable[..., Coroutine[Any, Any, Any]],
method: str = ...,
path: str = ...,
query_string: str | None = ...,
headers: HeaderArg | None = ...,
content_type: str | None = ...,
body: str | bytes | None = ...,
json: Any | None = ...,
params: Mapping[str, Any] | None = ...,
params_csv: bool = ...,
protocol: str = ...,
host: str = ...,
remote_addr: str | None = ...,
extras: Mapping[str, Any] | None = ...,
http_version: str = ...,
port: int | None = ...,
root_path: str | None = ...,
asgi_chunk_size: int = ...,
asgi_disconnect_ttl: int = ...,
cookies: CookieArg | None = ...,
_one_shot: Literal[False] = ...,
_stream_result: Literal[True] = ...,
) -> StreamedResult: ...
@overload
async def _simulate_request_asgi(
app: Callable[..., Coroutine[Any, Any, Any]],
method: str = ...,
path: str = ...,
query_string: str | None = ...,
headers: HeaderArg | None = ...,
content_type: str | None = ...,
body: str | bytes | None = ...,
json: Any | None = ...,
params: Mapping[str, Any] | None = ...,
params_csv: bool = ...,
protocol: str = ...,
host: str = ...,
remote_addr: str | None = ...,
extras: Mapping[str, Any] | None = ...,
http_version: str = ...,
port: int | None = ...,
root_path: str | None = ...,
asgi_chunk_size: int = ...,
asgi_disconnect_ttl: int = ...,
cookies: CookieArg | None = ...,
_one_shot: Literal[True] = ...,
_stream_result: bool = ...,
) -> Result: ...
# NOTE(kgriffs): The default of asgi_disconnect_ttl was chosen to be
# relatively long (5 minutes) to help testers notice when something
# appears to be "hanging", which might indicates that the app is
# not handling the reception of events correctly.
async def _simulate_request_asgi(
app: Callable[..., Coroutine[Any, Any, Any]], # accept any asgi app
method: str = 'GET',
path: str = '/',
query_string: str | None = None,
headers: HeaderArg | None = None,
content_type: str | None = None,
body: str | bytes | None = None,
json: Any | None = None,
params: Mapping[str, Any] | None = None,
params_csv: bool = False,
protocol: str = 'http',
host: str = helpers.DEFAULT_HOST,
remote_addr: str | None = None,
extras: Mapping[str, Any] | None = None,
http_version: str = '1.1',
port: int | None = None,
root_path: str | None = None,
asgi_chunk_size: int = 4096,
asgi_disconnect_ttl: int = 300,
cookies: CookieArg | None = None,
# NOTE(kgriffs): These are undocumented because they are only
# meant to be used internally by the framework (i.e., they are
# not part of the public interface.) In case we ever expose
# simulate_request_asgi() as part of the public interface, we
# don't want these kwargs to be documented.
_one_shot: bool = True,
_stream_result: bool = False,
) -> Result | StreamedResult:
"""Simulate a request to an ASGI application.
Keyword Args:
app (callable): The WSGI or ASGI application to call
method (str): An HTTP method to use in the request
(default: 'GET')
path (str): The URL path to request (default: '/').
Note:
The path may contain a query string. However, neither
`query_string` nor `params` may be specified in this case.
root_path (str): The initial portion of the request URL's "path" that
corresponds to the application object, so that the application
knows its virtual "location". This defaults to the empty string,
indicating that the application corresponds to the "root" of the
server.
protocol: The protocol to use for the URL scheme
(default: 'http')
port (int): The TCP port to simulate. Defaults to
the standard port used by the given scheme (i.e., 80 for 'http'
and 443 for 'https'). A string may also be passed, as long as
it can be parsed as an int.
params (dict): A dictionary of query string parameters,
where each key is a parameter name, and each value is
either a ``str`` or something that can be converted
into a ``str``, or a list of such values. If a ``list``,
the value will be converted to a comma-delimited string
of values (e.g., 'thing=1,2,3').
params_csv (bool): Set to ``False`` to encode list values
in query string params by specifying multiple instances
of the parameter (e.g., 'thing=1&thing=2&thing=3').
Otherwise, parameters will be encoded as comma-separated
values (e.g., 'thing=1,2,3'). Defaults to ``True``.
query_string (str): A raw query string to include in the
request (default: ``None``). If specified, overrides
`params`.
content_type (str): The value to use for the Content-Type header in
the request. If specified, this value will take precedence over
any value set for the Content-Type header in the
`headers` keyword argument. The ``falcon`` module provides a number
of :ref:`constants for common media types <media_type_constants>`.
headers (dict): Extra headers as a dict-like (Mapping) object, or an
iterable yielding a series of two-member (*name*, *value*)
iterables. Each pair of strings provides the name and value
for an HTTP header. If desired, multiple header values may be
combined into a single (*name*, *value*) pair by joining the values
with a comma when the header in question supports the list
format (see also RFC 7230 and RFC 7231). Header names are not
case-sensitive.
Note:
If a User-Agent header is not provided, it will default to::
f'falcon-client/{falcon.__version__}'
body (str): The body of the request (default ''). The value will be
encoded as UTF-8 in the WSGI environ. Alternatively, a byte string
may be passed, in which case it will be used as-is.
json(JSON serializable): A JSON document to serialize as the
body of the request (default: ``None``). If specified,
overrides `body` and sets the Content-Type header to
``'application/json'``, overriding any value specified by either
the `content_type` or `headers` arguments.
host(str): A string to use for the hostname part of the fully
qualified request URL (default: 'falconframework.org')
remote_addr (str): A string to use as the remote IP address for the
request (default: '127.0.0.1'). For WSGI, this corresponds to
the 'REMOTE_ADDR' environ variable. For ASGI, this corresponds
to the IP address used for the 'client' field in the connection
scope.
http_version (str): The HTTP version to simulate. Must be either
'2', '2.0', 1.1', '1.0', or '1' (default '1.1'). If set to '1.0',
the Host header will not be added to the scope.
asgi_chunk_size (int): The maximum number of bytes that will be
sent to the ASGI app in a single ``'http.request'`` event (default
4096).
asgi_disconnect_ttl (int): The maximum number of seconds to wait
since the request was initiated, before emitting an
``'http.disconnect'`` event when the app calls the
receive() function (default 300).
extras (dict): Additional values to add to the WSGI
``environ`` dictionary or the ASGI scope for the request
(default: ``None``)
cookies (dict): Cookies as a dict-like (Mapping) object, or an
iterable yielding a series of two-member (*name*, *value*)
iterables. Each pair of items provides the name and value
for the 'Set-Cookie' header.
Returns:
:class:`~.Result`: The result of the request
"""
path, query_string, headers, body, extras = _prepare_sim_args(
path,
query_string,
params,
params_csv,
content_type,
headers,
body,
json,
extras,
)
# ---------------------------------------------------------------------
# NOTE(kgriffs): 'http' scope
# ---------------------------------------------------------------------
content_length = None
if body is not None:
if isinstance(body, str):
body = body.encode()
content_length = len(body)
http_scope = helpers.create_scope(
path=path,
query_string=query_string,
method=method,
headers=headers,
host=host,
scheme=protocol,
port=port,
http_version=http_version,
remote_addr=remote_addr,
root_path=root_path,
content_length=content_length,
cookies=cookies,
)
if 'method' in extras and extras['method'] != method.upper():
raise ValueError(
'ASGI scope extras may not override the request method. '
'Please use the method parameter.'
)
http_scope.update(extras)
# ---------------------------------------------------------------------
if asgi_disconnect_ttl == 0: # Special case
disconnect_at = 0.0
else:
disconnect_at = time.time() + max(0, asgi_disconnect_ttl)
req_event_emitter = helpers.ASGIRequestEventEmitter(
(body or b''),
chunk_size=asgi_chunk_size,
disconnect_at=disconnect_at,
)
resp_event_collector = helpers.ASGIResponseEventCollector()
if not _one_shot:
task_req = asyncio.create_task(
app(http_scope, req_event_emitter, resp_event_collector)
)
if _stream_result:
# NOTE(kgriffs): Wait until the response has been started and give
# the task a chance to progress. Otherwise, we won't have a
# status or headers to pass to StreamedResult.
while not resp_event_collector.status:
await asyncio.sleep(0)
return StreamedResult(
resp_event_collector.body_chunks,
code_to_http_status(resp_event_collector.status),
resp_event_collector.headers,
task_req,
req_event_emitter,
)
req_event_emitter.disconnect()
await task_req
return Result(
resp_event_collector.body_chunks,
code_to_http_status(resp_event_collector.status),
resp_event_collector.headers,
)
# ---------------------------------------------------------------------
# NOTE(kgriffs): 'lifespan' scope
# ---------------------------------------------------------------------
lifespan_scope = {
'type': ScopeType.LIFESPAN,
'asgi': {
'version': '3.0',
'spec_version': '2.0',
},
}
shutting_down = asyncio.Condition()
lifespan_event_emitter = helpers.ASGILifespanEventEmitter(shutting_down)
lifespan_event_collector = helpers.ASGIResponseEventCollector()
# ---------------------------------------------------------------------
async def conductor() -> None:
# NOTE(kgriffs): We assume this is a Falcon ASGI app, which supports
# the lifespan protocol and thus we do not need to catch
# exceptions that would signify no lifespan protocol support.
task_lifespan = asyncio.create_task(
app(lifespan_scope, lifespan_event_emitter, lifespan_event_collector)
)
await _wait_for_startup(lifespan_event_collector.events)
task_req = asyncio.create_task(
app(http_scope, req_event_emitter, resp_event_collector)
)
req_event_emitter.disconnect()
await task_req
# NOTE(kgriffs): Notify lifespan_event_emitter that it is OK
# to proceed.
async with shutting_down:
shutting_down.notify()
await _wait_for_shutdown(lifespan_event_collector.events)
await task_lifespan
await conductor()
if resp_event_collector.status is None:
# NOTE(kgriffs): An immediate disconnect was simulated, and so
# the app could not return a status.
raise ConnectionError('An immediate disconnect was simulated.')
return Result(
resp_event_collector.body_chunks,
code_to_http_status(resp_event_collector.status),
resp_event_collector.headers,
)
| StreamedResult |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/configs.py | {
"start": 12983,
"end": 14532
} | class ____(DatabricksBaseTask[jobs.RunJobTask]):
@property
def task_type(self) -> str:
return "run_job"
@property
def task_config_metadata(self) -> Mapping[str, Any]:
task_config_metadata = {}
job_config = self.task_config["run_job_task"]
task_config_metadata["job_id"] = job_config["job_id"]
task_config_metadata["job_parameters"] = self.task_parameters
return task_config_metadata
@classmethod
def from_job_task_config(cls, job_task_config: Mapping[str, Any]) -> "DatabricksJobTask":
run_job_task = job_task_config["run_job_task"]
task_config = {"run_job_task": run_job_task}
# For job tasks, parameters are in job_parameters
task_parameters = run_job_task.get("job_parameters", {})
return cls(
task_key=job_task_config["task_key"],
task_config=task_config,
task_parameters=task_parameters,
depends_on=parse_depends_on(job_task_config.get("depends_on", [])),
job_name=job_task_config["job_name"],
libraries=job_task_config.get("libraries", []),
)
@property
def needs_cluster(self) -> bool:
return False
@property
def submit_task_key(self) -> str:
return "run_job_task"
def to_databricks_sdk_task(self) -> jobs.RunJobTask:
return jobs.RunJobTask(
job_id=self.task_config["run_job_task"]["job_id"],
job_parameters=check.is_dict(self.task_parameters),
)
@record
| DatabricksJobTask |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/snap/snap.py | {
"start": 6136,
"end": 7731
} | class ____(PartitionsSnap, IHaveNew):
partition_keys: Sequence[str]
def __new__(cls, partition_keys: Sequence[str]):
# for back compat reasons we allow str as a Sequence[str] here
if not isinstance(partition_keys, str):
check.sequence_param(
partition_keys,
"partition_keys",
of_type=str,
)
return super().__new__(
cls,
partition_keys=partition_keys,
)
@classmethod
def from_def(cls, partitions_def: "StaticPartitionsDefinition") -> Self: # pyright: ignore[reportIncompatibleMethodOverride]
from dagster._core.definitions.partitions.definition import StaticPartitionsDefinition
check.inst_param(partitions_def, "partitions_def", StaticPartitionsDefinition)
return cls(partition_keys=partitions_def.get_partition_keys())
def get_partitions_definition(self):
from dagster._core.definitions.partitions.definition import StaticPartitionsDefinition
# v1.4 made `StaticPartitionsDefinition` error if given duplicate keys. This caused
# host process errors for users who had not upgraded their user code to 1.4 and had dup
# keys, since the host process `StaticPartitionsDefinition` would throw an error.
keys = _dedup_partition_keys(self.partition_keys)
return StaticPartitionsDefinition(keys)
@whitelist_for_serdes(
storage_name="ExternalPartitionDimensionDefinition",
storage_field_names={"partitions": "external_partitions_def_data"},
)
@record
| StaticPartitionsSnap |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-notion/components.py | {
"start": 1384,
"end": 2311
} | class ____(RecordTransformation):
"""
Transforms the nested 'properties' object within a Notion Page/Database record into a more
normalized form. In Notion's API response, 'properties' is a dictionary where each key
represents the name of a property and its value contains various metadata and the property's
actual value.
The transformed 'properties' will consist of an array where each element is a dictionary
with two keys: 'name', holding the original property name, and 'value', containing the
property's content.
"""
def transform(self, record: MutableMapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
properties = record.get("properties", {})
transformed_properties = [{"name": name, "value": value} for name, value in properties.items()]
record["properties"] = transformed_properties
return record
@dataclass
| NotionPropertiesTransformation |
python | doocs__leetcode | solution/2400-2499/2410.Maximum Matching of Players With Trainers/Solution.py | {
"start": 0,
"end": 385
} | class ____:
def matchPlayersAndTrainers(self, players: List[int], trainers: List[int]) -> int:
players.sort()
trainers.sort()
j, n = 0, len(trainers)
for i, p in enumerate(players):
while j < n and trainers[j] < p:
j += 1
if j == n:
return i
j += 1
return len(players)
| Solution |
python | pytorch__pytorch | torch/_dynamo/variables/base.py | {
"start": 7845,
"end": 8066
} | class ____(NotImplementedError):
vt: "VariableTracker"
def __init__(self, vt: "VariableTracker") -> None:
super().__init__(f"{vt} is not a constant")
self.vt = vt
| AsPythonConstantNotImplementedError |
python | joblib__joblib | joblib/compressor.py | {
"start": 19088,
"end": 19281
} | class ____(CompressorWrapper):
def __init__(self):
CompressorWrapper.__init__(
self, obj=BinaryGzipFile, prefix=_GZIP_PREFIX, extension=".gz"
)
| GzipCompressorWrapper |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 37739,
"end": 37868
} | class ____(EnvironmentVariableMixin, CreateView):
success_message = _("Environment variable created")
| EnvironmentVariableCreate |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC501_google.py | {
"start": 54,
"end": 5088
} | class ____(Exception):
...
_some_error = Exception
# OK
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
FasterThanLightError: If speed is greater than the speed of light.
"""
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
except:
raise ValueError
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
try:
return distance / time
except ZeroDivisionError as exc:
print('oops')
raise exc
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
try:
return distance / time
except (ZeroDivisionError, ValueError) as exc:
print('oops')
raise exc
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
raise AnotherError
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
raise AnotherError()
# DOC501
def foo(bar: int):
"""Foo.
Args:
bar: Bar.
"""
raise something.SomeError
# DOC501, but can't resolve the error
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
raise _some_error
# OK
def calculate_speed(distance: float, time: float) -> float:
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
# OK
def calculate_speed(distance: float, time: float) -> float:
raise NotImplementedError
# OK
def foo(bar: int):
"""Foo.
Args:
bar: Bar.
Raises:
SomeError: Wow.
"""
raise something.SomeError
# OK
def foo(bar: int):
"""Foo.
Args:
bar: Bar.
Raises:
something.SomeError: Wow.
"""
raise something.SomeError
# DOC501
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
Raises:
TypeError: if you didn't pass a number for both parameters
"""
try:
return distance / time
except ZeroDivisionError:
print("Oh no, why would you divide something by zero?")
raise
except TypeError:
print("Not a number? Shame on you!")
raise
# This is fine
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
Args:
distance: Distance traveled.
time: Time spent traveling.
Returns:
Speed as distance divided by time.
"""
try:
return distance / time
except Exception as e:
print(f"Oh no, we encountered {e}")
raise
def foo():
"""Foo.
Returns:
42: int.
"""
if True:
raise TypeError # DOC501
else:
raise TypeError # no DOC501 here because we already emitted a diagnostic for the earlier `raise TypeError`
raise ValueError # DOC501
return 42
| FasterThanLightError |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 5690,
"end": 8634
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
# Copied from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
"""
Partitions the given input into windows.
"""
batch_size, height, width, num_channels = input_feature.shape
input_feature = input_feature.view(
batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
)
windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
"""
Merges windows to produce higher resolution features.
"""
num_channels = windows.shape[-1]
windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
return windows
# Copied from transformers.models.swin.modeling_swin.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swinv2
| Swinv2ImageClassifierOutput |
python | django__django | tests/auth_tests/test_management.py | {
"start": 3587,
"end": 3878
} | class ____(TestCase):
@mock_inputs({"username": "alice"})
def test_input_not_found(self):
with self.assertRaisesMessage(
ValueError, "Mock input for 'Email address: ' not found."
):
call_command("createsuperuser", stdin=MockTTY())
| MockInputTests |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 8280,
"end": 8396
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
href: str
@dataclasses.dataclass(frozen=True)
| CodeDescription |
python | huggingface__transformers | src/transformers/models/seed_oss/modular_seed_oss.py | {
"start": 1520,
"end": 2381
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
self.residual_dropout = config.residual_dropout
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
down_proj = nn.functional.dropout(down_proj, p=self.residual_dropout, training=self.training)
return down_proj
| SeedOssMLP |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 354955,
"end": 355305
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("message", "octicon")
message = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="message")
octicon = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="octicon")
| HovercardContext |
python | bokeh__bokeh | tests/unit/bokeh/test_objects.py | {
"start": 10951,
"end": 15983
} | class ____(TestContainerMutation):
def test_whether_included_in_props_with_values(self) -> None:
obj = HasListProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo # noqa: F841
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the list should count as replacing the default
obj.foo.append("hello")
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self) -> None:
obj = HasListProp()
old_list = obj.foo
assert isinstance(old_list, PropertyValueList)
assert 1 == len(old_list._owners)
obj.foo = ["a"]
new_list = obj.foo
assert isinstance(new_list, PropertyValueList)
assert old_list is not new_list
assert 0 == len(old_list._owners)
assert 1 == len(new_list._owners)
def test_list_delitem(self) -> None:
obj = HasListProp(foo=["a", "b", "c"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c"],
["a", "c"])
def test_list_delslice(self) -> None:
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
del x[1:3]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "d"])
def test_list_iadd(self) -> None:
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x += ["b"]
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "b"])
def test_list_imul(self) -> None:
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x *= 3
self._check_mutation(obj, 'foo', mutate,
["a"],
["a", "a", "a"])
def test_list_setitem(self) -> None:
obj = HasListProp(foo=["a"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[0] = "b"
self._check_mutation(obj, 'foo', mutate,
["a"],
["b"])
def test_list_setslice(self) -> None:
obj = HasListProp(foo=["a", "b", "c", "d"])
assert isinstance(obj.foo, PropertyValueList)
def mutate(x):
x[1:3] = ["x"]
self._check_mutation(obj, 'foo', mutate,
["a", "b", "c", "d"],
["a", "x", "d"])
def test_list_append(self) -> None:
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
def test_list_extend(self) -> None:
obj = HasListProp()
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.extend(["x", "y"]), [], ["x", "y"])
def test_list_insert(self) -> None:
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.insert(1, "x"),
["a", "b"],
["a", "x", "b"])
def test_list_pop(self) -> None:
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.pop(),
["a", "b"],
["a"])
def test_list_remove(self) -> None:
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.remove("b"),
["a", "b"],
["a"])
def test_list_reverse(self) -> None:
obj = HasListProp(foo=["a", "b"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.reverse(),
["a", "b"],
["b", "a"])
def test_list_sort(self) -> None:
obj = HasListProp(foo=["b", "a"])
assert isinstance(obj.foo, PropertyValueList)
self._check_mutation(obj, 'foo', lambda x: x.sort(),
["b", "a"],
["a", "b"])
| TestListMutation |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 12159,
"end": 12627
} | class ____(AbstractTemplate):
"""
Typing for `Masked is cudf.NA`
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType) and isinstance(args[1], NAType):
return nb_signature(types.boolean, args[0], na_type)
elif isinstance(args[1], MaskedType) and isinstance(args[0], NAType):
return nb_signature(types.boolean, na_type, args[1])
@cuda_decl_registry.register_global(operator.truth)
| MaskedScalarIsNull |
python | dask__dask | dask/dataframe/dask_expr/_categorical.py | {
"start": 549,
"end": 4716
} | class ____(Accessor):
"""
Accessor object for categorical properties of the Series values.
Examples
--------
>>> s.cat.categories # doctest: +SKIP
Notes
-----
Attributes that depend only on metadata are eager
* categories
* ordered
Attributes depending on the entire dataset are lazy
* codes
* ...
So `df.a.cat.categories` <=> `df.a._meta.cat.categories`
So `df.a.cat.codes` <=> `df.a.map_partitions(lambda x: x.cat.codes)`
"""
_accessor_name = "cat"
_accessor_methods = (
"add_categories",
"as_ordered",
"as_unordered",
"remove_categories",
"rename_categories",
"reorder_categories",
"set_categories",
)
_accessor_properties = ()
@property
def known(self):
"""Whether the categories are fully known"""
return has_known_categories(self._series)
def as_known(self, **kwargs):
"""Ensure the categories in this series are known.
If the categories are known, this is a no-op. If unknown, the
categories are computed, and a new series with known categories is
returned.
Parameters
----------
kwargs
Keywords to pass on to the call to `compute`.
"""
if self.known:
return self._series
from dask.dataframe.dask_expr._collection import new_collection
categories = (
new_collection(PropertyMap(self._series, "cat", "categories"))
.unique()
.compute()
)
return self.set_categories(categories.values)
def as_unknown(self):
"""Ensure the categories in this series are unknown"""
if not self.known:
return self._series
from dask.dataframe.dask_expr import new_collection
return new_collection(AsUnknown(self._series))
@property
def ordered(self):
"""Whether the categories have an ordered relationship"""
return self._delegate_property(self._series._meta, "cat", "ordered")
@property
def categories(self):
"""The categories of this categorical.
If categories are unknown, an error is raised"""
if not self.known:
msg = (
"`df.column.cat.categories` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known categories"
)
raise AttributeNotImplementedError(msg)
return self._delegate_property(self._series._meta, "cat", "categories")
@property
def codes(self):
"""The codes of this categorical.
If categories are unknown, an error is raised"""
if not self.known:
msg = (
"`df.column.cat.codes` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known categories"
)
raise AttributeNotImplementedError(msg)
from dask.dataframe.dask_expr._collection import new_collection
return new_collection(PropertyMap(self._series, "cat", "codes"))
def remove_unused_categories(self):
"""
Removes categories which are not used
Notes
-----
This method requires a full scan of the data to compute the
unique values, which can be expensive.
"""
# get the set of used categories
present = self._series.dropna().unique()
present = pd.Index(present.compute())
if isinstance(self._series._meta, pd.CategoricalIndex):
meta_cat = self._series._meta
else:
meta_cat = self._series._meta.cat
# Reorder to keep cat:code relationship, filtering unused (-1)
ordered, mask = present.reindex(meta_cat.categories)
if mask is None:
# PANDAS-23963: old and new categories match.
return self._series
new_categories = ordered[mask != -1]
return self.set_categories(new_categories)
| CategoricalAccessor |
python | fastapi__sqlmodel | docs_src/tutorial/automatic_id_none_refresh/tutorial002.py | {
"start": 92,
"end": 2438
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson") # (1)!
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador") # (2)!
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48) # (3)!
print("Before interacting with the database") # (4)!
print("Hero 1:", hero_1) # (5)!
print("Hero 2:", hero_2) # (6)!
print("Hero 3:", hero_3) # (7)!
with Session(engine) as session: # (8)!
session.add(hero_1) # (9)!
session.add(hero_2) # (10)!
session.add(hero_3) # (11)!
print("After adding to the session") # (12)!
print("Hero 1:", hero_1) # (13)!
print("Hero 2:", hero_2) # (14)!
print("Hero 3:", hero_3) # (15)!
session.commit() # (16)!
print("After committing the session") # (17)!
print("Hero 1:", hero_1) # (18)!
print("Hero 2:", hero_2) # (19)!
print("Hero 3:", hero_3) # (20)!
print("After committing the session, show IDs") # (21)!
print("Hero 1 ID:", hero_1.id) # (22)!
print("Hero 2 ID:", hero_2.id) # (23)!
print("Hero 3 ID:", hero_3.id) # (24)!
print("After committing the session, show names") # (25)!
print("Hero 1 name:", hero_1.name) # (26)!
print("Hero 2 name:", hero_2.name) # (27)!
print("Hero 3 name:", hero_3.name) # (28)!
session.refresh(hero_1) # (29)!
session.refresh(hero_2) # (30)!
session.refresh(hero_3) # (31)!
print("After refreshing the heroes") # (32)!
print("Hero 1:", hero_1) # (33)!
print("Hero 2:", hero_2) # (34)!
print("Hero 3:", hero_3) # (35)!
# (36)!
print("After the session closes") # (37)!
print("Hero 1:", hero_1) # (38)!
print("Hero 2:", hero_2) # (39)!
print("Hero 3:", hero_3) # (40)!
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | python__mypy | mypy/copytype.py | {
"start": 880,
"end": 4480
} | class ____(TypeVisitor[ProperType]):
def visit_unbound_type(self, t: UnboundType) -> ProperType:
return t
def visit_any(self, t: AnyType) -> ProperType:
return self.copy_common(t, AnyType(t.type_of_any, t.source_any, t.missing_import_name))
def visit_none_type(self, t: NoneType) -> ProperType:
return self.copy_common(t, NoneType())
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
dup = UninhabitedType()
dup.ambiguous = t.ambiguous
return self.copy_common(t, dup)
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.copy_common(t, ErasedType())
def visit_deleted_type(self, t: DeletedType) -> ProperType:
return self.copy_common(t, DeletedType(t.source))
def visit_instance(self, t: Instance) -> ProperType:
dup = Instance(t.type, t.args, last_known_value=t.last_known_value)
dup.invalid = t.invalid
return self.copy_common(t, dup)
def visit_type_var(self, t: TypeVarType) -> ProperType:
return self.copy_common(t, t.copy_modified())
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
dup = ParamSpecType(
t.name, t.fullname, t.id, t.flavor, t.upper_bound, t.default, prefix=t.prefix
)
return self.copy_common(t, dup)
def visit_parameters(self, t: Parameters) -> ProperType:
dup = Parameters(
t.arg_types,
t.arg_kinds,
t.arg_names,
variables=t.variables,
is_ellipsis_args=t.is_ellipsis_args,
)
return self.copy_common(t, dup)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
dup = TypeVarTupleType(
t.name, t.fullname, t.id, t.upper_bound, t.tuple_fallback, t.default
)
return self.copy_common(t, dup)
def visit_unpack_type(self, t: UnpackType) -> ProperType:
dup = UnpackType(t.type)
return self.copy_common(t, dup)
def visit_partial_type(self, t: PartialType) -> ProperType:
return self.copy_common(t, PartialType(t.type, t.var, t.value_type))
def visit_callable_type(self, t: CallableType) -> ProperType:
return self.copy_common(t, t.copy_modified())
def visit_tuple_type(self, t: TupleType) -> ProperType:
return self.copy_common(t, TupleType(t.items, t.partial_fallback, implicit=t.implicit))
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
return self.copy_common(
t, TypedDictType(t.items, t.required_keys, t.readonly_keys, t.fallback)
)
def visit_literal_type(self, t: LiteralType) -> ProperType:
return self.copy_common(t, LiteralType(value=t.value, fallback=t.fallback))
def visit_union_type(self, t: UnionType) -> ProperType:
return self.copy_common(t, UnionType(t.items))
def visit_overloaded(self, t: Overloaded) -> ProperType:
return self.copy_common(t, Overloaded(items=t.items))
def visit_type_type(self, t: TypeType) -> ProperType:
# Use cast since the type annotations in TypeType are imprecise.
return self.copy_common(t, TypeType(cast(Any, t.item), is_type_form=t.is_type_form))
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, "only ProperTypes supported"
def copy_common(self, t: ProperType, t2: ProperType) -> ProperType:
t2.line = t.line
t2.column = t.column
t2.can_be_false = t.can_be_false
t2.can_be_true = t.can_be_true
return t2
| TypeShallowCopier |
python | Lightning-AI__lightning | tests/tests_pytorch/trainer/test_dataloaders.py | {
"start": 1950,
"end": 2255
} | class ____(BoringModel):
def val_dataloader(self):
return [DataLoader(RandomDataset(32, 64)), DataLoader(RandomDataset(32, 64), batch_size=8)]
def validation_step(self, batch, batch_idx, dataloader_idx):
return super().validation_step(batch, batch_idx)
| MultiValDataLoaderBoringModel |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/integration_tests/chat_models.py | {
"start": 2250,
"end": 2657
} | class ____(BaseCallbackHandler):
options: list[dict | None]
def __init__(self) -> None:
super().__init__()
self.options = []
@override
def on_chat_model_start(
self,
serialized: Any,
messages: Any,
*,
options: dict[str, Any] | None = None,
**kwargs: Any,
) -> None:
self.options.append(options)
| _TestCallbackHandler |
python | dagster-io__dagster | python_modules/libraries/dagster-snowflake-pyspark/dagster_snowflake_pyspark/snowflake_pyspark_type_handler.py | {
"start": 7514,
"end": 11157
} | class ____(SnowflakeIOManager):
"""An I/O manager definition that reads inputs from and writes PySpark DataFrames to Snowflake. When
using the SnowflakePySparkIOManager, any inputs and outputs without type annotations will be loaded
as PySpark DataFrames.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_snowflake_pyspark import SnowflakePySparkIOManager
from pyspark.sql import DataFrame
from dagster import Definitions, EnvVar
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={
"io_manager": SnowflakePySparkIOManager(
database="my_database",
warehouse="my_warehouse", # required for SnowflakePySparkIOManager
account=EnvVar("SNOWFLAKE_ACCOUNT"),
password=EnvVar("SNOWFLAKE_PASSWORD"),
...
)
}
)
Note that the warehouse configuration value is required when using the SnowflakePySparkIOManager
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
Definitions(
assets=[my_table]
resources={
"io_manager" SnowflakePySparkIOManager(database="my_database", schema="my_schema", ...)
}
)
On individual assets, you an also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"] # will be used as the schema in snowflake
)
def my_table() -> DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: DataFrame) -> DataFrame:
# my_table will just contain the data from column "a"
...
"""
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePySparkTypeHandler()]
@staticmethod
def default_load_type() -> Optional[type]:
return DataFrame
| SnowflakePySparkIOManager |
python | jazzband__django-polymorphic | example/pexp/models.py | {
"start": 1409,
"end": 1517
} | class ____(ShowFieldTypeAndContent, PolymorphicModel):
field1 = models.CharField(max_length=10)
| TestModelA |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/unit_tests/test_spec_processing.py | {
"start": 405,
"end": 6139
} | class ____(BaseModel):
asset_attributes: Sequence[AssetPostProcessorModel] = []
defs = dg.Definitions(
assets=[
dg.AssetSpec("a", group_name="g1"),
dg.AssetSpec("b", group_name="g2"),
dg.AssetSpec("c", group_name="g2", tags={"tag": "val"}),
],
)
def test_replace_attributes() -> None:
op = AssetPostProcessorModel.model()(
operation="replace",
target="group:g2",
attributes={"tags": {"newtag": "newval"}},
)
newdefs = apply_post_processor_to_defs(model=op, defs=defs, context=ResolutionContext.default())
asset_graph = newdefs.resolve_asset_graph()
assert asset_graph.get(dg.AssetKey("a")).tags == {}
assert asset_graph.get(dg.AssetKey("b")).tags == {"newtag": "newval"}
assert asset_graph.get(dg.AssetKey("c")).tags == {"newtag": "newval"}
def test_merge_attributes() -> None:
op = AssetPostProcessorModel.model()(
operation="merge",
target="group:g2",
attributes={"tags": {"newtag": "newval"}},
)
newdefs = apply_post_processor_to_defs(model=op, defs=defs, context=ResolutionContext.default())
asset_graph = newdefs.resolve_asset_graph()
assert asset_graph.get(dg.AssetKey("a")).tags == {}
assert asset_graph.get(dg.AssetKey("b")).tags == {"newtag": "newval"}
assert asset_graph.get(dg.AssetKey("c")).tags == {"tag": "val", "newtag": "newval"}
def test_render_attributes_asset_context() -> None:
op = AssetPostProcessorModel.model()(
attributes={"tags": {"group_name_tag": "group__{{ asset.group_name }}"}}
)
newdefs = apply_post_processor_to_defs(model=op, defs=defs, context=ResolutionContext.default())
asset_graph = newdefs.resolve_asset_graph()
assert asset_graph.get(dg.AssetKey("a")).tags == {"group_name_tag": "group__g1"}
assert asset_graph.get(dg.AssetKey("b")).tags == {"group_name_tag": "group__g2"}
assert asset_graph.get(dg.AssetKey("c")).tags == {"tag": "val", "group_name_tag": "group__g2"}
def test_render_attributes_custom_context() -> None:
op = AssetPostProcessorModel.model()(
operation="replace",
target="group:g2",
attributes={
"tags": {"a": "{{ foo }}", "b": "prefix_{{ foo }}"},
"metadata": "{{ metadata }}",
"automation_condition": "{{ custom_cron('@daily') }}",
},
)
def _custom_cron(s):
return AutomationCondition.cron_tick_passed(s) & ~AutomationCondition.in_progress()
metadata = {"a": 1, "b": "str", "d": 1.23}
newdefs = apply_post_processor_to_defs(
model=op,
defs=defs,
context=ResolutionContext.default().with_scope(
foo="theval", metadata=metadata, custom_cron=_custom_cron
),
)
asset_graph = newdefs.resolve_asset_graph()
assert asset_graph.get(dg.AssetKey("a")).tags == {}
assert asset_graph.get(dg.AssetKey("a")).metadata == {}
assert asset_graph.get(dg.AssetKey("a")).automation_condition is None
for k in ["b", "c"]:
node = asset_graph.get(dg.AssetKey(k))
assert node.tags == {"a": "theval", "b": "prefix_theval"}
assert node.metadata == metadata
assert node.automation_condition == _custom_cron("@daily")
@pytest.mark.parametrize(
"python,expected",
[
# default to merge and a * target
(
{"attributes": {"tags": {"a": "b"}}},
AssetPostProcessorModel.model()(target="*", attributes={"tags": {"a": "b"}}),
),
(
{"operation": "replace", "attributes": {"tags": {"a": "b"}}},
AssetPostProcessorModel.model()(
operation="replace",
target="*",
attributes={"tags": {"a": "b"}},
),
),
# explicit target
(
{"attributes": {"tags": {"a": "b"}}, "target": "group:g2"},
AssetPostProcessorModel.model()(
target="group:g2",
attributes={"tags": {"a": "b"}},
),
),
(
{"operation": "replace", "attributes": {"tags": {"a": "b"}}, "target": "group:g2"},
AssetPostProcessorModel.model()(
operation="replace",
target="group:g2",
attributes={"tags": {"a": "b"}},
),
),
],
)
def test_load_attributes(python, expected) -> None:
loaded = TypeAdapter(Sequence[AssetPostProcessorModel.model()]).validate_python([python])
assert len(loaded) == 1
assert loaded[0] == expected
def test_prefixing():
prefix = ["sweet_prefix"]
translated = TranslatorResolvingInfo(
resolution_context=ResolutionContext.default(),
asset_attributes=dg.AssetAttributesModel(
key_prefix=prefix,
),
).get_asset_spec(dg.AssetSpec("a"), {})
assert translated.key.has_prefix(prefix)
def test_key_set():
spec = dg.AssetSpec("a")
translated = TranslatorResolvingInfo(
resolution_context=ResolutionContext.default(),
asset_attributes=dg.AssetAttributesModel(
key="{{ spec.key.to_user_string() + '_key' }}",
),
).get_asset_spec(spec, {"spec": spec})
assert translated.key.to_user_string().endswith("_key")
def test_key_and_prefix():
prefix = ["sweet_prefix"]
spec = dg.AssetSpec("a")
translated = TranslatorResolvingInfo(
resolution_context=ResolutionContext.default(),
asset_attributes=dg.AssetAttributesModel(
key="{{ spec.key.to_user_string() + '_key' }}",
key_prefix=prefix,
),
).get_asset_spec(spec, {"spec": spec})
assert translated.key.to_user_string().endswith("_key")
assert translated.key.has_prefix(prefix)
| M |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 41273,
"end": 45550
} | class ____(Operation):
def __init__(
self,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
*,
name=None,
):
super().__init__(name=name)
self.strides = strides
self.padding = padding.lower()
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(self, inputs, depthwise_kernel, pointwise_kernel):
return backend.nn.separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
def compute_output_spec(self, inputs, depthwise_kernel, pointwise_kernel):
output_shape = list(
depthwise_conv(
inputs,
depthwise_kernel,
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
).shape
)
if self.data_format == "channels_last":
output_shape[-1] = pointwise_kernel.shape[-1]
else:
output_shape[1] = pointwise_kernel.shape[-1]
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(
[
"keras.ops.separable_conv",
"keras.ops.nn.separable_conv",
]
)
def separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
"""General N-D separable convolution.
This ops supports 1D and 2D separable convolution. `separable_conv` is
a depthwise conv followed by a pointwise conv.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
depthwise_kernel: Tensor of rank N+2. `depthwise_kernel` has shape
[kernel_spatial_shape, num_input_channels, num_channels_multiplier],
`num_input_channels` should match the number of channels in
`inputs`.
pointwise_kernel: Tensor of rank N+2. `pointwise_kernel` has shape
`(*ones_like(kernel_spatial_shape),
num_input_channels * num_channels_multiplier, num_output_channels)`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
A tensor of rank N+2, the result of the depthwise conv operation.
"""
data_format = standardize_data_format(data_format)
padding = padding.lower()
if any_symbolic_tensors((inputs,)):
return SeparableConv(
strides,
padding,
data_format,
dilation_rate,
).symbolic_call(inputs, depthwise_kernel, pointwise_kernel)
return backend.nn.separable_conv(
inputs,
depthwise_kernel,
pointwise_kernel,
strides,
padding,
data_format,
dilation_rate,
)
| SeparableConv |
python | walkccc__LeetCode | solutions/3555. Smallest Subarray to Sort in Every Sliding Window/3555.py | {
"start": 0,
"end": 404
} | class ____:
def minSubarraySort(self, nums: list[int], k):
ans = []
for i in range(len(nums) - k + 1):
window = nums[i:i+k]
sortedWindow = sorted(window)
l = 0
r = k - 1
while l < k and window[l] == sortedWindow[l]:
l += 1
while r >= 0 and window[r] == sortedWindow[r]:
r -= 1
ans.append(0 if l > r else r - l + 1)
return ans
| Solution |
python | astropy__astropy | astropy/utils/iers/tests/test_iers.py | {
"start": 8857,
"end": 21634
} | class ____:
def setup_class(self):
"""Set up useful data for the tests."""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-02-30-test")
)
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-04-30-test")
)
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format="jd") * np.arange(self.N)
# This group of tests requires auto downloading to be on
self._auto_download = iers.conf.auto_download
iers.conf.auto_download = True
# auto_download = False is tested in test_IERS_B_parameters_loading_into_IERS_Auto()
def teardown_class(self):
# Restore the auto downloading setting
iers.conf.auto_download = self._auto_download
def teardown_method(self, method):
"""Run this after every test."""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("iers_auto_url_mirror", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", self.ame):
with pytest.raises(
ValueError,
match=re.escape(iers.INTERPOLATE_ERROR.format(self.ame)),
):
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter("ignore", iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227] * self.N) * u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced."""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", 5.0):
with pytest.raises(
ValueError,
match=(
r"IERS auto_max_age configuration value must be larger than 10"
r" days"
),
):
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_simple(self):
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta["predictive_mjd"]
dat._time_now = Time(predictive_mjd, format="mjd") + 7 * u.d
# Look at times before and after the test file begins. 0.1292934 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
assert np.allclose(
dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.2246227
)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format="mjd") + 60 * u.d
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
with (
pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
) as warns,
pytest.raises(
ValueError,
match="interpolating from IERS_Auto using predictive values",
),
):
dat.ut1_utc(Time(60000, format="mjd").jd)
assert len(warns) == 1
# Confirm that disabling the download means no warning because there is no
# refresh to even fail, but there will still be the interpolation error
with (
iers.conf.set_temp("auto_download", False),
pytest.raises(
ValueError,
match="interpolating from IERS_Auto using predictive values that are more",
),
):
dat.ut1_utc(Time(60000, format="mjd").jd)
# Warning only (i.e., no exception) if we are getting return status
with pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
):
dat.ut1_utc(Time(60000, format="mjd").jd, return_status=True)
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp("auto_max_age", None):
dat.ut1_utc(Time(60000, format="mjd").jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1292934
)
assert np.allclose(dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.3)
# Now the time range should be different.
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == (57539.0 + 60) * u.d
@pytest.mark.parametrize("query", ["ut1_utc", "pm_xy"])
@pytest.mark.parametrize("jd", [np.array([]), Time([], format="mjd")])
@pytest.mark.parametrize("return_status", [False, True])
def test_empty_mjd(query, jd, return_status):
# Regression test for gh-17008
iers_table = iers.IERS_Auto.open()
result = getattr(iers_table, query)(jd, return_status=return_status)
n_exp = (1 if query == "ut1_utc" else 2) + (1 if return_status else 0)
if n_exp == 1:
assert isinstance(result, np.ndarray)
assert result.size == 0
else:
assert len(result) == n_exp
assert all(r.size == 0 for r in result)
def test_IERS_B_parameters_loading_into_IERS_Auto():
# Make sure that auto downloading is off
with iers.conf.set_temp("auto_download", False):
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A],
B[name][i_B],
rtol=1e-15,
err_msg=(
f"Bug #9206 IERS B parameter {name} not copied over "
"correctly to IERS Auto"
),
)
# Issue with FTP, rework test into previous one when it's fixed
@pytest.mark.skipif(CI, reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert "UT1_UTC" in iersb_tab.colnames
finally:
iers.IERS_B.close()
def test_iers_b_out_of_range_handling():
# The following error/warning applies only to IERS_B, not to the default IERS_Auto
with iers.earth_orientation_table.set(iers.IERS_B.open()):
now = Time.now()
# Should be fine with bundled IERS-B
(now - 300 * u.day).ut1
# Default is to raise an error
match = r"\(some\) times are outside of range covered by IERS table"
with pytest.raises(iers.IERSRangeError, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "warn"):
with pytest.warns(iers.IERSDegradedAccuracyWarning, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 100 * u.day).ut1
@pytest.mark.remote_data
def test_iers_download_error_handling(tmp_path):
# Make sure an IERS-A table isn't already loaded
with set_temp_cache(tmp_path), iers.conf.set_temp("auto_download", True):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
# bad site name
with iers.conf.set_temp("iers_auto_url", "FAIL FAIL"):
# site that exists but doesn't have IERS data
with iers.conf.set_temp("iers_auto_url_mirror", "https://google.com"):
with pytest.warns(iers.IERSWarning) as record:
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 400 * u.day).ut1
assert len(record) == 3
assert str(record[0].message).startswith(
"failed to download FAIL FAIL: Malformed URL"
)
assert str(record[1].message).startswith(
"malformed IERS table from https://google.com"
)
assert str(record[2].message).startswith(
"unable to download valid IERS file, using bundled IERS-A"
)
OLD_DATA_FILES = {
"Leap_Second.dat": IERS_LEAP_SECOND_FILE,
"ReadMe.finals2000A": IERS_A_README,
"ReadMe.eopc04": IERS_B_README,
"eopc04.1962-now": IERS_B_FILE,
}
@pytest.mark.parametrize("data_file", sorted(OLD_DATA_FILES))
def test_get_pkg_data_filename_backcompat(data_file):
# Check that get_pkg_data_filename continues to work without breakage
# if users use it to access IERS tables and READMEs that used to be in
# astropy/utils/iers/data.
with pytest.warns(
AstropyDeprecationWarning,
match=f"Accessing {data_file} in this way is deprecated",
):
filename = get_pkg_data_filename(
"data/" + data_file, package="astropy.utils.iers"
)
assert filename == OLD_DATA_FILES[data_file]
| TestIERS_Auto |
python | aio-libs__aiohttp | aiohttp/payload.py | {
"start": 1108,
"end": 1505
} | class ____(str, enum.Enum):
normal = "normal"
try_first = "try_first"
try_last = "try_last"
def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
def register_payload(
factory: type["Payload"], type: Any, *, order: Order = Order.normal
) -> None:
PAYLOAD_REGISTRY.register(factory, type, order=order)
| Order |
python | huggingface__transformers | src/transformers/models/openai/modeling_openai.py | {
"start": 11683,
"end": 17050
} | class ____(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)])
self.register_buffer("position_ids", torch.arange(config.n_positions), persistent=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
# Code is different from when we had a single embedding matrix from position and token embeddings
position_ids = self.position_ids[None, : input_shape[-1]]
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, attention_mask, output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
hidden_states = hidden_states.view(*output_shape)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
@auto_docstring(
custom_intro="""
OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
| OpenAIGPTModel |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/root.py | {
"start": 426,
"end": 1696
} | class ____(ConfigurableClass):
def __init__(self, base_dir: str, inst_data: Optional[ConfigurableClassData] = None):
self._base_dir = base_dir
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@property
def base_dir(self) -> str:
return self._base_dir
def file_manager_dir(self, run_id: str) -> str:
check.str_param(run_id, "run_id")
return os.path.join(self.base_dir, "storage", run_id, "files")
@property
def storage_dir(self) -> str:
return os.path.join(self.base_dir, "storage")
@property
def schedules_dir(self) -> str:
return os.path.join(self.base_dir, "schedules")
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: LocalArtifactStorageConfig
) -> "LocalArtifactStorage":
return LocalArtifactStorage(inst_data=inst_data, **config_value)
@classmethod
def config_type(cls) -> UserConfigSchema:
return {"base_dir": StringSource}
def dispose(self):
pass
| LocalArtifactStorage |
python | facelessuser__soupsieve | tests/test_level2/test_first_child.py | {
"start": 55,
"end": 738
} | class ____(util.TestCase):
"""Test first child selector."""
def test_first_child(self):
"""Test first child."""
self.assert_selector(
"""
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
""",
"span:first-child",
["1", "4"],
flags=util.HTML
)
| TestFirstChild |
python | spack__spack | .github/workflows/bin/format-rst.py | {
"start": 1992,
"end": 2257
} | class ____:
def __init__(self, path: str, line: int, message: str) -> None:
self.path = path
self.line = line
self.message = message
def __str__(self) -> str:
return _warning(f"{self.path}:{self.line}: {self.message}")
| Warning |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 49556,
"end": 66059
} | class ____(QueryTest, AssertsCompiledSQL):
"""test sql.Comparator implementation for MapperProperties"""
__dialect__ = "default"
def _test(self, clause, expected, entity=None, checkparams=None):
dialect = default.DefaultDialect()
if entity is not None:
# specify a lead entity, so that when we are testing
# correlation, the correlation actually happens
sess = fixture_session()
lead = sess.query(entity)
context = lead._compile_context()
context.compile_state.statement._label_style = (
LABEL_STYLE_TABLENAME_PLUS_COL
)
lead = context.compile_state.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
clause = sess.query(entity).filter(clause)
self.assert_compile(clause, expected, checkparams=checkparams)
def _test_filter_aliases(
self, clause, expected, from_, onclause, checkparams=None
):
dialect = default.DefaultDialect()
sess = fixture_session()
lead = sess.query(from_).join(onclause, aliased=True)
full = lead.filter(clause)
context = lead._compile_context()
context.statement._label_style = LABEL_STYLE_TABLENAME_PLUS_COL
lead = context.statement.compile(dialect=dialect)
expected = (str(lead) + " WHERE " + expected).replace("\n", "")
self.assert_compile(full, expected, checkparams=checkparams)
@testing.combinations(
(operators.add, "+"),
(operators.mul, "*"),
(operators.sub, "-"),
argnames="py_op, sql_op",
id_="ar",
)
@testing.combinations(
(lambda User: 5, lambda User: User.id, ":id_1 %s users.id"),
(lambda: 5, lambda: literal(6), ":param_1 %s :param_2"),
(lambda User: User.id, lambda: 5, "users.id %s :id_1"),
(lambda User: User.id, lambda: literal("b"), "users.id %s :param_1"),
(lambda User: User.id, lambda User: User.id, "users.id %s users.id"),
(lambda: literal(5), lambda: "b", ":param_1 %s :param_2"),
(lambda: literal(5), lambda User: User.id, ":param_1 %s users.id"),
(lambda: literal(5), lambda: literal(6), ":param_1 %s :param_2"),
argnames="lhs, rhs, res",
id_="aar",
)
def test_arithmetic(self, py_op, sql_op, lhs, rhs, res):
User = self.classes.User
lhs = testing.resolve_lambda(lhs, User=User)
rhs = testing.resolve_lambda(rhs, User=User)
fixture_session().query(User)
self._test(py_op(lhs, rhs), res % sql_op)
@testing.combinations(
(operators.lt, "<", ">"),
(operators.gt, ">", "<"),
(operators.eq, "=", "="),
(operators.ne, "!=", "!="),
(operators.le, "<=", ">="),
(operators.ge, ">=", "<="),
id_="arr",
argnames="py_op, fwd_op, rev_op",
)
@testing.lambda_combinations(
lambda User, ualias: (
("a", User.id, ":id_1", "users.id"),
("a", literal("b"), ":param_2", ":param_1"), # note swap!
(User.id, "b", "users.id", ":id_1"),
(User.id, literal("b"), "users.id", ":param_1"),
(User.id, User.id, "users.id", "users.id"),
(literal("a"), "b", ":param_1", ":param_2"),
(literal("a"), User.id, ":param_1", "users.id"),
(literal("a"), literal("b"), ":param_1", ":param_2"),
(ualias.id, literal("b"), "users_1.id", ":param_1"),
(User.id, ualias.name, "users.id", "users_1.name"),
(User.name, ualias.name, "users.name", "users_1.name"),
(ualias.name, User.name, "users_1.name", "users.name"),
),
argnames="fixture",
)
def test_comparison(self, py_op, fwd_op, rev_op, fixture):
User = self.classes.User
fixture_session().query(User)
ualias = aliased(User)
lhs, rhs, l_sql, r_sql = fixture(User=User, ualias=ualias)
# the compiled clause should match either (e.g.):
# 'a' < 'b' -or- 'b' > 'a'.
compiled = str(
py_op(lhs, rhs).compile(dialect=default.DefaultDialect())
)
fwd_sql = "%s %s %s" % (l_sql, fwd_op, r_sql)
rev_sql = "%s %s %s" % (r_sql, rev_op, l_sql)
self.assert_(
compiled == fwd_sql or compiled == rev_sql,
"\n'"
+ compiled
+ "'\n does not match\n'"
+ fwd_sql
+ "'\n or\n'"
+ rev_sql
+ "'",
)
def test_o2m_compare_to_null(self):
User = self.classes.User
self._test(User.id == None, "users.id IS NULL") # noqa
self._test(User.id != None, "users.id IS NOT NULL") # noqa
self._test(~(User.id == None), "users.id IS NOT NULL") # noqa
self._test(~(User.id != None), "users.id IS NULL") # noqa
self._test(None == User.id, "users.id IS NULL") # noqa
self._test(~(None == User.id), "users.id IS NOT NULL") # noqa
def test_m2o_compare_to_null(self):
Address = self.classes.Address
self._test(Address.user == None, "addresses.user_id IS NULL") # noqa
self._test(
~(Address.user == None), "addresses.user_id IS NOT NULL" # noqa
)
self._test(
~(Address.user != None), "addresses.user_id IS NULL" # noqa
)
self._test(None == Address.user, "addresses.user_id IS NULL") # noqa
self._test(
~(None == Address.user), "addresses.user_id IS NOT NULL" # noqa
)
def test_o2m_compare_to_null_aliased(self):
User = self.classes.User
u1 = aliased(User)
self._test(u1.id == None, "users_1.id IS NULL") # noqa
self._test(u1.id != None, "users_1.id IS NOT NULL") # noqa
self._test(~(u1.id == None), "users_1.id IS NOT NULL") # noqa
self._test(~(u1.id != None), "users_1.id IS NULL") # noqa
def test_m2o_compare_to_null_aliased(self):
Address = self.classes.Address
a1 = aliased(Address)
self._test(a1.user == None, "addresses_1.user_id IS NULL") # noqa
self._test(
~(a1.user == None), "addresses_1.user_id IS NOT NULL" # noqa
)
self._test(a1.user != None, "addresses_1.user_id IS NOT NULL") # noqa
self._test(~(a1.user != None), "addresses_1.user_id IS NULL") # noqa
def test_relationship_unimplemented(self):
User = self.classes.User
for op in [
User.addresses.like,
User.addresses.ilike,
User.addresses.__le__,
User.addresses.__gt__,
]:
assert_raises(NotImplementedError, op, "x")
def test_o2m_any(self):
User, Address = self.classes.User, self.classes.Address
self._test(
User.addresses.any(Address.id == 17),
"EXISTS (SELECT 1 FROM addresses "
"WHERE users.id = addresses.user_id AND addresses.id = :id_1)",
entity=User,
)
def test_o2m_any_aliased(self):
User, Address = self.classes.User, self.classes.Address
u1 = aliased(User)
a1 = aliased(Address)
self._test(
u1.addresses.of_type(a1).any(a1.id == 17),
"EXISTS (SELECT 1 FROM addresses AS addresses_1 "
"WHERE users_1.id = addresses_1.user_id AND "
"addresses_1.id = :id_1)",
entity=u1,
)
def test_m2o_compare_instance(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(Address.user == u7, ":param_1 = addresses.user_id")
def test_m2o_compare_instance_negated(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
self._test(
Address.user != u7,
"addresses.user_id != :user_id_1 OR addresses.user_id IS NULL",
checkparams={"user_id_1": 7},
)
def test_m2o_compare_instance_negated_warn_on_none(self):
User, Address = self.classes.User, self.classes.Address
u7_transient = User(id=None)
with expect_warnings("Got None for value of column users.id; "):
self._test(
Address.user != u7_transient,
"addresses.user_id != :user_id_1 "
"OR addresses.user_id IS NULL",
checkparams={"user_id_1": None},
)
def test_m2o_compare_instance_aliased(self):
User, Address = self.classes.User, self.classes.Address
u7 = User(id=5)
attributes.instance_state(u7)._commit_all(attributes.instance_dict(u7))
u7.id = 7
u7_transient = User(id=7)
a1 = aliased(Address)
self._test(
a1.user == u7,
":param_1 = addresses_1.user_id",
checkparams={"param_1": 7},
)
self._test(
a1.user != u7,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={"user_id_1": 7},
)
a1 = aliased(Address)
self._test(
a1.user == u7_transient,
":param_1 = addresses_1.user_id",
checkparams={"param_1": 7},
)
self._test(
a1.user != u7_transient,
"addresses_1.user_id != :user_id_1 OR addresses_1.user_id IS NULL",
checkparams={"user_id_1": 7},
)
def test_selfref_relationship(self):
Node = self.classes.Node
nalias = aliased(Node)
# auto self-referential aliasing
self._test(
Node.children.any(Node.data == "n1"),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={"data_1": "n1"},
)
# needs autoaliasing
self._test(
Node.children == None, # noqa
"NOT (EXISTS (SELECT 1 FROM nodes AS nodes_1 "
"WHERE nodes.id = nodes_1.parent_id))",
entity=Node,
checkparams={},
)
self._test(
Node.parent == None, # noqa
"nodes.parent_id IS NULL",
checkparams={},
)
self._test(
nalias.parent == None, # noqa
"nodes_1.parent_id IS NULL",
checkparams={},
)
self._test(
nalias.parent != None, # noqa
"nodes_1.parent_id IS NOT NULL",
checkparams={},
)
self._test(
nalias.children == None, # noqa
"NOT (EXISTS ("
"SELECT 1 FROM nodes WHERE nodes_1.id = nodes.parent_id))",
entity=nalias,
checkparams={},
)
self._test(
nalias.children.any(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes WHERE "
"nodes_1.id = nodes.parent_id AND nodes.data = :data_1)",
entity=nalias,
checkparams={"data_1": "some data"},
)
# this fails because self-referential any() is auto-aliasing;
# the fact that we use "nalias" here means we get two aliases.
# self._test(
# Node.children.any(nalias.data == 'some data'),
# "EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
# "nodes.id = nodes_1.parent_id AND nodes_1.data = :data_1)",
# entity=Node
# )
self._test(
nalias.parent.has(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes WHERE nodes.id = nodes_1.parent_id "
"AND nodes.data = :data_1)",
entity=nalias,
checkparams={"data_1": "some data"},
)
self._test(
Node.parent.has(Node.data == "some data"),
"EXISTS (SELECT 1 FROM nodes AS nodes_1 WHERE "
"nodes_1.id = nodes.parent_id AND nodes_1.data = :data_1)",
entity=Node,
checkparams={"data_1": "some data"},
)
self._test(
Node.parent == Node(id=7),
":param_1 = nodes.parent_id",
checkparams={"param_1": 7},
)
self._test(
nalias.parent == Node(id=7),
":param_1 = nodes_1.parent_id",
checkparams={"param_1": 7},
)
self._test(
nalias.parent != Node(id=7),
"nodes_1.parent_id != :parent_id_1 "
"OR nodes_1.parent_id IS NULL",
checkparams={"parent_id_1": 7},
)
self._test(
nalias.parent != Node(id=7),
"nodes_1.parent_id != :parent_id_1 "
"OR nodes_1.parent_id IS NULL",
checkparams={"parent_id_1": 7},
)
self._test(
nalias.children.contains(Node(id=7, parent_id=12)),
"nodes_1.id = :param_1",
checkparams={"param_1": 12},
)
def test_multilevel_any(self):
User, Address, Dingaling = (
self.classes.User,
self.classes.Address,
self.classes.Dingaling,
)
sess = fixture_session()
q = sess.query(User).filter(
User.addresses.any(
and_(Address.id == Dingaling.address_id, Dingaling.data == "x")
)
)
# new since #2746 - correlate_except() now takes context into account
# so its usage in any() is not as disrupting.
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users "
"WHERE EXISTS (SELECT 1 "
"FROM addresses, dingalings "
"WHERE users.id = addresses.user_id AND "
"addresses.id = dingalings.address_id AND "
"dingalings.data = :data_1)",
)
def test_op(self):
User = self.classes.User
self._test(User.name.op("ilike")("17"), "users.name ilike :name_1")
def test_in(self):
User = self.classes.User
self._test(
User.id.in_(["a", "b"]), "users.id IN (__[POSTCOMPILE_id_1])"
)
def test_in_on_relationship_not_supported(self):
User, Address = self.classes.User, self.classes.Address
assert_raises(NotImplementedError, Address.user.in_, [User(id=5)])
def test_in_instrumented_attribute(self):
"""test #12019"""
User = self.classes.User
self._test(
User.id.in_([User.id, User.name]),
"users.id IN (users.id, users.name)",
)
def test_neg(self):
User = self.classes.User
self._test(-User.id, "-users.id")
self._test(User.id + -User.id, "users.id + -users.id")
def test_between(self):
User = self.classes.User
self._test(
User.id.between("a", "b"), "users.id BETWEEN :id_1 AND :id_2"
)
def test_collate(self):
User = self.classes.User
self._test(
collate(User.name, "utf8_bin"), "users.name COLLATE utf8_bin"
)
self._test(
User.name.collate("utf8_bin"), "users.name COLLATE utf8_bin"
)
def test_selfref_between(self):
User = self.classes.User
ualias = aliased(User)
self._test(
User.id.between(ualias.id, ualias.id),
"users.id BETWEEN users_1.id AND users_1.id",
)
self._test(
ualias.id.between(User.id, User.id),
"users_1.id BETWEEN users.id AND users.id",
)
def test_clauses(self):
User, Address = self.classes.User, self.classes.Address
for expr, compare in (
(func.max(User.id), "max(users.id)"),
(User.id.desc(), "users.id DESC"),
(
between(5, User.id, Address.id),
":param_1 BETWEEN users.id AND addresses.id",
),
# this one would require adding compile() to
# InstrumentedScalarAttribute. do we want this ?
# (User.id, "users.id")
):
c = expr.compile(dialect=default.DefaultDialect())
assert str(c) == compare, "%s != %s" % (str(c), compare)
| OperatorTest |
python | charliermarsh__ruff | crates/ty_completion_eval/truth/object-attr-instance-methods/main.py | {
"start": 0,
"end": 330
} | class ____:
def __init__(self): pass
def lion(self): pass
def tiger(self): pass
def bear(self): pass
def chicken(self): pass
def turkey(self): pass
def wasp(self): pass
def rabbit(self): pass
def squirrel(self): pass
quux = Quux()
quux.tur<CURSOR: turkey>
quux = Quux()
quux.be<CURSOR: bear>
| Quux |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py | {
"start": 1794,
"end": 2336
} | class ____(BaseEvent):
"""Event fired when attachment processing starts."""
page_id: str = Field(description="ID of the parent page")
attachment_id: str = Field(description="ID of the attachment")
attachment_name: str = Field(description="Name of the attachment")
attachment_type: str = Field(description="MIME type of the attachment")
attachment_size: int = Field(description="Size of the attachment in bytes")
attachment_link: str = Field(description="Link to the attachment")
| SNOWKBAttachmentProcessingStartEvent |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/dataframes.py | {
"start": 10705,
"end": 11015
} | class ____(Capture):
def __init__(self, left, right, ctx) -> None:
self.ctx = ctx
self.left = left
self.right = right
def __str__(self) -> str:
return f"{self.left} - {self.right}"
def execute(self):
return get_val(self.left) - get_val(self.right)
| CaptureSub |
python | ansible__ansible | test/units/mock/custom_types.py | {
"start": 66,
"end": 498
} | class ____(c.Mapping):
"""Minimally functional Mapping implementation for testing."""
def __init__(self, data: dict) -> None:
self._data = data
def __getitem__(self, __key):
return self._data[__key]
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __repr__(self):
return f'{type(self).__name__}({self._data!r})'
| CustomMapping |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 94489,
"end": 98081
} | class ____(APITestCase):
def setUp(self):
super().setUp()
self.login_as(self.user)
self.dashboard = Dashboard.objects.create(
title="Dashboard 1",
created_by_id=self.user.id,
organization=self.organization,
)
self.anon_users_query: _QueryDict = {
"name": "Anonymous Users",
"fields": ["count()"],
"aggregates": ["count()"],
"columns": [],
"fieldAliases": ["Count Alias"],
"conditions": "!has:user.email",
}
self.known_users_query: _QueryDict = {
"name": "Known Users",
"fields": ["count_unique(user.email)"],
"aggregates": ["count_unique(user.email)"],
"columns": [],
"fieldAliases": [],
"conditions": "has:user.email",
}
self.geo_errors_query: _QueryDict = {
"name": "Errors by Geo",
"fields": ["count()", "geo.country_code"],
"aggregates": ["count()"],
"columns": ["geo.country_code"],
"fieldAliases": [],
"conditions": "has:geo.country_code",
}
def do_request(self, method, url, data=None):
func = getattr(self.client, method)
return func(url, data=data)
def assert_serialized_widget_query(self, data, widget_data_source):
if "id" in data:
assert data["id"] == str(widget_data_source.id)
if "name" in data:
assert data["name"] == widget_data_source.name
if "fields" in data:
assert data["fields"] == widget_data_source.fields
if "conditions" in data:
assert data["conditions"] == widget_data_source.conditions
if "orderby" in data:
assert data["orderby"] == widget_data_source.orderby
if "aggregates" in data:
assert data["aggregates"] == widget_data_source.aggregates
if "columns" in data:
assert data["columns"] == widget_data_source.columns
if "fieldAliases" in data:
assert data["fieldAliases"] == widget_data_source.field_aliases
if "selectedAggregate" in data:
assert data["selectedAggregate"] == widget_data_source.selected_aggregate
def get_widgets(self, dashboard_id):
return DashboardWidget.objects.filter(dashboard_id=dashboard_id).order_by("order")
def assert_serialized_widget(self, data, expected_widget):
if "id" in data:
assert data["id"] == str(expected_widget.id)
if "title" in data:
assert data["title"] == expected_widget.title
if "interval" in data:
assert data["interval"] == expected_widget.interval
if "limit" in data:
assert data["limit"] == expected_widget.limit
if "displayType" in data:
assert data["displayType"] == DashboardWidgetDisplayTypes.get_type_name(
expected_widget.display_type
)
if "layout" in data:
assert data["layout"] == expected_widget.detail["layout"]
if "datasetSource" in data:
assert data["datasetSource"] == DATASET_SOURCES[expected_widget.dataset_source]
def create_user_member_role(self):
self.user = self.create_user(is_superuser=False)
self.create_member(
user=self.user,
organization=self.organization,
role="member",
teams=[self.team],
)
self.login_as(self.user)
@pytest.mark.migrations
| OrganizationDashboardWidgetTestCase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/image_ops/extract_volume_patches_grad_test.py | {
"start": 1248,
"end": 3771
} | class ____(test.TestCase, parameterized.TestCase):
"""Gradient-checking for ExtractVolumePatches op."""
@parameterized.parameters([
{
'in_shape': [2, 5, 5, 5, 3],
'ksizes': [1, 1, 1, 1, 1],
'strides': [1, 2, 3, 4, 1],
},
{
'in_shape': [2, 7, 7, 7, 3],
'ksizes': [1, 3, 3, 3, 1],
'strides': [1, 1, 1, 1, 1],
},
{
'in_shape': [2, 5, 7, 6, 3],
'ksizes': [1, 3, 2, 2, 1],
'strides': [1, 1, 1, 1, 1],
},
{
'in_shape': [2, 7, 8, 6, 3],
'ksizes': [1, 2, 3, 2, 1],
'strides': [1, 2, 4, 3, 1],
},
])
def testGradient(self, in_shape, ksizes, strides):
if test_util.is_gpu_available():
self.skipTest('b/171837334: skip gpu test.')
# Set graph seed for determinism.
random_seed = 42
random_seed_lib.set_random_seed(random_seed)
with self.cached_session():
np.random.seed(random_seed)
input_val = constant_op.constant(
np.random.random(in_shape), dtype=dtypes.float32)
for padding in ['VALID', 'SAME']:
def extract(in_val, ksizes=ksizes, strides=strides, padding=padding):
return array_ops.extract_volume_patches(in_val, ksizes, strides,
padding)
rtn = gradient_checker_v2.compute_gradient(extract, [input_val])
err = gradient_checker_v2.max_error(*rtn)
print('extract_volume_patches gradient err: %.4e' % err)
self.assertLess(err, 1e-4)
@parameterized.parameters(set((True, context.executing_eagerly())))
def testConstructGradientWithLargeVolumes(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
batch_size = 4
planes = 8
height = 32
width = 32
ksize = 5
shape = (batch_size, planes, height, width, 1)
volumes = variables.Variable(
np.random.uniform(size=np.prod(shape)).reshape(shape), name='inputs')
tape.watch(volumes)
patches = array_ops.extract_volume_patches(
volumes,
ksizes=[1, ksize, ksize, ksize, 1],
strides=[1, 1, 1, 1, 1],
padding='SAME')
# Github issue: #20146
# tf.extract_volume_patches() gradient very slow at graph construction
# time.
gradients = tape.gradient(patches, volumes)
# Won't time out.
self.assertIsNotNone(gradients)
if __name__ == '__main__':
test.main()
| ExtractVolumePatchesGradTest |
python | doocs__leetcode | solution/1200-1299/1239.Maximum Length of a Concatenated String with Unique Characters/Solution.py | {
"start": 0,
"end": 407
} | class ____:
def maxLength(self, arr: List[str]) -> int:
s = [0]
for t in arr:
x = 0
for b in map(lambda c: ord(c) - 97, t):
if x >> b & 1:
x = 0
break
x |= 1 << b
if x:
s.extend((x | y) for y in s if (x & y) == 0)
return max(x.bit_count() for x in s)
| Solution |
python | buildout__buildout | src/zc/buildout/_package_index.py | {
"start": 12896,
"end": 13437
} | class ____:
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
| ContentChecker |
python | protocolbuffers__protobuf | python/google/protobuf/service_reflection.py | {
"start": 3359,
"end": 8119
} | class ____(object):
"""This class constructs a protocol service class using a service descriptor.
Given a service descriptor, this class constructs a class that represents
the specified service descriptor. One service builder instance constructs
exactly one service class. That means all instances of that class share the
same builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
service class.
"""
self.descriptor = service_descriptor
def BuildService(builder, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
# Making sure to use exact argument names from the abstract interface in
# service.py to match the type signature
def _WrapCallMethod(self, method_descriptor, rpc_controller, request, done):
return builder._CallMethod(self, method_descriptor, rpc_controller,
request, done)
def _WrapGetRequestClass(self, method_descriptor):
return builder._GetRequestClass(method_descriptor)
def _WrapGetResponseClass(self, method_descriptor):
return builder._GetResponseClass(method_descriptor)
builder.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: builder.descriptor)
cls.GetDescriptor.__doc__ = 'Returns the service descriptor.'
cls.GetRequestClass = _WrapGetRequestClass
cls.GetResponseClass = _WrapGetResponseClass
for method in builder.descriptor.methods:
setattr(cls, method.name, builder._GenerateNonImplementedMethod(method))
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback))
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
"""The body of all methods in the generated service class.
Args:
method_name: Name of the method being executed.
rpc_controller: RPC controller used to execute this method.
callback: A callback which will be invoked when the method finishes.
"""
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
callback(None)
| _ServiceBuilder |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 19084,
"end": 19286
} | class ____(PrefectBaseModel):
"""Filter by `Log.flow_run_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of flow run IDs to include"
)
| LogFilterFlowRunId |
python | great-expectations__great_expectations | great_expectations/data_context/store/checkpoint_store.py | {
"start": 860,
"end": 4538
} | class ____(Store):
_key_class = StringKey
def __init__(
self,
store_backend: dict | None = None,
runtime_environment: dict | None = None,
store_name: str = "no_store_name",
) -> None:
store_backend_class = self._determine_store_backend_class(store_backend)
if store_backend and issubclass(store_backend_class, TupleStoreBackend):
store_backend["filepath_suffix"] = store_backend.get("filepath_suffix", ".json")
super().__init__(
store_backend=store_backend,
runtime_environment=runtime_environment,
store_name=store_name,
)
def get_key(self, name: str, id: str | None = None) -> GXCloudIdentifier | StringKey:
"""Given a name and optional ID, build the correct key for use in the CheckpointStore."""
if self.cloud_mode:
return GXCloudIdentifier(
resource_type=GXCloudRESTResource.CHECKPOINT,
id=id,
resource_name=name,
)
return self._key_class(key=name)
@override
@classmethod
def gx_cloud_response_json_to_object_dict(cls, response_json: dict) -> dict:
response_data = response_json["data"]
checkpoint_data: dict
if isinstance(response_data, list):
if len(response_data) != 1:
if len(response_data) == 0:
msg = f"Cannot parse empty data from GX Cloud payload: {response_json}"
else:
msg = f"Cannot parse multiple items from GX Cloud payload: {response_json}"
raise ValueError(msg)
checkpoint_data = response_data[0]
else:
checkpoint_data = response_data
return cls._convert_raw_json_to_object_dict(checkpoint_data)
@override
@staticmethod
def _convert_raw_json_to_object_dict(data: dict) -> dict:
return data
@override
def serialize(self, value):
# In order to enable the custom json_encoders in Checkpoint, we need to set `models_as_dict` off # noqa: E501 # FIXME CoP
# Ref: https://docs.pydantic.dev/1.10/usage/exporting_models/#serialising-self-reference-or-other-models
data = value.json(models_as_dict=False, indent=2, sort_keys=True, exclude_none=True)
if self.cloud_mode:
return json.loads(data)
return data
@override
def deserialize(self, value):
from great_expectations.checkpoint.checkpoint import Checkpoint
if self.cloud_mode:
return Checkpoint.parse_obj(value)
return Checkpoint.parse_raw(value)
@override
def _add(self, key: DataContextKey, value: Checkpoint, **kwargs):
if not self.cloud_mode:
value.id = str(uuid.uuid4())
return super()._add(key=key, value=value, **kwargs)
@override
def _update(self, key: DataContextKey, value: Checkpoint, **kwargs):
try:
super()._update(key=key, value=value, **kwargs)
except gx_exceptions.StoreBackendError as e:
name = key.to_tuple()[0]
raise ValueError(f"Could not update Checkpoint '{name}'") from e # noqa: TRY003 # FIXME CoP
@staticmethod
def default_checkpoints_exist(directory_path: str) -> bool:
if not directory_path:
return False
checkpoints_directory_path: str = os.path.join( # noqa: PTH118 # FIXME CoP
directory_path,
DataContextConfigDefaults.DEFAULT_CHECKPOINT_STORE_BASE_DIRECTORY_RELATIVE_NAME.value,
)
return os.path.isdir(checkpoints_directory_path) # noqa: PTH112 # FIXME CoP
| CheckpointStore |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 1838,
"end": 2936
} | class ____(TestCase):
"""Tests for ``tail()``"""
def test_iterator_greater(self):
"""Length of iterator is greater than requested tail"""
self.assertEqual(list(mi.tail(3, iter('ABCDEFG'))), list('EFG'))
def test_iterator_equal(self):
"""Length of iterator is equal to the requested tail"""
self.assertEqual(list(mi.tail(7, iter('ABCDEFG'))), list('ABCDEFG'))
def test_iterator_less(self):
"""Length of iterator is less than requested tail"""
self.assertEqual(list(mi.tail(8, iter('ABCDEFG'))), list('ABCDEFG'))
def test_sized_greater(self):
"""Length of sized iterable is greater than requested tail"""
self.assertEqual(list(mi.tail(3, 'ABCDEFG')), list('EFG'))
def test_sized_equal(self):
"""Length of sized iterable is less than requested tail"""
self.assertEqual(list(mi.tail(7, 'ABCDEFG')), list('ABCDEFG'))
def test_sized_less(self):
"""Length of sized iterable is less than requested tail"""
self.assertEqual(list(mi.tail(8, 'ABCDEFG')), list('ABCDEFG'))
| TailTests |
python | django__django | django/db/models/lookups.py | {
"start": 15850,
"end": 16530
} | class ____:
underflow_exception = EmptyResultSet
overflow_exception = EmptyResultSet
def process_rhs(self, compiler, connection):
rhs = self.rhs
if isinstance(rhs, int):
field_internal_type = self.lhs.output_field.get_internal_type()
min_value, max_value = connection.ops.integer_field_range(
field_internal_type
)
if min_value is not None and rhs < min_value:
raise self.underflow_exception
if max_value is not None and rhs > max_value:
raise self.overflow_exception
return super().process_rhs(compiler, connection)
| IntegerFieldOverflow |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_at_time.py | {
"start": 269,
"end": 5521
} | class ____:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_at_time(self, tzstr, frame_or_series):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="h")
ts = frame_or_series(
np.random.default_rng(2).standard_normal(len(rng)), index=rng
)
ts_local = ts.tz_localize(tzstr)
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
tm.assert_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_at_time(self, frame_or_series):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(
np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng
)
ts = tm.get_obj(ts, frame_or_series)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_equal(result, expected)
def test_at_time_midnight(self, frame_or_series):
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(
np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng
)
ts = tm.get_obj(ts, frame_or_series)
result = ts.at_time(time(0, 0))
tm.assert_equal(result, ts)
def test_at_time_nonexistent(self, frame_or_series):
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.default_rng(2).standard_normal(len(rng)), rng)
ts = tm.get_obj(ts, frame_or_series)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=timezone.utc)]
)
def test_at_time_errors(self, hour):
# GH#24043
dti = date_range("2018", periods=3, freq="h")
df = DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH#24043
dti = date_range("2018", periods=3, freq="h", tz="US/Pacific")
df = DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=zoneinfo.ZoneInfo("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self, frame_or_series):
# GH#20725
obj = DataFrame([[1, 2, 3], [4, 5, 6]])
obj = tm.get_obj(obj, frame_or_series)
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg): # index is not a DatetimeIndex
obj.at_time("00:00")
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/2/2000", freq="5min")
ts = DataFrame(np.random.default_rng(2).standard_normal((len(rng), len(rng))))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
# Without clearing freq, result has freq 1440T and expected 5T
result.index = result.index._with_freq(None)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
def test_at_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq="30min")
df = DataFrame(
np.random.default_rng(2).standard_normal((len(index), 5)), index=index
)
akey = time(12, 0, 0)
ainds = [24, 72, 120, 168]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, expected2)
assert len(result) == 4
def test_at_time_ambiguous_format_deprecation(self):
# GH#50839
rng = date_range("1/1/2000", "1/5/2000", freq="125min")
ts = DataFrame(list(range(len(rng))), index=rng)
msg1 = "The string '.*' cannot be parsed"
with tm.assert_produces_warning(Pandas4Warning, match=msg1):
ts.at_time("2022-12-12 00:00:00")
with tm.assert_produces_warning(Pandas4Warning, match=msg1):
ts.at_time("2022-12-12 00:00:00 +09:00")
with tm.assert_produces_warning(Pandas4Warning, match=msg1):
ts.at_time("2022-12-12 00:00:00.000000")
# The dateutil parser raises on these, so we can give the future behavior
# immediately using pd.core.tools.to_time
ts.at_time("235500")
ts.at_time("115500PM")
| TestAtTime |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace.py | {
"start": 1341,
"end": 8373
} | class ____(TestCase):
"""Test serialization of columnar uptime data to span format."""
def setUp(self) -> None:
super().setUp()
self.project_slugs = {1: "test-project", 2: "another-project"}
self.snuba_params = mock.MagicMock(spec=SnubaParams)
self.snuba_params.project_ids = [1]
def test_basic_uptime_item_serialization(self):
"""Test basic serialization with all required fields."""
row_dict = {
"sentry.item_id": ProtoAttributeValue(val_str="check-123"),
"sentry.project_id": ProtoAttributeValue(val_int=1),
"guid": ProtoAttributeValue(val_str="check-123"),
"sentry.trace_id": ProtoAttributeValue(val_str="a" * 32),
"check_status": ProtoAttributeValue(val_str="success"),
"http_status_code": ProtoAttributeValue(val_int=200),
"request_url": ProtoAttributeValue(val_str="https://example.com"),
"original_url": ProtoAttributeValue(val_str="https://example.com"),
"actual_check_time_us": ProtoAttributeValue(val_int=1700000000000000),
"check_duration_us": ProtoAttributeValue(val_int=500000),
"subscription_id": ProtoAttributeValue(val_str="sub-456"),
"region": ProtoAttributeValue(val_str="us-east-1"),
"request_sequence": ProtoAttributeValue(val_int=0),
}
with override_settings(UPTIME_REGIONS=TEST_UPTIME_REGIONS):
result = _serialize_columnar_uptime_item(row_dict, self.project_slugs)
assert result["event_id"] == "check-123"
assert result["project_id"] == 1
assert result["project_slug"] == "test-project"
assert result["transaction_id"] == "a" * 32
assert result["transaction"] == "uptime.check"
assert result["event_type"] == "uptime_check"
assert result["op"] == "uptime.request"
assert result["duration"] == 500.0
assert result["name"] == "https://example.com"
assert result["description"] == "Uptime Check Request [success]"
assert result["region_name"] == "US East (N. Virginia)"
assert result["start_timestamp"] == 1700000000
assert result["end_timestamp"] == 1700000000.5
attrs = result["additional_attributes"]
assert attrs["guid"] == "check-123"
assert attrs["check_status"] == "success"
assert attrs["http_status_code"] == 200
assert attrs["request_url"] == "https://example.com"
assert attrs["original_url"] == "https://example.com"
assert attrs["subscription_id"] == "sub-456"
assert attrs["region"] == "us-east-1"
assert attrs["request_sequence"] == 0
assert "project_id" not in attrs
assert "organization.id" not in attrs
assert "timestamp" not in attrs
def test_redirect_chain_serialization(self):
"""Test serialization of redirect chain with different URLs."""
row_dict = {
"sentry.item_id": ProtoAttributeValue(val_str="check-789"),
"sentry.project_id": ProtoAttributeValue(val_int=1),
"guid": ProtoAttributeValue(val_str="check-789"),
"sentry.trace_id": ProtoAttributeValue(val_str="b" * 32),
"check_status": ProtoAttributeValue(val_str="success"),
"http_status_code": ProtoAttributeValue(val_int=301),
"request_url": ProtoAttributeValue(val_str="https://www.example.com"),
"original_url": ProtoAttributeValue(val_str="https://example.com"),
"actual_check_time_us": ProtoAttributeValue(val_int=1700000000000000),
"check_duration_us": ProtoAttributeValue(val_int=300000),
"region": ProtoAttributeValue(val_str="eu-west-1"),
"request_sequence": ProtoAttributeValue(val_int=1),
}
result = _serialize_columnar_uptime_item(row_dict, self.project_slugs)
assert result["description"] == "Uptime Check Request [success]"
assert result["name"] == "https://www.example.com"
assert result["additional_attributes"]["request_url"] == "https://www.example.com"
assert result["additional_attributes"]["original_url"] == "https://example.com"
assert result["additional_attributes"]["request_sequence"] == 1
def test_null_and_missing_fields(self):
"""Test handling of null and missing optional fields."""
row_dict = {
"sentry.item_id": ProtoAttributeValue(val_str="check-null"),
"sentry.project_id": ProtoAttributeValue(val_int=1),
"guid": ProtoAttributeValue(val_str="check-null"),
"sentry.trace_id": ProtoAttributeValue(val_str="c" * 32),
"check_status": ProtoAttributeValue(val_str="failure"),
"http_status_code": ProtoAttributeValue(is_null=True),
"request_url": ProtoAttributeValue(val_str="https://test.com"),
"actual_check_time_us": ProtoAttributeValue(val_int=1700000000000000),
"dns_lookup_duration_us": ProtoAttributeValue(val_int=50000),
"tcp_connection_duration_us": ProtoAttributeValue(is_null=True),
"region": ProtoAttributeValue(val_str="us-east-1"),
}
result = _serialize_columnar_uptime_item(row_dict, self.project_slugs)
assert result["duration"] == 0.0
assert result["name"] == "https://test.com"
assert result["description"] == "Uptime Check Request [failure]"
attrs = result["additional_attributes"]
assert "http_status_code" not in attrs
assert "original_url" not in attrs
assert attrs["dns_lookup_duration_us"] == 50000
assert "tcp_connection_duration_us" not in attrs
def test_region_name_mapping(self):
"""Test that region codes are properly mapped to region names."""
test_cases = [
("us-east-1", "US East (N. Virginia)"),
("eu-west-1", "Europe (Ireland)"),
("nonexistent-region", "Unknown"),
]
for region_code, expected_name in test_cases:
row_dict = {
"sentry.item_id": ProtoAttributeValue(val_str=f"check-{region_code}"),
"sentry.project_id": ProtoAttributeValue(val_int=1),
"guid": ProtoAttributeValue(val_str=f"check-{region_code}"),
"sentry.trace_id": ProtoAttributeValue(val_str="a" * 32),
"check_status": ProtoAttributeValue(val_str="success"),
"request_url": ProtoAttributeValue(val_str="https://example.com"),
"actual_check_time_us": ProtoAttributeValue(val_int=1700000000000000),
"check_duration_us": ProtoAttributeValue(val_int=500000),
"region": ProtoAttributeValue(val_str=region_code),
}
with override_settings(UPTIME_REGIONS=TEST_UPTIME_REGIONS):
result = _serialize_columnar_uptime_item(row_dict, self.project_slugs)
assert result["region_name"] == expected_name
| TestSerializeColumnarUptimeItem |
python | pytorch__pytorch | test/inductor/test_debug_trace.py | {
"start": 6921,
"end": 7977
} | class ____:
var_ranges = {p0: 256}
index0 = p0
def body(self, ops):
get_index = self.get_index('index0')
load = ops.load('arg0_1', get_index)
constant = ops.constant(1.0, torch.float32)
add = ops.add(load, constant)
get_index_1 = self.get_index('index0')
store = ops.store('buf0', get_index_1, add, None)
return store
op0_op1.snodes[1] =
op1: SchedulerNode(ComputedBuffer)
op1.writes = [MemoryDep('buf1', c0, {c0: 256})]
op1.unmet_dependencies = [MemoryDep('buf0', c0, {c0: 256})]
op1.met_dependencies = []
op1.outputs = [
buf1: ComputedBuffer
buf1.layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf1.users = [NodeUser(node=ExternKernelSchedulerNode(name='op2'), can_inplace=False, is_weak=False)]
]
op1.group.device = cpu
op1.group.iteration = ((256,), ())
op1.sizes = ([256], [])
buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
buf1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
| op0_loop_body |
python | huggingface__transformers | src/transformers/models/chameleon/modeling_chameleon.py | {
"start": 1723,
"end": 2552
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
ChameleonRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Chameleon
| ChameleonRMSNorm |
python | ApeWorX__ape | src/ape_ethereum/_converters.py | {
"start": 538,
"end": 1203
} | class ____(ConverterAPI):
"""Converts units like `1 ether` to 1e18 wei."""
def is_convertible(self, value: str) -> bool:
if not isinstance(value, str):
return False
elif " " not in value or len(value.split(" ")) > 2:
return False
val, unit = value.split(" ")
return unit.lower() in ETHER_UNITS and bool(NUMBER_PATTERN.match(val))
def convert(self, value: str) -> int:
value, unit = value.split(" ")
converted_value = int(
Decimal(value.replace("_", "").replace(",", "")) * ETHER_UNITS[unit.lower()]
)
return CurrencyValue(converted_value)
| WeiConversions |
python | google__jax | tests/hijax_test.py | {
"start": 5397,
"end": 5528
} | class ____:
elts: tuple
def __repr__(self):
return 'Tup{' + ','.join(map(repr, self.elts)) + '}'
@dataclass(frozen=True)
| HiTup |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/partition.py | {
"start": 309,
"end": 1213
} | class ____(Generic[T_cov]):
"""A Partition represents a single slice of the entire set of a job's possible work. It consists
of a value, which is an object that represents that partition, and an optional name, which is
used to label the partition in a human-readable way.
Args:
value (Any): The object for this partition
name (str): Name for this partition
"""
def __init__(self, value: Any, name: Optional[str] = None):
self._value = value
self._name = check.str_param(name or str(value), "name")
@property
def value(self) -> T_cov:
return self._value
@property
def name(self) -> str:
return self._name
def __eq__(self, other: object) -> bool:
if not isinstance(other, Partition):
return False
else:
return self.value == other.value and self.name == other.name
| Partition |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_delete_event.py | {
"start": 233,
"end": 549
} | class ____(BaseModel):
item_id: str
"""The ID of the item to delete."""
type: Literal["conversation.item.delete"]
"""The event type, must be `conversation.item.delete`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| ConversationItemDeleteEvent |
python | pytorch__pytorch | test/distributed/tensor/test_placement_types.py | {
"start": 398,
"end": 3201
} | class ____(TestCase):
def test_type_identification(self):
shard = Shard(3)
strided_shard = _StridedShard(dim=3, split_factor=7)
partial_sum = Partial("sum")
partial_max = Partial("max")
replicate = Replicate()
ident_tests = (
(shard, True, False, False),
(strided_shard, True, False, False),
(partial_sum, False, True, False),
(partial_max, False, True, False),
(replicate, False, False, True),
)
for do_deepcopy in (False, True):
for placement, is_shard, is_partial, is_replicate in ident_tests:
if do_deepcopy:
placement = copy.deepcopy(placement)
self.assertEqual(placement.is_shard(), is_shard)
self.assertEqual(placement.is_partial(), is_partial)
self.assertEqual(placement.is_replicate(), is_replicate)
def test_equality(self):
equivalence_classes = (
(Shard(3), _StridedShard(dim=3, split_factor=7)),
(Shard(4), _StridedShard(dim=4, split_factor=9)),
(Replicate(),),
(Partial("sum"),),
(Partial("max"),),
)
for eq_class in equivalence_classes:
# Each item in the equivalence class should be equal to every other item in
# its class.
for lhs, rhs in itertools.product(eq_class, eq_class):
self.assertEqual(lhs, rhs)
# Each item in the equivalence class should not be equal to any item in any
# other class.
for other_class in equivalence_classes:
if other_class is eq_class:
continue
for lhs, rhs in itertools.product(eq_class, other_class):
self.assertNotEqual(lhs, rhs)
# Testing this case doesn't seem to fit neatly into the above equivalence class
# framework.
self.assertNotEqual(
_StridedShard(dim=3, split_factor=1), _StridedShard(dim=3, split_factor=2)
)
@unittest.skipIf(
sys.version_info < (3, 10), "kw_only is only available in python >= 3.10"
)
def test_strided_shard_kwonly_argument(self):
with self.assertRaises(TypeError):
_StridedShard(3, 4)
_StridedShard(3, split_factor=4)
def test_strided_shard_isinstance_shard(self):
assert isinstance(_StridedShard(dim=3, split_factor=7), Shard)
def test_dynamo_can_identify_placement_classes(self):
for cls in (Replicate, Shard, _StridedShard, Partial):
self.assertTrue(
PlacementClassVariable.is_placement_type(cls), msg=f"failed on {cls}"
)
if __name__ == "__main__":
run_tests()
| PlacementTypesTestCase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pytest_style/PT023.py | {
"start": 793,
"end": 874
} | class ____:
@pytest.mark.foo()
def test_something():
pass
| TestClass |
python | streamlit__streamlit | lib/streamlit/external/langchain/streamlit_callback_handler.py | {
"start": 4570,
"end": 10021
} | class ____:
"""Encapsulates the Streamlit UI for a single LLM 'thought' during a LangChain Agent
run. Each tool usage gets its own thought; and runs also generally having a
concluding thought where the Agent determines that it has an answer to the prompt.
Each thought gets its own expander UI.
"""
def __init__(
self,
parent_container: DeltaGenerator,
labeler: LLMThoughtLabeler,
expanded: bool,
collapse_on_complete: bool,
) -> None:
self._container = parent_container.status(
labeler.get_initial_label(), expanded=expanded
)
self._state = LLMThoughtState.THINKING
self._llm_token_stream = ""
self._llm_token_stream_placeholder: DeltaGenerator | None = None
self._last_tool: ToolRecord | None = None
self._collapse_on_complete = collapse_on_complete
self._labeler = labeler
@property
def container(self) -> StatusContainer:
"""The container we're writing into."""
return self._container
@property
def last_tool(self) -> ToolRecord | None:
"""The last tool executed by this thought."""
return self._last_tool
def _reset_llm_token_stream(self) -> None:
if self._llm_token_stream_placeholder is not None:
self._llm_token_stream_placeholder.markdown(self._llm_token_stream)
self._llm_token_stream = ""
self._llm_token_stream_placeholder = None
def on_llm_start(self, serialized: dict[str, Any], prompts: list[str]) -> None:
self._reset_llm_token_stream()
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# This is only called when the LLM is initialized with `streaming=True`
self._llm_token_stream += _convert_newlines(token)
if self._llm_token_stream_placeholder is None:
self._llm_token_stream_placeholder = self._container.empty()
self._llm_token_stream_placeholder.markdown(self._llm_token_stream + "▕")
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
# `response` is the concatenation of all the tokens received by the LLM.
# If we're receiving streaming tokens from `on_llm_new_token`, this response
# data is redundant
self._reset_llm_token_stream()
# set the container status to complete
self.complete(self._labeler.get_final_agent_thought_label())
def on_llm_error(self, error: BaseException, *args: Any, **kwargs: Any) -> None:
self._container.exception(error)
self._state = LLMThoughtState.ERROR
self.complete("LLM encountered an error...")
def on_tool_start(
self, serialized: dict[str, Any], input_str: str, **kwargs: Any
) -> None:
# Called with the name of the tool we're about to run (in `serialized[name]`),
# and its input. We change our container's label to be the tool name.
self._state = LLMThoughtState.RUNNING_TOOL
tool_name = serialized["name"]
self._last_tool = ToolRecord(name=tool_name, input_str=input_str)
self._container.update(
label=self._labeler.get_tool_label(self._last_tool, is_complete=False),
state="running",
)
if len(input_str) > MAX_TOOL_INPUT_STR_LENGTH:
# output is printed later in on_tool_end
self._container.markdown(f"**Input:**\n\n{input_str}\n\n**Output:**")
def on_tool_end(
self,
output: str,
color: str | None = None,
observation_prefix: str | None = None,
llm_prefix: str | None = None,
**kwargs: Any,
) -> None:
self._container.markdown(output)
def on_tool_error(self, error: BaseException, *args: Any, **kwargs: Any) -> None:
self._container.markdown("**Tool encountered an error...**")
self._container.exception(error)
self._container.update(state="error")
def on_agent_action(
self, action: AgentAction, color: str | None = None, **kwargs: Any
) -> Any:
# Called when we're about to kick off a new tool. The `action` data
# tells us the tool we're about to use, and the input we'll give it.
# We don't output anything here, because we'll receive this same data
# when `on_tool_start` is called immediately after.
pass
def complete(self, final_label: str | None = None) -> None:
"""Finish the thought."""
if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
if self._last_tool is None:
raise RuntimeError(
"_last_tool should never be null when _state == RUNNING_TOOL"
)
final_label = self._labeler.get_tool_label(
self._last_tool, is_complete=True
)
if self._last_tool and self._last_tool.name == "_Exception":
self._state = LLMThoughtState.ERROR
elif self._state != LLMThoughtState.ERROR:
self._state = LLMThoughtState.COMPLETE
if self._collapse_on_complete:
# Add a quick delay to show the user the final output before we collapse
time.sleep(0.25)
self._container.update(
label=final_label,
expanded=False if self._collapse_on_complete else None,
state="error" if self._state == LLMThoughtState.ERROR else "complete",
)
| LLMThought |
python | aio-libs__aiohttp | tests/test_web_exceptions.py | {
"start": 11572,
"end": 14213
} | class ____:
def test_ctor(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(
link="http://warning.or.kr/",
headers={"X-Custom": "value"},
reason="Zaprescheno",
text="text",
content_type="custom",
)
assert exc.link == URL("http://warning.or.kr/")
assert exc.text == "text"
compare: Mapping[str, str] = {
"X-Custom": "value",
"Content-Type": "custom",
"Link": '<http://warning.or.kr/>; rel="blocked-by"',
}
assert exc.headers == compare
assert exc.reason == "Zaprescheno"
assert exc.status == 451
def test_no_link(self) -> None:
with pytest.raises(TypeError):
web.HTTPUnavailableForLegalReasons() # type: ignore[call-arg]
def test_none_link(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(link=None)
assert exc.link is None
assert "Link" not in exc.headers
def test_empty_link(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(link="")
assert exc.link is None
assert "Link" not in exc.headers
def test_link_str(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(link="http://warning.or.kr/")
assert exc.link == URL("http://warning.or.kr/")
assert exc.headers["Link"] == '<http://warning.or.kr/>; rel="blocked-by"'
def test_link_url(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(link=URL("http://warning.or.kr/"))
assert exc.link == URL("http://warning.or.kr/")
assert exc.headers["Link"] == '<http://warning.or.kr/>; rel="blocked-by"'
def test_link_CRLF(self) -> None:
exc = web.HTTPUnavailableForLegalReasons(link="http://warning.or.kr/\r\n")
assert "\r\n" not in exc.headers["Link"]
def test_pickle(self) -> None:
resp = web.HTTPUnavailableForLegalReasons(
link="http://warning.or.kr/",
headers={"X-Custom": "value"},
reason="Zaprescheno",
text="text",
content_type="custom",
)
resp.foo = "bar" # type: ignore[attr-defined]
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(resp, proto)
resp2 = pickle.loads(pickled)
assert resp2.link == URL("http://warning.or.kr/")
assert resp2.text == "text"
assert resp2.headers == resp.headers
assert resp2.reason == "Zaprescheno"
assert resp2.status == 451
assert resp2.foo == "bar"
| TestHTTPUnavailableForLegalReasons |
python | django__django | tests/expressions_window/tests.py | {
"start": 1031,
"end": 79437
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
classification = Classification.objects.create()
Employee.objects.bulk_create(
[
Employee(
name=e[0],
salary=e[1],
department=e[2],
hire_date=e[3],
age=e[4],
bonus=Decimal(e[1]) / 400,
classification=classification,
)
for e in [
("Jones", 45000, "Accounting", datetime.datetime(2005, 11, 1), 20),
(
"Williams",
37000,
"Accounting",
datetime.datetime(2009, 6, 1),
20,
),
("Jenson", 45000, "Accounting", datetime.datetime(2008, 4, 1), 20),
("Adams", 50000, "Accounting", datetime.datetime(2013, 7, 1), 50),
("Smith", 55000, "Sales", datetime.datetime(2007, 6, 1), 30),
("Brown", 53000, "Sales", datetime.datetime(2009, 9, 1), 30),
("Johnson", 40000, "Marketing", datetime.datetime(2012, 3, 1), 30),
("Smith", 38000, "Marketing", datetime.datetime(2009, 10, 1), 20),
("Wilkinson", 60000, "IT", datetime.datetime(2011, 3, 1), 40),
("Moore", 34000, "IT", datetime.datetime(2013, 8, 1), 40),
("Miller", 100000, "Management", datetime.datetime(2005, 6, 1), 40),
("Johnson", 80000, "Management", datetime.datetime(2005, 7, 1), 50),
]
]
)
employees = list(Employee.objects.order_by("pk"))
PastEmployeeDepartment.objects.bulk_create(
[
PastEmployeeDepartment(employee=employees[6], department="Sales"),
PastEmployeeDepartment(employee=employees[10], department="IT"),
]
)
def test_dense_rank(self):
tests = [
ExtractYear(F("hire_date")).asc(),
F("hire_date__year").asc(),
"hire_date__year",
]
for order_by in tests:
with self.subTest(order_by=order_by):
qs = Employee.objects.annotate(
rank=Window(expression=DenseRank(), order_by=order_by),
)
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 2),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 3),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 4),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 4),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 4),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 5),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 6),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 7),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 7),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_department_salary(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=[F("hire_date").asc()],
)
).order_by("department", "department_sum")
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 45000, 45000),
("Jenson", "Accounting", 45000, 90000),
("Williams", "Accounting", 37000, 127000),
("Adams", "Accounting", 50000, 177000),
("Wilkinson", "IT", 60000, 60000),
("Moore", "IT", 34000, 94000),
("Miller", "Management", 100000, 100000),
("Johnson", "Management", 80000, 180000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 78000),
("Smith", "Sales", 55000, 55000),
("Brown", "Sales", 53000, 108000),
],
lambda entry: (
entry.name,
entry.department,
entry.salary,
entry.department_sum,
),
)
def test_rank(self):
"""
Rank the employees based on the year they're were hired. Since there
are multiple employees hired in different years, this will contain
gaps.
"""
qs = Employee.objects.annotate(
rank=Window(
expression=Rank(),
order_by=F("hire_date__year").asc(),
)
)
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 4),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 5),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 6),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 6),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 6),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 9),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 10),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 11),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 11),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
    def test_row_number(self):
        """
        The row number window function computes the number based on the order
        in which the tuples were inserted. On some backends (e.g. Oracle) an
        ordering clause is required in the Window expression.
        """
        qs = Employee.objects.annotate(
            row_number=Window(
                expression=RowNumber(),
                order_by=F("pk").asc(),
            )
        ).order_by("pk")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", "Accounting", 1),
                ("Williams", "Accounting", 2),
                ("Jenson", "Accounting", 3),
                ("Adams", "Accounting", 4),
                ("Smith", "Sales", 5),
                ("Brown", "Sales", 6),
                ("Johnson", "Marketing", 7),
                ("Smith", "Marketing", 8),
                ("Wilkinson", "IT", 9),
                ("Moore", "IT", 10),
                ("Miller", "Management", 11),
                ("Johnson", "Management", 12),
            ],
            lambda entry: (entry.name, entry.department, entry.row_number),
        )
    def test_row_number_no_ordering(self):
        """
        The row number window function computes the number based on the order
        in which the tuples were inserted, without an explicit window ordering.
        """
        # Add a default ordering for consistent results across databases.
        qs = Employee.objects.annotate(
            row_number=Window(
                expression=RowNumber(),
            )
        ).order_by("pk")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", "Accounting", 1),
                ("Williams", "Accounting", 2),
                ("Jenson", "Accounting", 3),
                ("Adams", "Accounting", 4),
                ("Smith", "Sales", 5),
                ("Brown", "Sales", 6),
                ("Johnson", "Marketing", 7),
                ("Smith", "Marketing", 8),
                ("Wilkinson", "IT", 9),
                ("Moore", "IT", 10),
                ("Miller", "Management", 11),
                ("Johnson", "Management", 12),
            ],
            lambda entry: (entry.name, entry.department, entry.row_number),
        )
    def test_avg_salary_department(self):
        """
        Annotate each employee with the average salary of their department;
        the value is identical for every row within a partition.
        """
        qs = Employee.objects.annotate(
            avg_salary=Window(
                expression=Avg("salary"),
                order_by=F("department").asc(),
                partition_by="department",
            )
        ).order_by("department", "-salary", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Adams", 50000, "Accounting", 44250.00),
                ("Jenson", 45000, "Accounting", 44250.00),
                ("Jones", 45000, "Accounting", 44250.00),
                ("Williams", 37000, "Accounting", 44250.00),
                ("Wilkinson", 60000, "IT", 47000.00),
                ("Moore", 34000, "IT", 47000.00),
                ("Miller", 100000, "Management", 90000.00),
                ("Johnson", 80000, "Management", 90000.00),
                ("Johnson", 40000, "Marketing", 39000.00),
                ("Smith", 38000, "Marketing", 39000.00),
                ("Smith", 55000, "Sales", 54000.00),
                ("Brown", 53000, "Sales", 54000.00),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.avg_salary,
            ),
        )
    def test_lag(self):
        """
        Compute the difference between an employee's salary and the next
        highest salary in the employee's department. Return None if the
        employee has the lowest salary.
        """
        qs = Employee.objects.annotate(
            lag=Window(
                expression=Lag(expression="salary", offset=1),
                partition_by=F("department"),
                order_by=[F("salary").asc(), F("name").asc()],
            )
        ).order_by("department", F("salary").asc(), F("name").asc())
        self.assertQuerySetEqual(
            qs,
            [
                ("Williams", 37000, "Accounting", None),
                ("Jenson", 45000, "Accounting", 37000),
                ("Jones", 45000, "Accounting", 45000),
                ("Adams", 50000, "Accounting", 45000),
                ("Moore", 34000, "IT", None),
                ("Wilkinson", 60000, "IT", 34000),
                ("Johnson", 80000, "Management", None),
                ("Miller", 100000, "Management", 80000),
                ("Smith", 38000, "Marketing", None),
                ("Johnson", 40000, "Marketing", 38000),
                ("Brown", 53000, "Sales", None),
                ("Smith", 55000, "Sales", 53000),
            ],
            transform=lambda row: (row.name, row.salary, row.department, row.lag),
        )
    def test_lag_decimalfield(self):
        """Lag() works on a DecimalField ("bonus") column."""
        qs = Employee.objects.annotate(
            lag=Window(
                expression=Lag(expression="bonus", offset=1),
                partition_by=F("department"),
                order_by=[F("bonus").asc(), F("name").asc()],
            )
        ).order_by("department", F("bonus").asc(), F("name").asc())
        self.assertQuerySetEqual(
            qs,
            [
                ("Williams", 92.5, "Accounting", None),
                ("Jenson", 112.5, "Accounting", 92.5),
                ("Jones", 112.5, "Accounting", 112.5),
                ("Adams", 125, "Accounting", 112.5),
                ("Moore", 85, "IT", None),
                ("Wilkinson", 150, "IT", 85),
                ("Johnson", 200, "Management", None),
                ("Miller", 250, "Management", 200),
                ("Smith", 95, "Marketing", None),
                ("Johnson", 100, "Marketing", 95),
                ("Brown", 132.5, "Sales", None),
                ("Smith", 137.5, "Sales", 132.5),
            ],
            transform=lambda row: (row.name, row.bonus, row.department, row.lag),
        )
    def test_order_by_decimalfield(self):
        """A window may be ordered by a DecimalField given as a plain string."""
        qs = Employee.objects.annotate(
            rank=Window(expression=Rank(), order_by="bonus")
        ).order_by("-bonus", "id")
        self.assertQuerySetEqual(
            qs,
            [
                ("Miller", 250.0, 12),
                ("Johnson", 200.0, 11),
                ("Wilkinson", 150.0, 10),
                ("Smith", 137.5, 9),
                ("Brown", 132.5, 8),
                ("Adams", 125.0, 7),
                ("Jones", 112.5, 5),
                ("Jenson", 112.5, 5),
                ("Johnson", 100.0, 4),
                ("Smith", 95.0, 3),
                ("Williams", 92.5, 2),
                ("Moore", 85.0, 1),
            ],
            transform=lambda row: (row.name, float(row.bonus), row.rank),
        )
    def test_first_value(self):
        """
        FirstValue() returns the salary of the first person hired in each
        department, repeated for every row in the partition.
        """
        qs = Employee.objects.annotate(
            first_value=Window(
                expression=FirstValue("salary"),
                partition_by=F("department"),
                order_by=F("hire_date").asc(),
            )
        ).order_by("department", "hire_date")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 45000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 45000),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), 60000),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 38000),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 55000),
            ],
            lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.first_value,
            ),
        )
    def test_last_value(self):
        """
        LastValue() with the default frame (up to the current row) yields each
        row's own hire_date, since the current row is the last one visible.
        """
        qs = Employee.objects.annotate(
            last_value=Window(
                expression=LastValue("hire_date"),
                partition_by=F("department"),
                order_by=F("hire_date").asc(),
            )
        )
        self.assertQuerySetEqual(
            qs,
            [
                (
                    "Adams",
                    "Accounting",
                    datetime.date(2013, 7, 1),
                    50000,
                    datetime.date(2013, 7, 1),
                ),
                (
                    "Jenson",
                    "Accounting",
                    datetime.date(2008, 4, 1),
                    45000,
                    datetime.date(2008, 4, 1),
                ),
                (
                    "Jones",
                    "Accounting",
                    datetime.date(2005, 11, 1),
                    45000,
                    datetime.date(2005, 11, 1),
                ),
                (
                    "Williams",
                    "Accounting",
                    datetime.date(2009, 6, 1),
                    37000,
                    datetime.date(2009, 6, 1),
                ),
                (
                    "Moore",
                    "IT",
                    datetime.date(2013, 8, 1),
                    34000,
                    datetime.date(2013, 8, 1),
                ),
                (
                    "Wilkinson",
                    "IT",
                    datetime.date(2011, 3, 1),
                    60000,
                    datetime.date(2011, 3, 1),
                ),
                (
                    "Miller",
                    "Management",
                    datetime.date(2005, 6, 1),
                    100000,
                    datetime.date(2005, 6, 1),
                ),
                (
                    "Johnson",
                    "Management",
                    datetime.date(2005, 7, 1),
                    80000,
                    datetime.date(2005, 7, 1),
                ),
                (
                    "Johnson",
                    "Marketing",
                    datetime.date(2012, 3, 1),
                    40000,
                    datetime.date(2012, 3, 1),
                ),
                (
                    "Smith",
                    "Marketing",
                    datetime.date(2009, 10, 1),
                    38000,
                    datetime.date(2009, 10, 1),
                ),
                (
                    "Brown",
                    "Sales",
                    datetime.date(2009, 9, 1),
                    53000,
                    datetime.date(2009, 9, 1),
                ),
                (
                    "Smith",
                    "Sales",
                    datetime.date(2007, 6, 1),
                    55000,
                    datetime.date(2007, 6, 1),
                ),
            ],
            transform=lambda row: (
                row.name,
                row.department,
                row.hire_date,
                row.salary,
                row.last_value,
            ),
            ordered=False,
        )
    def test_function_list_of_values(self):
        """
        values_list() over a window annotation works and must not introduce
        a GROUP BY clause.
        """
        qs = (
            Employee.objects.annotate(
                lead=Window(
                    expression=Lead(expression="salary"),
                    order_by=[F("hire_date").asc(), F("name").desc()],
                    partition_by="department",
                )
            )
            .values_list("name", "salary", "department", "hire_date", "lead")
            .order_by("department", F("hire_date").asc(), F("name").desc())
        )
        self.assertNotIn("GROUP BY", str(qs.query))
        self.assertSequenceEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
            ],
        )
    def test_min_department(self):
        """An alternative way to specify a query for FirstValue."""
        qs = Employee.objects.annotate(
            min_salary=Window(
                expression=Min("salary"),
                partition_by=F("department"),
                order_by=[F("salary").asc(), F("name").asc()],
            )
        ).order_by("department", "salary", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Williams", "Accounting", 37000, 37000),
                ("Jenson", "Accounting", 45000, 37000),
                ("Jones", "Accounting", 45000, 37000),
                ("Adams", "Accounting", 50000, 37000),
                ("Moore", "IT", 34000, 34000),
                ("Wilkinson", "IT", 60000, 34000),
                ("Johnson", "Management", 80000, 80000),
                ("Miller", "Management", 100000, 80000),
                ("Smith", "Marketing", 38000, 38000),
                ("Johnson", "Marketing", 40000, 38000),
                ("Brown", "Sales", 53000, 53000),
                ("Smith", "Sales", 55000, 53000),
            ],
            lambda row: (row.name, row.department, row.salary, row.min_salary),
        )
    def test_max_per_year(self):
        """
        Find the maximum salary awarded in the same year as the
        employee was hired, regardless of the department.
        """
        # Partition by the extracted hire year rather than a plain column.
        qs = Employee.objects.annotate(
            max_salary_year=Window(
                expression=Max("salary"),
                order_by=ExtractYear("hire_date").asc(),
                partition_by=ExtractYear("hire_date"),
            )
        ).order_by(ExtractYear("hire_date"), "salary")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", "Accounting", 45000, 2005, 100000),
                ("Johnson", "Management", 80000, 2005, 100000),
                ("Miller", "Management", 100000, 2005, 100000),
                ("Smith", "Sales", 55000, 2007, 55000),
                ("Jenson", "Accounting", 45000, 2008, 45000),
                ("Williams", "Accounting", 37000, 2009, 53000),
                ("Smith", "Marketing", 38000, 2009, 53000),
                ("Brown", "Sales", 53000, 2009, 53000),
                ("Wilkinson", "IT", 60000, 2011, 60000),
                ("Johnson", "Marketing", 40000, 2012, 40000),
                ("Moore", "IT", 34000, 2013, 50000),
                ("Adams", "Accounting", 50000, 2013, 50000),
            ],
            lambda row: (
                row.name,
                row.department,
                row.salary,
                row.hire_date.year,
                row.max_salary_year,
            ),
        )
    def test_cume_dist(self):
        """
        Compute the cumulative distribution for the employees based on the
        salary in increasing order. Equal to rank/total number of rows (12).
        """
        qs = Employee.objects.annotate(
            cume_dist=Window(
                expression=CumeDist(),
                order_by=F("salary").asc(),
            )
        ).order_by("salary", "name")
        # Round result of cume_dist because Oracle uses greater precision.
        self.assertQuerySetEqual(
            qs,
            [
                ("Moore", "IT", 34000, 0.0833333333),
                ("Williams", "Accounting", 37000, 0.1666666667),
                ("Smith", "Marketing", 38000, 0.25),
                ("Johnson", "Marketing", 40000, 0.3333333333),
                ("Jenson", "Accounting", 45000, 0.5),
                ("Jones", "Accounting", 45000, 0.5),
                ("Adams", "Accounting", 50000, 0.5833333333),
                ("Brown", "Sales", 53000, 0.6666666667),
                ("Smith", "Sales", 55000, 0.75),
                ("Wilkinson", "IT", 60000, 0.8333333333),
                ("Johnson", "Management", 80000, 0.9166666667),
                ("Miller", "Management", 100000, 1),
            ],
            lambda row: (
                row.name,
                row.department,
                row.salary,
                round(row.cume_dist, 10),
            ),
        )
    def test_nthvalue(self):
        """
        NthValue(nth=2) yields the second salary of each department partition,
        or None while the frame contains fewer than two rows.
        """
        qs = Employee.objects.annotate(
            nth_value=Window(
                expression=NthValue(expression="salary", nth=2),
                order_by=[F("hire_date").asc(), F("name").desc()],
                partition_by=F("department"),
            )
        ).order_by("department", "hire_date", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", "Accounting", datetime.date(2005, 11, 1), 45000, None),
                ("Jenson", "Accounting", datetime.date(2008, 4, 1), 45000, 45000),
                ("Williams", "Accounting", datetime.date(2009, 6, 1), 37000, 45000),
                ("Adams", "Accounting", datetime.date(2013, 7, 1), 50000, 45000),
                ("Wilkinson", "IT", datetime.date(2011, 3, 1), 60000, None),
                ("Moore", "IT", datetime.date(2013, 8, 1), 34000, 34000),
                ("Miller", "Management", datetime.date(2005, 6, 1), 100000, None),
                ("Johnson", "Management", datetime.date(2005, 7, 1), 80000, 80000),
                ("Smith", "Marketing", datetime.date(2009, 10, 1), 38000, None),
                ("Johnson", "Marketing", datetime.date(2012, 3, 1), 40000, 40000),
                ("Smith", "Sales", datetime.date(2007, 6, 1), 55000, None),
                ("Brown", "Sales", datetime.date(2009, 9, 1), 53000, 53000),
            ],
            lambda row: (
                row.name,
                row.department,
                row.hire_date,
                row.salary,
                row.nth_value,
            ),
        )
    def test_lead(self):
        """
        Determine what the next person hired in the same department makes.
        Because the dataset is ambiguous, the name is also part of the
        ordering clause. No default is provided, so None/NULL should be
        returned.
        """
        qs = Employee.objects.annotate(
            lead=Window(
                expression=Lead(expression="salary"),
                order_by=[F("hire_date").asc(), F("name").desc()],
                partition_by="department",
            )
        ).order_by("department", F("hire_date").asc(), F("name").desc())
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.lead,
            ),
        )
    def test_lead_offset(self):
        """
        Determine what the person hired two positions later in the same
        department makes. None is returned when no such row exists.
        """
        qs = Employee.objects.annotate(
            lead=Window(
                expression=Lead("salary", offset=2),
                partition_by="department",
                order_by=F("hire_date").asc(),
            )
        )
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 37000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 50000),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), None),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), None),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), None),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), None),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), None),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.lead,
            ),
            ordered=False,
        )
@skipUnlessDBFeature("supports_default_in_lead_lag")
def test_lead_default(self):
qs = Employee.objects.annotate(
lead_default=Window(
expression=Lead(expression="salary", offset=5, default=60000),
partition_by=F("department"),
order_by=F("department").asc(),
)
)
self.assertEqual(
list(qs.values_list("lead_default", flat=True).distinct()), [60000]
)
    def test_ntile(self):
        """
        Compute the group for each of the employees across the entire company,
        based on how high the salary is for them. There are twelve employees
        so it divides evenly into four groups.
        """
        qs = Employee.objects.annotate(
            ntile=Window(
                expression=Ntile(num_buckets=4),
                order_by="-salary",
            )
        ).order_by("ntile", "-salary", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Miller", "Management", 100000, 1),
                ("Johnson", "Management", 80000, 1),
                ("Wilkinson", "IT", 60000, 1),
                ("Smith", "Sales", 55000, 2),
                ("Brown", "Sales", 53000, 2),
                ("Adams", "Accounting", 50000, 2),
                ("Jenson", "Accounting", 45000, 3),
                ("Jones", "Accounting", 45000, 3),
                ("Johnson", "Marketing", 40000, 3),
                ("Smith", "Marketing", 38000, 4),
                ("Williams", "Accounting", 37000, 4),
                ("Moore", "IT", 34000, 4),
            ],
            lambda x: (x.name, x.department, x.salary, x.ntile),
        )
    def test_percent_rank(self):
        """
        Calculate the percentage rank of the employees across the entire
        company based on salary and name (in case of ambiguity).
        """
        qs = Employee.objects.annotate(
            percent_rank=Window(
                expression=PercentRank(),
                order_by=[F("salary").asc(), F("name").asc()],
            )
        ).order_by("percent_rank")
        # Round to account for precision differences among databases.
        self.assertQuerySetEqual(
            qs,
            [
                ("Moore", "IT", 34000, 0.0),
                ("Williams", "Accounting", 37000, 0.0909090909),
                ("Smith", "Marketing", 38000, 0.1818181818),
                ("Johnson", "Marketing", 40000, 0.2727272727),
                ("Jenson", "Accounting", 45000, 0.3636363636),
                ("Jones", "Accounting", 45000, 0.4545454545),
                ("Adams", "Accounting", 50000, 0.5454545455),
                ("Brown", "Sales", 53000, 0.6363636364),
                ("Smith", "Sales", 55000, 0.7272727273),
                ("Wilkinson", "IT", 60000, 0.8181818182),
                ("Johnson", "Management", 80000, 0.9090909091),
                ("Miller", "Management", 100000, 1.0),
            ],
            transform=lambda row: (
                row.name,
                row.department,
                row.salary,
                round(row.percent_rank, 10),
            ),
        )
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue("salary", nth=20), order_by=F("salary").asc()
)
)
self.assertEqual(
list(qs.values_list("nth_value", flat=True).distinct()), [None]
)
    def test_multiple_partitioning(self):
        """
        Find the maximum salary for each department for people hired in the
        same year.
        """
        # Mixes a window annotation with a regular aggregate (Count) to make
        # sure they coexist in one query.
        qs = Employee.objects.annotate(
            max=Window(
                expression=Max("salary"),
                partition_by=[F("department"), F("hire_date__year")],
            ),
            past_department_count=Count("past_departments"),
        ).order_by("department", "hire_date", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000, 0),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000, 0),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000, 0),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000, 0),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000, 0),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000, 0),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000, 1),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000, 0),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000, 0),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000, 1),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000, 0),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000, 0),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.max,
                row.past_department_count,
            ),
        )
    def test_multiple_ordering(self):
        """
        Accumulate the salaries over the departments based on hire_date.
        If two people were hired on the same date in the same department, the
        ordering clause will render a different result for those people.
        """
        qs = Employee.objects.annotate(
            sum=Window(
                expression=Sum("salary"),
                partition_by="department",
                order_by=[F("hire_date").asc(), F("name").asc()],
            )
        ).order_by("department", "sum")
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 127000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 177000),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), 94000),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 180000),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 78000),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 108000),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.sum,
            ),
        )
def test_empty_ordering(self):
"""
Explicit empty ordering makes little sense but it is something that
was historically allowed.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="department",
order_by=[],
)
).order_by("department", "sum")
self.assertEqual(len(qs), 12)
def test_related_ordering_with_count(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=["classification__code"],
)
)
self.assertEqual(qs.count(), 12)
    def test_filter(self):
        """Filtering against window annotations, alone and combined."""
        qs = Employee.objects.annotate(
            department_salary_rank=Window(
                Rank(), partition_by="department", order_by="-salary"
            ),
            department_avg_age_diff=(
                Window(Avg("age"), partition_by="department") - F("age")
            ),
        ).order_by("department", "name")
        # Direct window reference.
        self.assertQuerySetEqual(
            qs.filter(department_salary_rank=1),
            ["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
            lambda employee: employee.name,
        )
        # Through a combined expression containing a window.
        self.assertQuerySetEqual(
            qs.filter(department_avg_age_diff__gt=0),
            ["Jenson", "Jones", "Williams", "Miller", "Smith"],
            lambda employee: employee.name,
        )
        # Intersection of multiple windows.
        self.assertQuerySetEqual(
            qs.filter(department_salary_rank=1, department_avg_age_diff__gt=0),
            ["Miller"],
            lambda employee: employee.name,
        )
        # Union of multiple windows.
        self.assertQuerySetEqual(
            qs.filter(Q(department_salary_rank=1) | Q(department_avg_age_diff__gt=0)),
            [
                "Adams",
                "Jenson",
                "Jones",
                "Williams",
                "Wilkinson",
                "Miller",
                "Johnson",
                "Smith",
                "Smith",
            ],
            lambda employee: employee.name,
        )
    def test_filter_conditional_annotation(self):
        """
        A conditional annotation (Case/Q) built on top of a window annotation
        can be used in a filter.
        """
        qs = (
            Employee.objects.annotate(
                rank=Window(Rank(), partition_by="department", order_by="-salary"),
                case_first_rank=Case(
                    When(rank=1, then=True),
                    default=False,
                ),
                q_first_rank=Q(rank=1),
            )
            .order_by("name")
            .values_list("name", flat=True)
        )
        for annotation in ["case_first_rank", "q_first_rank"]:
            with self.subTest(annotation=annotation):
                self.assertSequenceEqual(
                    qs.filter(**{annotation: True}),
                    ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
                )
def test_filter_conditional_expression(self):
qs = (
Employee.objects.filter(
Exact(Window(Rank(), partition_by="department", order_by="-salary"), 1)
)
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_column_ref_rhs(self):
qs = (
Employee.objects.annotate(
max_dept_salary=Window(Max("salary"), partition_by="department")
)
.filter(max_dept_salary=F("salary"))
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
    def test_filter_values(self):
        """
        Filtering on a window annotation works on a values_list() queryset
        selecting a transformed expression.
        """
        qs = (
            Employee.objects.annotate(
                department_salary_rank=Window(
                    Rank(), partition_by="department", order_by="-salary"
                ),
            )
            .order_by("department", "name")
            .values_list(Upper("name"), flat=True)
        )
        self.assertSequenceEqual(
            qs.filter(department_salary_rank=1),
            ["ADAMS", "WILKINSON", "MILLER", "JOHNSON", "SMITH"],
        )
    def test_filter_alias(self):
        """
        A window expression declared with alias() (not selected) can still be
        filtered against.
        """
        qs = Employee.objects.alias(
            department_avg_age_diff=(
                Window(Avg("age"), partition_by="department") - F("age")
            ),
        ).order_by("department", "name")
        self.assertQuerySetEqual(
            qs.filter(department_avg_age_diff__gt=0),
            ["Jenson", "Jones", "Williams", "Miller", "Smith"],
            lambda employee: employee.name,
        )
    def test_filter_select_related(self):
        """
        Filtering on an aliased window expression keeps select_related()
        working: the related object must already be fetched.
        """
        qs = (
            Employee.objects.alias(
                department_avg_age_diff=(
                    Window(Avg("age"), partition_by="department") - F("age")
                ),
            )
            .select_related("classification")
            .filter(department_avg_age_diff__gt=0)
            .order_by("department", "name")
        )
        self.assertQuerySetEqual(
            qs,
            ["Jenson", "Jones", "Williams", "Miller", "Smith"],
            lambda employee: employee.name,
        )
        # Accessing the related object must not trigger an extra query.
        with self.assertNumQueries(0):
            qs[0].classification
    def test_exclude(self):
        """exclude() against window annotations, alone and combined."""
        qs = Employee.objects.annotate(
            department_salary_rank=Window(
                Rank(), partition_by="department", order_by="-salary"
            ),
            department_avg_age_diff=(
                Window(Avg("age"), partition_by="department") - F("age")
            ),
        ).order_by("department", "name")
        # Direct window reference.
        self.assertQuerySetEqual(
            qs.exclude(department_salary_rank__gt=1),
            ["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
            lambda employee: employee.name,
        )
        # Through a combined expression containing a window.
        self.assertQuerySetEqual(
            qs.exclude(department_avg_age_diff__lte=0),
            ["Jenson", "Jones", "Williams", "Miller", "Smith"],
            lambda employee: employee.name,
        )
        # Union of multiple windows.
        self.assertQuerySetEqual(
            qs.exclude(
                Q(department_salary_rank__gt=1) | Q(department_avg_age_diff__lte=0)
            ),
            ["Miller"],
            lambda employee: employee.name,
        )
        # Intersection of multiple windows.
        self.assertQuerySetEqual(
            qs.exclude(department_salary_rank__gt=1, department_avg_age_diff__lte=0),
            [
                "Adams",
                "Jenson",
                "Jones",
                "Williams",
                "Wilkinson",
                "Miller",
                "Johnson",
                "Smith",
                "Smith",
            ],
            lambda employee: employee.name,
        )
    def test_heterogeneous_filter(self):
        """
        Mixing window-function filters with plain column or aggregate filters
        pushes the window conditions into a QUALIFY-style outer query.
        """
        qs = (
            Employee.objects.annotate(
                department_salary_rank=Window(
                    Rank(), partition_by="department", order_by="-salary"
                ),
            )
            .order_by("name")
            .values_list("name", flat=True)
        )
        # Heterogeneous filter between window function and aggregates pushes
        # the WHERE clause to the QUALIFY outer query.
        self.assertSequenceEqual(
            qs.filter(
                department_salary_rank=1, department__in=["Accounting", "Management"]
            ),
            ["Adams", "Miller"],
        )
        self.assertSequenceEqual(
            qs.filter(
                Q(department_salary_rank=1)
                | Q(department__in=["Accounting", "Management"])
            ),
            [
                "Adams",
                "Jenson",
                "Johnson",
                "Johnson",
                "Jones",
                "Miller",
                "Smith",
                "Wilkinson",
                "Williams",
            ],
        )
        # Heterogeneous filter between window function and aggregates pushes
        # the HAVING clause to the QUALIFY outer query.
        qs = qs.annotate(past_department_count=Count("past_departments"))
        self.assertSequenceEqual(
            qs.filter(department_salary_rank=1, past_department_count__gte=1),
            ["Johnson", "Miller"],
        )
        self.assertSequenceEqual(
            qs.filter(Q(department_salary_rank=1) | Q(past_department_count__gte=1)),
            ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
        )
    def test_limited_filter(self):
        """
        A query filtering against a window function has its limit applied
        after window filtering takes place.
        """
        self.assertQuerySetEqual(
            Employee.objects.annotate(
                department_salary_rank=Window(
                    Rank(), partition_by="department", order_by="-salary"
                )
            )
            .filter(department_salary_rank=1)
            .order_by("department")[0:3],
            ["Adams", "Wilkinson", "Miller"],
            lambda employee: employee.name,
        )
    def test_filter_count(self):
        """
        count() over a window-filtered queryset runs a single query with a
        subquery (no GROUP BY).
        """
        with CaptureQueriesContext(connection) as ctx:
            self.assertEqual(
                Employee.objects.annotate(
                    department_salary_rank=Window(
                        Rank(), partition_by="department", order_by="-salary"
                    )
                )
                .filter(department_salary_rank=1)
                .count(),
                5,
            )
        self.assertEqual(len(ctx.captured_queries), 1)
        sql = ctx.captured_queries[0]["sql"].lower()
        # Three SELECTs: the count wrapper, the QUALIFY outer query, and the
        # inner window query.
        self.assertEqual(sql.count("select"), 3)
        self.assertNotIn("group by", sql)
    @skipUnlessDBFeature("supports_frame_range_fixed_distance")
    def test_range_n_preceding_and_following(self):
        """
        A RANGE frame with fixed numeric bounds sums salaries within +/-2 of
        the current row's salary inside each department.
        """
        qs = Employee.objects.annotate(
            sum=Window(
                expression=Sum("salary"),
                order_by=F("salary").asc(),
                partition_by="department",
                frame=ValueRange(start=-2, end=2),
            )
        )
        self.assertIn("RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING", str(qs.query))
        self.assertQuerySetEqual(
            qs,
            [
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 90000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 80000),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.sum,
            ),
            ordered=False,
        )
    @skipUnlessDBFeature(
        "supports_frame_exclusion", "supports_frame_range_fixed_distance"
    )
    def test_range_exclude_current(self):
        """
        A RANGE frame with EXCLUDE CURRENT ROW omits the current row from its
        own aggregate; rows with an otherwise-empty frame yield None.
        """
        qs = Employee.objects.annotate(
            sum=Window(
                expression=Sum("salary"),
                order_by=F("salary").asc(),
                partition_by="department",
                frame=ValueRange(end=2, exclusion=WindowFrameExclusion.CURRENT_ROW),
            )
        ).order_by("department", "salary")
        self.assertIn(
            "RANGE BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING EXCLUDE CURRENT ROW",
            str(qs.query),
        )
        self.assertQuerySetEqual(
            qs,
            [
                ("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), None),
                ("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 82000),
                ("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 82000),
                ("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 127000),
                ("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
                ("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
                ("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
                ("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
                ("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), None),
                ("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 38000),
                ("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
                ("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
            ],
            transform=lambda row: (
                row.name,
                row.salary,
                row.department,
                row.hire_date,
                row.sum,
            ),
        )
    def test_range_unbound(self):
        """
        A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING.
        """
        # Partitioned by age, so each sum covers everyone of the same age.
        qs = Employee.objects.annotate(
            sum=Window(
                expression=Sum("salary"),
                partition_by="age",
                order_by=[F("age").asc()],
                frame=ValueRange(start=None, end=None),
            )
        ).order_by("department", "hire_date", "name")
        self.assertIn(
            "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", str(qs.query)
        )
        self.assertQuerySetEqual(
            qs,
            [
                ("Jones", "Accounting", 45000, datetime.date(2005, 11, 1), 165000),
                ("Jenson", "Accounting", 45000, datetime.date(2008, 4, 1), 165000),
                ("Williams", "Accounting", 37000, datetime.date(2009, 6, 1), 165000),
                ("Adams", "Accounting", 50000, datetime.date(2013, 7, 1), 130000),
                ("Wilkinson", "IT", 60000, datetime.date(2011, 3, 1), 194000),
                ("Moore", "IT", 34000, datetime.date(2013, 8, 1), 194000),
                ("Miller", "Management", 100000, datetime.date(2005, 6, 1), 194000),
                ("Johnson", "Management", 80000, datetime.date(2005, 7, 1), 130000),
                ("Smith", "Marketing", 38000, datetime.date(2009, 10, 1), 165000),
                ("Johnson", "Marketing", 40000, datetime.date(2012, 3, 1), 148000),
                ("Smith", "Sales", 55000, datetime.date(2007, 6, 1), 148000),
                ("Brown", "Sales", 53000, datetime.date(2009, 9, 1), 148000),
            ],
            transform=lambda row: (
                row.name,
                row.department,
                row.salary,
                row.hire_date,
                row.sum,
            ),
        )
    def test_subquery_row_range_rank(self):
        """
        A window expression with a ROWS frame works inside a correlated
        Subquery: find the hire date with the highest rolling average salary
        per department.
        """
        qs = Employee.objects.annotate(
            highest_avg_salary_date=Subquery(
                Employee.objects.filter(
                    department=OuterRef("department"),
                )
                .annotate(
                    avg_salary=Window(
                        expression=Avg("salary"),
                        order_by=[F("hire_date").asc()],
                        frame=RowRange(start=-1, end=1),
                    ),
                )
                .order_by("-avg_salary", "hire_date")
                .values("hire_date")[:1],
            ),
        ).order_by("department", "name")
        self.assertQuerySetEqual(
            qs,
            [
                ("Adams", "Accounting", datetime.date(2005, 11, 1)),
                ("Jenson", "Accounting", datetime.date(2005, 11, 1)),
                ("Jones", "Accounting", datetime.date(2005, 11, 1)),
                ("Williams", "Accounting", datetime.date(2005, 11, 1)),
                ("Moore", "IT", datetime.date(2011, 3, 1)),
                ("Wilkinson", "IT", datetime.date(2011, 3, 1)),
                ("Johnson", "Management", datetime.date(2005, 6, 1)),
                ("Miller", "Management", datetime.date(2005, 6, 1)),
                ("Johnson", "Marketing", datetime.date(2009, 10, 1)),
                ("Smith", "Marketing", datetime.date(2009, 10, 1)),
                ("Brown", "Sales", datetime.date(2007, 6, 1)),
                ("Smith", "Sales", datetime.date(2007, 6, 1)),
            ],
            transform=lambda row: (
                row.name,
                row.department,
                row.highest_avg_salary_date,
            ),
        )
@skipUnlessDBFeature("supports_frame_exclusion")
def test_row_range_rank_exclude_current_row(self):
qs = Employee.objects.annotate(
avg_salary_cohort=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(
start=-1, end=1, exclusion=WindowFrameExclusion.CURRENT_ROW
),
)
).order_by("hire_date")
self.assertIn(
"ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW",
str(qs.query),
)
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 72500),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 67500),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 46000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 49000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 37500),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 56500),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 39000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 55000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 37000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 50000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.avg_salary_cohort,
),
)
@skipUnlessDBFeature("supports_frame_exclusion")
def test_row_range_rank_exclude_group(self):
qs = Employee.objects.annotate(
avg_salary_cohort=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=-1, end=1, exclusion=WindowFrameExclusion.GROUP),
)
).order_by("hire_date")
self.assertIn(
"ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP",
str(qs.query),
)
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 72500),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 67500),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 46000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 49000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 37500),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 56500),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 39000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 55000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 37000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 50000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.avg_salary_cohort,
),
)
@skipUnlessDBFeature("supports_frame_exclusion")
def test_row_range_rank_exclude_ties(self):
qs = Employee.objects.annotate(
sum_salary_cohort=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=-1, end=1, exclusion=WindowFrameExclusion.TIES),
)
).order_by("hire_date")
self.assertIn(
"ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES",
str(qs.query),
)
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 180000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 225000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 180000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 145000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 137000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 135000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 128000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 151000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 138000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 150000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 124000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 84000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum_salary_cohort,
),
)
@skipUnlessDBFeature("supports_frame_exclusion")
def test_row_range_rank_exclude_no_others(self):
qs = Employee.objects.annotate(
sum_salary_cohort=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(
start=-1, end=1, exclusion=WindowFrameExclusion.NO_OTHERS
),
)
).order_by("hire_date")
self.assertIn(
"ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE NO OTHERS",
str(qs.query),
)
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 180000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 225000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 180000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 145000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 137000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 135000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 128000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 151000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 138000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 150000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 124000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 84000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum_salary_cohort,
),
)
@skipIfDBFeature("supports_frame_exclusion")
def test_unsupported_frame_exclusion_raises_error(self):
msg = "This backend does not support window frame exclusions."
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
avg_salary_cohort=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(
start=-1, end=1, exclusion=WindowFrameExclusion.CURRENT_ROW
),
)
)
)
@skipUnlessDBFeature("supports_frame_exclusion")
def test_invalid_frame_exclusion_value_raises_error(self):
msg = "RowRange.exclusion must be a WindowFrameExclusion instance."
with self.assertRaisesMessage(TypeError, msg):
Employee.objects.annotate(
avg_salary_cohort=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=-1, end=1, exclusion="RUBBISH"),
)
)
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum is the sum of the three next (if they exist) and all
previous rows according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=None, end=3),
)
).order_by("sum", "hire_date")
self.assertIn("ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING", str(qs.query))
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 280000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 325000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 362000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 415000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 453000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 513000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 553000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 603000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 637000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 637000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 637000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 637000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
def test_row_range_both_preceding(self):
"""
A query with ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING.
The resulting sum is the sum of the previous two (if they exist) rows
according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=-2, end=-1),
)
).order_by("hire_date")
self.assertIn("ROWS BETWEEN 2 PRECEDING AND 1 PRECEDING", str(qs.query))
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), None),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 180000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 125000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 100000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 100000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 82000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 90000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 91000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 98000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 100000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 90000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
def test_row_range_both_following(self):
"""
A query with ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING.
The resulting sum is the sum of the following two (if they exist) rows
according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=1, end=2),
)
).order_by("hire_date")
self.assertIn("ROWS BETWEEN 1 FOLLOWING AND 2 FOLLOWING", str(qs.query))
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 125000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 100000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 82000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 91000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 98000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 100000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 90000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 84000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_window_function(self):
"""
Window functions are not aggregates, and hence a query to filter out
duplicates may be useful.
"""
qs = (
Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by=ExtractYear("hire_date"),
order_by=ExtractYear("hire_date"),
),
year=ExtractYear("hire_date"),
)
.filter(sum__gte=45000)
.values("year", "sum")
.distinct("year")
.order_by("year")
)
results = [
{"year": 2005, "sum": 225000},
{"year": 2007, "sum": 55000},
{"year": 2008, "sum": 45000},
{"year": 2009, "sum": 128000},
{"year": 2011, "sum": 60000},
{"year": 2013, "sum": 84000},
]
for idx, val in zip(range(len(results)), results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Max(Col(expressions_window_employee, expressions_window.Employee.salary)) "
"OVER (PARTITION BY Col(expressions_window_employee, "
"expressions_window.Employee.department))>)."
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department="Management").update(
salary=Window(expression=Max("salary"), partition_by="department"),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Sum(Value(10000)) OVER ()"
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name="Jameson",
department="Management",
hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000))),
)
def test_window_expression_within_subquery(self):
subquery_qs = Employee.objects.annotate(
highest=Window(
FirstValue("id"),
partition_by=F("department"),
order_by=F("salary").desc(),
)
).values("highest")
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertCountEqual(
highest_salary.values("department", "salary"),
[
{"department": "Accounting", "salary": 50000},
{"department": "Sales", "salary": 55000},
{"department": "Marketing", "salary": 40000},
{"department": "IT", "salary": 60000},
{"department": "Management", "salary": 100000},
],
)
@skipUnlessDBFeature("supports_json_field")
def test_key_transform(self):
Detail.objects.bulk_create(
[
Detail(value={"department": "IT", "name": "Smith", "salary": 37000}),
Detail(value={"department": "IT", "name": "Nowak", "salary": 32000}),
Detail(value={"department": "HR", "name": "Brown", "salary": 50000}),
Detail(value={"department": "HR", "name": "Smith", "salary": 55000}),
Detail(value={"department": "PR", "name": "Moore", "salary": 90000}),
]
)
tests = [
(KeyTransform("department", "value"), KeyTransform("name", "value")),
(F("value__department"), F("value__name")),
]
for partition_by, order_by in tests:
with self.subTest(partition_by=partition_by, order_by=order_by):
qs = Detail.objects.annotate(
department_sum=Window(
expression=Sum(
Cast(
KeyTextTransform("salary", "value"),
output_field=IntegerField(),
)
),
partition_by=[partition_by],
order_by=[order_by],
)
).order_by("value__department", "department_sum")
self.assertQuerySetEqual(
qs,
[
("Brown", "HR", 50000, 50000),
("Smith", "HR", 55000, 105000),
("Nowak", "IT", 32000, 32000),
("Smith", "IT", 37000, 69000),
("Moore", "PR", 90000, 90000),
],
lambda entry: (
entry.value["name"],
entry.value["department"],
entry.value["salary"],
entry.department_sum,
),
)
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=3),
)
)
)
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=-3),
)
)
)
def test_invalid_start_end_value_for_row_range(self):
msg = "start cannot be greater than end."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=RowRange(start=4, end=-3),
)
)
)
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end="a"),
)
)
)
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=ValueRange(start="a"),
)
)
)
def test_invalid_type_end_row_range(self):
msg = "end argument must be an integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=RowRange(end="a"),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_start(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=-1),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_end(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=1),
)
)
)
def test_invalid_type_start_row_range(self):
msg = "start argument must be an integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=RowRange(start="a"),
)
)
)
def test_invalid_filter(self):
msg = (
"Heterogeneous disjunctive predicates against window functions are not "
"implemented when performing conditional aggregation."
)
qs = Employee.objects.annotate(
window=Window(Rank()),
past_dept_cnt=Count("past_departments"),
)
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.filter(Q(window=1) | Q(department="Accounting")))
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.exclude(window=1, department="Accounting"))
| WindowFunctionTests |
python | mwaskom__seaborn | tests/_core/test_properties.py | {
"start": 1402,
"end": 1883
} | class ____(DataFixtures):
def test_bad_scale_arg_str(self, num_vector):
err = "Unknown magic arg for x scale: 'xxx'."
with pytest.raises(ValueError, match=err):
Coordinate("x").infer_scale("xxx", num_vector)
def test_bad_scale_arg_type(self, cat_vector):
err = "Magic arg for x scale must be str, not list."
with pytest.raises(TypeError, match=err):
Coordinate("x").infer_scale([1, 2, 3], cat_vector)
| TestCoordinate |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B019.py | {
"start": 1989,
"end": 2148
} | class ____(enum.Enum):
ONE = enum.auto()
TWO = enum.auto()
@functools.cache
def bar(self, arg: str) -> str:
return f"{self} - {arg}"
| Foo |
python | huggingface__transformers | tests/models/clipseg/test_modeling_clipseg.py | {
"start": 4535,
"end": 7280
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as CLIPSeg does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (CLIPSegVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = CLIPSegVisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="CLIPSeg does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "CIDAS/clipseg-rd64-refined"
model = CLIPSegVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| CLIPSegVisionModelTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/overrides.py | {
"start": 3763,
"end": 3946
} | class ____(TooManyOverrides):
def return_source(self):
return _test_source()
def call_too_many_overrides(t: TooManyOverrides):
t.return_source()
| TooManyOverridesChild3 |
python | wandb__wandb | wandb/vendor/pygments/lexers/business.py | {
"start": 24074,
"end": 25361
} | class ____(RegexLexer):
"""
Lexer for `GoodData-CL
<http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/\
com/gooddata/processor/COMMANDS.txt>`_
script files.
.. versionadded:: 1.4
"""
name = 'GoodData-CL'
aliases = ['gooddata-cl']
filenames = ['*.gdc']
mimetypes = ['text/x-gooddata-cl']
flags = re.IGNORECASE
tokens = {
'root': [
# Comments
(r'#.*', Comment.Single),
# Function call
(r'[a-z]\w*', Name.Function),
# Argument list
(r'\(', Punctuation, 'args-list'),
# Punctuation
(r';', Punctuation),
# Space is not significant
(r'\s+', Text)
],
'args-list': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'[a-z]\w*', Name.Variable),
(r'=', Operator),
(r'"', String, 'string-literal'),
(r'[0-9]+(?:\.[0-9]+)?(?:e[+-]?[0-9]{1,3})?', Number),
# Space is not significant
(r'\s', Text)
],
'string-literal': [
(r'\\[tnrfbae"\\]', String.Escape),
(r'"', String, '#pop'),
(r'[^\\"]+', String)
]
}
| GoodDataCLLexer |
python | altair-viz__altair | altair/expr/core.py | {
"start": 6629,
"end": 7269
} | class ____(OperatorMixin, SchemaBase):
"""
Expression.
Base object for enabling build-up of Javascript expressions using
a Python syntax. Calling ``repr(obj)`` will return a Javascript
representation of the object and the operations it encodes.
"""
_schema = {"type": "string"}
def to_dict(self, *args, **kwargs):
return repr(self)
def __setattr__(self, attr, val) -> None:
# We don't need the setattr magic defined in SchemaBase
return object.__setattr__(self, attr, val)
# item access
def __getitem__(self, val):
return GetItemExpression(self, val)
| Expression |
python | numba__numba | numba/core/options.py | {
"start": 1830,
"end": 2999
} | class ____:
"""Defines how user-level target options are mapped to the target flags.
"""
nopython = _mapping("enable_pyobject", operator.not_)
forceobj = _mapping("force_pyobject")
looplift = _mapping("enable_looplift")
_nrt = _mapping("nrt")
debug = _mapping("debuginfo")
boundscheck = _mapping("boundscheck")
nogil = _mapping("release_gil")
writable_args = _mapping("writable_args")
no_rewrites = _mapping("no_rewrites")
no_cpython_wrapper = _mapping("no_cpython_wrapper")
no_cfunc_wrapper = _mapping("no_cfunc_wrapper")
parallel = _mapping("auto_parallel")
fastmath = _mapping("fastmath")
error_model = _mapping("error_model")
inline = _mapping("inline")
forceinline = _mapping("forceinline")
_dbg_extend_lifetimes = _mapping("dbg_extend_lifetimes")
_dbg_optnone = _mapping("dbg_optnone")
def include_default_options(*args):
"""Returns a mixin class with a subset of the options
Parameters
----------
*args : str
Option names to include.
"""
glbs = {k: getattr(DefaultOptions, k) for k in args}
return type("OptionMixins", (), glbs)
| DefaultOptions |
python | scipy__scipy | benchmarks/benchmarks/interpolate.py | {
"start": 16770,
"end": 17093
} | class ____(Benchmark):
def setup(self):
self.z = np.exp(np.linspace(-0.5, 0.5 + 15j*np.pi, num=1000))
self.pts = np.linspace(-1, 1, num=1000)
def time_AAA(self):
r = interpolate.AAA(self.z, np.tan(np.pi*self.z/2))
r(self.pts)
r.poles()
r.residues()
r.roots()
| AAA |
python | ansible__ansible | test/integration/targets/ansible-doc/filter_plugins/other.py | {
"start": 228,
"end": 484
} | class ____(object):
""" Ansible core jinja2 filters """
def filters(self):
return {
'donothing': donothing,
'nodocs': donothing,
'split': donothing,
'b64decode': donothing,
}
| FilterModule |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super1.py | {
"start": 1652,
"end": 1869
} | class ____(NamedTuple("NT1", [("y", int), ("x", int)])):
def method(self, v: tuple[int, int]):
cls = type(self)
v = super().__new__(cls, *v)
return type(self)(self.y + v.y, self.x + v.x)
| ClassH |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 7332,
"end": 7446
} | class ____(AirflowException):
"""Raise when a file type is not supported."""
| AirflowUnsupportedFileTypeException |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.