language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numba__numba | numba/tests/test_listobject.py | {
"start": 38574,
"end": 39513
} | class ____(MemoryLeakMixin, TestCase):
"""Test list iter. """
def test_list_iter(self):
@njit
def foo(items):
l = listobject.new_list(int32)
l.extend(items)
# use a simple sum to check this w/o having to return a list
r = 0
for j in l:
r += j
return r
items = (1, 2, 3, 4)
self.assertEqual(
foo(items),
sum(items)
)
def test_list_iter_self_mutation(self):
self.disable_leak_check()
@njit
def foo():
l = listobject.new_list(int32)
l.extend((1, 2, 3, 4))
for i in l:
l.append(i)
with self.assertRaises(RuntimeError) as raises:
foo()
self.assertIn(
'list was mutated during iteration'.format(**locals()),
str(raises.exception),
)
| TestIter |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 34824,
"end": 37057
} | class ____(Response):
"""
Response of events.add_batch endpoint.
:param added:
:type added: int
:param errors:
:type errors: int
:param errors_info:
:type errors_info: dict
"""
_service = "events"
_action = "add_batch"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"added": {"type": ["integer", "null"]},
"errors": {"type": ["integer", "null"]},
"errors_info": {"type": ["object", "null"]},
},
"type": "object",
}
def __init__(
self,
added: Optional[int] = None,
errors: Optional[int] = None,
errors_info: Optional[dict] = None,
**kwargs: Any
) -> None:
super(AddBatchResponse, self).__init__(**kwargs)
self.added = added
self.errors = errors
self.errors_info = errors_info
@schema_property("added")
def added(self) -> Optional[int]:
return self._property_added
@added.setter
def added(self, value: Optional[int]) -> None:
if value is None:
self._property_added = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "added", six.integer_types)
self._property_added = value
@schema_property("errors")
def errors(self) -> Optional[int]:
return self._property_errors
@errors.setter
def errors(self, value: Optional[int]) -> None:
if value is None:
self._property_errors = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "errors", six.integer_types)
self._property_errors = value
@schema_property("errors_info")
def errors_info(self) -> Optional[dict]:
return self._property_errors_info
@errors_info.setter
def errors_info(self, value: Optional[dict]) -> None:
if value is None:
self._property_errors_info = None
return
self.assert_isinstance(value, "errors_info", (dict,))
self._property_errors_info = value
| AddBatchResponse |
python | getsentry__sentry | src/sentry/api/endpoints/organization_stats_v2.py | {
"start": 6635,
"end": 6816
} | class ____(TypedDict):
start: str
end: str
intervals: list[str]
groups: list[_StatsGroup]
@extend_schema(tags=["Organizations"])
@region_silo_endpoint
| StatsApiResponse |
python | kubernetes-client__python | kubernetes/client/models/authentication_v1_token_request.py | {
"start": 383,
"end": 7746
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1TokenRequestSpec',
'status': 'V1TokenRequestStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""AuthenticationV1TokenRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this AuthenticationV1TokenRequest. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this AuthenticationV1TokenRequest.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this AuthenticationV1TokenRequest. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this AuthenticationV1TokenRequest. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this AuthenticationV1TokenRequest.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this AuthenticationV1TokenRequest. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this AuthenticationV1TokenRequest. # noqa: E501
:return: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this AuthenticationV1TokenRequest.
:param metadata: The metadata of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this AuthenticationV1TokenRequest. # noqa: E501
:return: The spec of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1TokenRequestSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this AuthenticationV1TokenRequest.
:param spec: The spec of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1TokenRequestSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
@property
def status(self):
"""Gets the status of this AuthenticationV1TokenRequest. # noqa: E501
:return: The status of this AuthenticationV1TokenRequest. # noqa: E501
:rtype: V1TokenRequestStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this AuthenticationV1TokenRequest.
:param status: The status of this AuthenticationV1TokenRequest. # noqa: E501
:type: V1TokenRequestStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AuthenticationV1TokenRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AuthenticationV1TokenRequest):
return True
return self.to_dict() != other.to_dict()
| AuthenticationV1TokenRequest |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/sensors/wasb.py | {
"start": 1319,
"end": 4326
} | class ____(BaseSensorOperator):
"""
Waits for a blob to arrive on Azure Blob Storage.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param wasb_conn_id: Reference to the :ref:`wasb connection <howto/connection:wasb>`.
:param check_options: Optional keyword arguments that
`WasbHook.check_for_blob()` takes.
:param deferrable: Run sensor in the deferrable mode.
:param public_read: whether an anonymous public read access should be used. Default is False
"""
template_fields: Sequence[str] = ("container_name", "blob_name")
def __init__(
self,
*,
container_name: str,
blob_name: str,
wasb_conn_id: str = "wasb_default",
check_options: dict | None = None,
public_read: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.blob_name = blob_name
self.check_options = check_options
self.public_read = public_read
self.deferrable = deferrable
def poke(self, context: Context):
self.log.info("Poking for blob: %s\n in wasb://%s", self.blob_name, self.container_name)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
return hook.check_for_blob(self.container_name, self.blob_name, **self.check_options)
def execute(self, context: Context) -> None:
"""
Poll for state of the job run.
In deferrable mode, the polling is deferred to the triggerer. Otherwise
the sensor waits synchronously.
"""
if not self.deferrable:
super().execute(context=context)
else:
if not self.poke(context=context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=WasbBlobSensorTrigger(
container_name=self.container_name,
blob_name=self.blob_name,
wasb_conn_id=self.wasb_conn_id,
public_read=self.public_read,
poke_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, str]) -> None:
"""
Return immediately - callback for when the trigger fires.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(event["message"])
else:
raise AirflowException("Did not receive valid event from the triggerer")
| WasbBlobSensor |
python | doocs__leetcode | solution/0300-0399/0324.Wiggle Sort II/Solution2.py | {
"start": 0,
"end": 552
} | class ____:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
bucket = [0] * 5001
for v in nums:
bucket[v] += 1
n = len(nums)
j = 5000
for i in range(1, n, 2):
while bucket[j] == 0:
j -= 1
nums[i] = j
bucket[j] -= 1
for i in range(0, n, 2):
while bucket[j] == 0:
j -= 1
nums[i] = j
bucket[j] -= 1
| Solution |
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 8327,
"end": 9215
} | class ____(BaseStringType):
@classmethod
def convert_from_xml( # pyright: ignore[reportIncompatibleMethodOverride]
cls, str_value: str
) -> RGBColor | str:
if str_value == "auto":
return ST_HexColorAuto.AUTO
return RGBColor.from_string(str_value)
@classmethod
def convert_to_xml( # pyright: ignore[reportIncompatibleMethodOverride]
cls, value: RGBColor
) -> str:
"""Keep alpha hex numerals all uppercase just for consistency."""
# expecting 3-tuple of ints in range 0-255
return "%02X%02X%02X" % value
@classmethod
def validate(cls, value: Any) -> None:
# must be an RGBColor object ---
if not isinstance(value, RGBColor):
raise ValueError(
"rgb color value must be RGBColor object, got %s %s" % (type(value), value)
)
| ST_HexColor |
python | google__pytype | pytype/overlays/overlay.py | {
"start": 418,
"end": 3663
} | class ____(abstract.Module):
"""A layer between pytype and a module's pytd definition.
An overlay pretends to be a module, but provides members that generate extra
typing information that cannot be expressed in a pytd file. For example,
collections.namedtuple is a factory method that generates class definitions
at runtime. An overlay is needed for Pytype to generate these classes.
An Overlay will typically import its underlying module in its __init__, e.g.
by calling ctx.loader.import_name(). Due to this, Overlays should only be used
when their underlying module is imported by the Python script being analyzed!
A subclass of Overlay should have an __init__ with the signature:
def __init__(self, ctx)
Attributes:
real_module: An abstract.Module wrapping the AST for the underlying module.
"""
def __init__(self, ctx, name, member_map, ast):
"""Initialize the overlay.
Args:
ctx: Instance of context.Context.
name: A string containing the name of the underlying module.
member_map: Dict of str to abstract.BaseValues that provide type
information not available in the underlying module.
ast: An pytd.TypeDeclUnit containing the AST for the underlying module.
Used to access type information for members of the module that are not
explicitly provided by the overlay.
"""
super().__init__(ctx, name, member_map, ast)
self.real_module = ctx.convert.constant_to_value(
ast, subst=datatypes.AliasingDict(), node=ctx.root_node
)
def _convert_member(
self,
name: str,
member: BuilderType,
subst: dict[str, cfg.Variable] | None = None,
) -> cfg.Variable:
val = member(self.ctx, self.name)
val.module = self.name
return val.to_variable(self.ctx.root_node)
def get_module(self, name):
"""Returns the abstract.Module for the given name."""
if name in self._member_map:
return self
else:
return self.real_module
def items(self):
items = super().items()
items += [
(name, item)
for name, item in self.real_module.items()
if name not in self._member_map
]
return items
def maybe_load_member(self, member_name):
"""Try to lazily load a member by name."""
# We may encounter errors such as [recursion-error] from recursive loading
# of a TypingContainer or [not-supported-yet] for a typing feature in a
# too-low version. If there are errors, we discard the result.
with self.ctx.errorlog.checkpoint() as record:
member_var = self.load_lazy_attribute(member_name, store=False)
member = abstract_utils.get_atomic_value(member_var)
# AnnotationClass is a placeholder used in the construction of parameterized
# types, not a real type.
if record.errors or isinstance(member, abstract.AnnotationClass):
return None
self.members[member_name] = member_var
return member
def add_name(name, builder):
"""Turns (name, ctx, module) -> val signatures into (ctx, module) -> val."""
return lambda ctx, module: builder(name, ctx, module)
def drop_module(builder):
"""Turns (ctx) -> val signatures into (ctx, module) -> val."""
return lambda ctx, module: builder(ctx)
| Overlay |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 844012,
"end": 845008
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"created_at",
"email",
"invitation_type",
"invitee",
"inviter",
"organization",
"role",
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
email = sgqlc.types.Field(String, graphql_name="email")
invitation_type = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationInvitationType), graphql_name="invitationType"
)
invitee = sgqlc.types.Field("User", graphql_name="invitee")
inviter = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="inviter")
organization = sgqlc.types.Field(
sgqlc.types.non_null(Organization), graphql_name="organization"
)
role = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationInvitationRole), graphql_name="role"
)
| OrganizationInvitation |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/extra/_array_helpers.py | {
"start": 1333,
"end": 9855
} | class ____(NamedTuple):
input_shapes: tuple[Shape, ...]
result_shape: Shape
@check_function
def check_argument(condition, fail_message, *f_args, **f_kwargs):
if not condition:
raise InvalidArgument(fail_message.format(*f_args, **f_kwargs))
@check_function
def order_check(name, floor, min_, max_):
if floor > min_:
raise InvalidArgument(f"min_{name} must be at least {floor} but was {min_}")
if min_ > max_:
raise InvalidArgument(f"min_{name}={min_} is larger than max_{name}={max_}")
# 32 is a dimension limit specific to NumPy, and does not necessarily apply to
# other array/tensor libraries. Historically these strategies were built for the
# NumPy extra, so it's nice to keep these limits, and it's seemingly unlikely
# someone would want to generate >32 dim arrays anyway.
# See https://github.com/HypothesisWorks/hypothesis/pull/3067.
NDIM_MAX = 32
@check_function
def check_valid_dims(dims, name):
if dims > NDIM_MAX:
raise InvalidArgument(
f"{name}={dims}, but Hypothesis does not support arrays with "
f"more than {NDIM_MAX} dimensions"
)
@defines_strategy()
def array_shapes(
*,
min_dims: int = 1,
max_dims: int | None = None,
min_side: int = 1,
max_side: int | None = None,
) -> st.SearchStrategy[Shape]:
"""Return a strategy for array shapes (tuples of int >= 1).
* ``min_dims`` is the smallest length that the generated shape can possess.
* ``max_dims`` is the largest length that the generated shape can possess,
defaulting to ``min_dims + 2``.
* ``min_side`` is the smallest size that a dimension can possess.
* ``max_side`` is the largest size that a dimension can possess,
defaulting to ``min_side + 5``.
"""
check_type(int, min_dims, "min_dims")
check_type(int, min_side, "min_side")
check_valid_dims(min_dims, "min_dims")
if max_dims is None:
max_dims = min(min_dims + 2, NDIM_MAX)
check_type(int, max_dims, "max_dims")
check_valid_dims(max_dims, "max_dims")
if max_side is None:
max_side = min_side + 5
check_type(int, max_side, "max_side")
order_check("dims", 0, min_dims, max_dims)
order_check("side", 0, min_side, max_side)
return st.lists(
st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims
).map(tuple)
@defines_strategy()
def valid_tuple_axes(
ndim: int,
*,
min_size: int = 0,
max_size: int | None = None,
) -> st.SearchStrategy[tuple[int, ...]]:
"""All tuples will have a length >= ``min_size`` and <= ``max_size``. The default
value for ``max_size`` is ``ndim``.
Examples from this strategy shrink towards an empty tuple, which render most
sequential functions as no-ops.
The following are some examples drawn from this strategy.
.. code-block:: pycon
>>> [valid_tuple_axes(3).example() for i in range(4)]
[(-3, 1), (0, 1, -1), (0, 2), (0, -2, 2)]
``valid_tuple_axes`` can be joined with other strategies to generate
any type of valid axis object, i.e. integers, tuples, and ``None``:
.. code-block:: python
any_axis_strategy = none() | integers(-ndim, ndim - 1) | valid_tuple_axes(ndim)
"""
check_type(int, ndim, "ndim")
check_type(int, min_size, "min_size")
if max_size is None:
max_size = ndim
check_type(int, max_size, "max_size")
order_check("size", 0, min_size, max_size)
check_valid_interval(max_size, ndim, "max_size", "ndim")
axes = st.integers(0, max(0, 2 * ndim - 1)).map(
lambda x: x if x < ndim else x - 2 * ndim
)
return st.lists(
axes, min_size=min_size, max_size=max_size, unique_by=lambda x: x % ndim
).map(tuple)
@defines_strategy()
def broadcastable_shapes(
shape: Shape,
*,
min_dims: int = 0,
max_dims: int | None = None,
min_side: int = 1,
max_side: int | None = None,
) -> st.SearchStrategy[Shape]:
"""Return a strategy for shapes that are broadcast-compatible with the
provided shape.
Examples from this strategy shrink towards a shape with length ``min_dims``.
The size of an aligned dimension shrinks towards size ``1``. The size of an
unaligned dimension shrink towards ``min_side``.
* ``shape`` is a tuple of integers.
* ``min_dims`` is the smallest length that the generated shape can possess.
* ``max_dims`` is the largest length that the generated shape can possess,
defaulting to ``max(len(shape), min_dims) + 2``.
* ``min_side`` is the smallest size that an unaligned dimension can possess.
* ``max_side`` is the largest size that an unaligned dimension can possess,
defaulting to 2 plus the size of the largest aligned dimension.
The following are some examples drawn from this strategy.
.. code-block:: pycon
>>> [broadcastable_shapes(shape=(2, 3)).example() for i in range(5)]
[(1, 3), (), (2, 3), (2, 1), (4, 1, 3), (3, )]
"""
check_type(tuple, shape, "shape")
check_type(int, min_side, "min_side")
check_type(int, min_dims, "min_dims")
check_valid_dims(min_dims, "min_dims")
strict_check = max_side is None or max_dims is None
if max_dims is None:
max_dims = min(max(len(shape), min_dims) + 2, NDIM_MAX)
check_type(int, max_dims, "max_dims")
check_valid_dims(max_dims, "max_dims")
if max_side is None:
max_side = max(shape[-max_dims:] + (min_side,)) + 2
check_type(int, max_side, "max_side")
order_check("dims", 0, min_dims, max_dims)
order_check("side", 0, min_side, max_side)
if strict_check:
dims = max_dims
bound_name = "max_dims"
else:
dims = min_dims
bound_name = "min_dims"
# check for unsatisfiable min_side
if not all(min_side <= s for s in shape[::-1][:dims] if s != 1):
raise InvalidArgument(
f"Given shape={shape}, there are no broadcast-compatible "
f"shapes that satisfy: {bound_name}={dims} and min_side={min_side}"
)
# check for unsatisfiable [min_side, max_side]
if not (
min_side <= 1 <= max_side or all(s <= max_side for s in shape[::-1][:dims])
):
raise InvalidArgument(
f"Given base_shape={shape}, there are no broadcast-compatible "
f"shapes that satisfy all of {bound_name}={dims}, "
f"min_side={min_side}, and max_side={max_side}"
)
if not strict_check:
# reduce max_dims to exclude unsatisfiable dimensions
for n, s in zip(range(max_dims), shape[::-1], strict=False):
if s < min_side and s != 1:
max_dims = n
break
if not (min_side <= 1 <= max_side or s <= max_side):
max_dims = n
break
return MutuallyBroadcastableShapesStrategy(
num_shapes=1,
base_shape=shape,
min_dims=min_dims,
max_dims=max_dims,
min_side=min_side,
max_side=max_side,
).map(lambda x: x.input_shapes[0])
# See https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
# Implementation based on numpy.lib.function_base._parse_gufunc_signature
# with minor upgrades to handle numeric and optional dimensions. Examples:
#
# add (),()->() binary ufunc
# sum1d (i)->() reduction
# inner1d (i),(i)->() vector-vector multiplication
# matmat (m,n),(n,p)->(m,p) matrix multiplication
# vecmat (n),(n,p)->(p) vector-matrix multiplication
# matvec (m,n),(n)->(m) matrix-vector multiplication
# matmul (m?,n),(n,p?)->(m?,p?) combination of the four above
# cross1d (3),(3)->(3) cross product with frozen dimensions
#
# Note that while no examples of such usage are given, Numpy does allow
# generalised ufuncs that have *multiple output arrays*. This is not
# currently supported by Hypothesis - please contact us if you would use it!
#
# We are unsure if gufuncs allow frozen dimensions to be optional, but it's
# easy enough to support here - and so we will unless we learn otherwise.
_DIMENSION = r"\w+\??" # Note that \w permits digits too!
_SHAPE = rf"\((?:{_DIMENSION}(?:,{_DIMENSION}){{0,31}})?\)"
_ARGUMENT_LIST = f"{_SHAPE}(?:,{_SHAPE})*"
_SIGNATURE = rf"^{_ARGUMENT_LIST}->{_SHAPE}$"
_SIGNATURE_MULTIPLE_OUTPUT = rf"^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$"
| BroadcastableShapes |
python | joerick__pyinstrument | pyinstrument/frame.py | {
"start": 11409,
"end": 12625
} | class ____:
_frames: list[Frame]
_exit_frames: list[Frame] | None
def __init__(self, root: Frame):
self.root = root
self.id = str(uuid.uuid4())
self._frames = []
self._exit_frames = None
self.add_frame(root)
@property
def frames(self) -> Sequence[Frame]:
return tuple(self._frames)
def add_frame(self, frame: Frame):
if frame.group:
frame.group.remove_frame(frame)
self._frames.append(frame)
frame.group = self
def remove_frame(self, frame: Frame):
assert frame.group is self
self._frames.remove(frame)
frame.group = None
@property
def exit_frames(self):
"""
Returns a list of frames whose children include a frame outside of the group
"""
if self._exit_frames is None:
exit_frames: list[Frame] = []
for frame in self.frames:
if any(c.group != self for c in frame.children):
exit_frames.append(frame)
self._exit_frames = exit_frames
return self._exit_frames
def __repr__(self):
return "FrameGroup(len(frames)=%d)" % len(self.frames)
| FrameGroup |
python | kamyu104__LeetCode-Solutions | Python/minimum-difficulty-of-a-job-schedule.py | {
"start": 39,
"end": 845
} | class ____(object):
def minDifficulty(self, jobDifficulty, d):
"""
:type jobDifficulty: List[int]
:type d: int
:rtype: int
"""
if len(jobDifficulty) < d:
return -1
dp = [[float("inf")]*len(jobDifficulty) for _ in xrange(d)]
dp[0][0] = jobDifficulty[0]
for i in xrange(1, len(jobDifficulty)):
dp[0][i] = max(dp[0][i-1], jobDifficulty[i])
for i in xrange(1, d):
for j in xrange(i, len(jobDifficulty)):
curr_max = jobDifficulty[j]
for k in reversed(xrange(i, j+1)):
curr_max = max(curr_max, jobDifficulty[k])
dp[i][j] = min(dp[i][j], dp[i-1][k-1] + curr_max)
return dp[d-1][len(jobDifficulty)-1]
| Solution |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/conftest.py | {
"start": 329,
"end": 1673
} | class ____:
"""Custom serializer for VCR cassettes using YAML and gzip.
We're using a custom serializer to avoid the default yaml serializer
used by VCR, which is not designed to be safe for untrusted input.
This step is an extra precaution necessary because the cassette files
are in compressed YAML format, which makes it more difficult to inspect
their contents during development or debugging.
"""
@staticmethod
def serialize(cassette_dict: dict) -> bytes:
"""Convert cassette to YAML and compress it."""
cassette_dict["requests"] = [
{
"method": request.method,
"uri": request.uri,
"body": request.body,
"headers": {k: [v] for k, v in request.headers.items()},
}
for request in cassette_dict["requests"]
]
yml = yaml.safe_dump(cassette_dict)
return gzip.compress(yml.encode("utf-8"))
@staticmethod
def deserialize(data: bytes) -> dict:
"""Decompress data and convert it from YAML."""
decoded_yaml = gzip.decompress(data).decode("utf-8")
cassette = cast("dict[str, Any]", yaml.safe_load(decoded_yaml))
cassette["requests"] = [Request(**request) for request in cassette["requests"]]
return cassette
| CustomSerializer |
python | langchain-ai__langchain | libs/core/langchain_core/outputs/chat_result.py | {
"start": 127,
"end": 1324
} | class ____(BaseModel):
"""Use to represent the result of a chat model call with a single prompt.
This container is used internally by some implementations of chat model,
it will eventually be mapped to a more general `LLMResult` object, and
then projected into an `AIMessage` object.
LangChain users working with chat models will usually access information via
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks). Please refer the `AIMessage` and `LLMResult` schema documentation
for more information.
"""
generations: list[ChatGeneration]
"""List of the chat generations.
Generations is a list to allow for multiple candidate generations for a single
input prompt.
"""
llm_output: dict | None = None
"""For arbitrary LLM provider specific output.
This dictionary is a free-form dictionary that can contain any information that the
provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
| ChatResult |
python | conda__conda | conda/gateways/repodata/__init__.py | {
"start": 11234,
"end": 15517
} | class ____(UserDict):
"""Load/save info file that accompanies cached `repodata.json`."""
# Accept old keys for new serialization
_aliased = {
"_mod": LAST_MODIFIED_KEY,
"_etag": ETAG_KEY,
"_cache_control": CACHE_CONTROL_KEY,
"_url": URL_KEY,
}
# Enforce string type on these keys
_strings = {"mod", "etag", "cache_control", "url"}
def __init__(
self,
cache_path_json: Path | str = "",
cache_path_state: Path | str = "",
repodata_fn="",
dict=None,
):
# dict is a positional-only argument in UserDict.
super().__init__(dict)
self.cache_path_json = pathlib.Path(cache_path_json)
self.cache_path_state = pathlib.Path(cache_path_state)
# XXX may not be that useful/used compared to the full URL
self.repodata_fn = repodata_fn
@property
def mod(self) -> str:
"""
Last-Modified header or ""
"""
return self.get(LAST_MODIFIED_KEY) or ""
@mod.setter
def mod(self, value):
self[LAST_MODIFIED_KEY] = value or ""
@property
def etag(self) -> str:
"""
Etag header or ""
"""
return self.get(ETAG_KEY) or ""
@etag.setter
def etag(self, value):
self[ETAG_KEY] = value or ""
@property
def cache_control(self) -> str:
"""
Cache-Control header or ""
"""
return self.get(CACHE_CONTROL_KEY) or ""
@cache_control.setter
def cache_control(self, value):
self[CACHE_CONTROL_KEY] = value or ""
def has_format(self, format: str) -> tuple[bool, datetime.datetime | None]:
# "has_zst": {
# // UTC RFC3999 timestamp of when we last checked whether the file is available or not
# // in this case the `repodata.json.zst` file
# // Note: same format as conda TUF spec
# "last_checked": "2023-01-08T11:45:44Z",
# // false = unavailable, true = available
# "value": BOOLEAN
# },
key = f"has_{format}"
if key not in self:
return (True, None) # we want to check by default
try:
obj = self[key]
last_checked_str = obj["last_checked"]
if last_checked_str.endswith("Z"):
last_checked_str = f"{last_checked_str[:-1]}+00:00"
last_checked = datetime.datetime.fromisoformat(last_checked_str)
value = bool(obj["value"])
return (value, last_checked)
except (KeyError, ValueError, TypeError) as e:
log.warning(
f"error parsing `has_` object from `<cache key>{CACHE_STATE_SUFFIX}`",
exc_info=e,
)
self.pop(key)
return False, datetime.datetime.now(tz=datetime.timezone.utc)
def set_has_format(self, format: str, value: bool):
key = f"has_{format}"
self[key] = {
"last_checked": datetime.datetime.now(tz=datetime.timezone.utc).isoformat()[
: -len("+00:00")
]
+ "Z",
"value": value,
}
def clear_has_format(self, format: str):
"""Remove 'has_{format}' instead of setting to False."""
key = f"has_{format}"
self.pop(key, None)
def should_check_format(self, format: str) -> bool:
"""Return True if named format should be attempted."""
has, when = self.has_format(format)
return (
has is True
or isinstance(when, datetime.datetime)
and datetime.datetime.now(tz=datetime.timezone.utc) - when
> CHECK_ALTERNATE_FORMAT_INTERVAL
)
def __contains__(self, key: str) -> bool:
key = self._aliased.get(key, key)
return super().__contains__(key)
    def __setitem__(self, key: str, item: Any) -> None:
        # Resolve legacy key aliases before storing.
        key = self._aliased.get(key, key)
        # Keys declared in self._strings must hold str values; anything else
        # is coerced to "" (and logged) rather than poisoning the cache state.
        if key in self._strings and not isinstance(item, str):
            log.debug('Replaced non-str RepodataState[%s] with ""', key)
            item = ""
        return super().__setitem__(key, item)
    def __getitem__(self, key: str) -> Any:
        # Resolve legacy key aliases before delegating to the underlying dict.
        key = self._aliased.get(key, key)
        return super().__getitem__(key)
| RepodataState |
python | pytorch__pytorch | test/fx/test_fx_param_shape_control_flow.py | {
"start": 1567,
"end": 1809
} | class ____(MyModuleBase):
    def __init__(self, in_channels):
        # Parameter shape (in_channels, 3): element count drives no_relu().
        super().__init__()
        self.param = torch.nn.Parameter(torch.randn(in_channels, 3))
    def no_relu(self):
        # True while the parameter has fewer than 30 elements, i.e. in_channels < 10.
        return self.param.numel() < 10 * 3
| MyModuleParamNumEl |
python | doocs__leetcode | solution/0000-0099/0036.Valid Sudoku/Solution.py | {
"start": 0,
"end": 656
} | class ____:
def isValidSudoku(self, board: List[List[str]]) -> bool:
row = [[False] * 9 for _ in range(9)]
col = [[False] * 9 for _ in range(9)]
sub = [[False] * 9 for _ in range(9)]
for i in range(9):
for j in range(9):
c = board[i][j]
if c == '.':
continue
num = int(c) - 1
k = i // 3 * 3 + j // 3
if row[i][num] or col[j][num] or sub[k][num]:
return False
row[i][num] = True
col[j][num] = True
sub[k][num] = True
return True
| Solution |
python | ray-project__ray | release/llm_tests/benchmark/load_test.py | {
"start": 10910,
"end": 13600
} | class ____(BaseProvider):
DEFAULT_MODEL_NAME = "ensemble"
    def get_url(self):
        """Return the Triton KServe-style inference path for this model.

        Only plain, single-completion, non-streaming requests are supported
        by this endpoint, so the unsupported modes are rejected up front.
        """
        assert not self.parsed_options.chat, "Chat is not supported"
        assert not self.parsed_options.stream, "Stream is not supported"
        assert self.parsed_options.n == 1, "n > 1 is not supported"
        return f"/v2/models/{self.model}/infer"
    def format_payload(self, prompt, max_tokens, images):
        """Build the Triton /v2 inference request body for `prompt`.

        Each model input is a named tensor with explicit datatype and shape.
        Images and logprobs are unsupported and rejected via assertions.
        """
        assert images is None, "images are not supported"
        # matching latest TRT-LLM example, your model configuration might be different
        data = {
            "inputs": [
                {
                    "name": "text_input",
                    "datatype": "BYTES",
                    "shape": [1, 1],
                    "data": [[prompt]],
                },
                {
                    "name": "max_tokens",
                    "datatype": "UINT32",
                    "shape": [1, 1],
                    "data": [[max_tokens]],
                },
                {
                    # Empty string means "no bad words" filter.
                    "name": "bad_words",
                    "datatype": "BYTES",
                    "shape": [1, 1],
                    "data": [[""]],
                },
                {
                    # Empty string means "no stop words".
                    "name": "stop_words",
                    "datatype": "BYTES",
                    "shape": [1, 1],
                    "data": [[""]],
                },
                {
                    "name": "temperature",
                    "datatype": "FP32",
                    "shape": [1, 1],
                    "data": [[self.parsed_options.temperature]],
                },
            ]
        }
        assert self.parsed_options.logprobs is None, "logprobs are not supported"
        return data
    def parse_output_json(self, data, prompt):
        """Extract generated text from a Triton response into ChunkMetadata.

        Scans the response outputs for the "text_output" tensor; raises
        ValueError if it is absent.  Token-usage fields are not reported by
        this backend and are returned as None.
        """
        for output in data["outputs"]:
            if output["name"] == "text_output":
                assert output["datatype"] == "BYTES"
                assert output["shape"] == [1]
                text = output["data"][0]
                # Triton returns the original prompt in the output, cut it off
                text = text.removeprefix("<s> ")
                if text.startswith(prompt):
                    # HF tokenizers get confused by the leading space
                    text = text[len(prompt) :].removeprefix(" ")
                else:
                    print("WARNING: prompt not found in the output")
                return ChunkMetadata(
                    text=text,
                    logprob_tokens=None,
                    usage_tokens=None,
                    prompt_usage_tokens=None,
                )
        raise ValueError("text_output not found in the response")
| TritonInferProvider |
python | PyCQA__pylint | pylint/checkers/base/basic_error_checker.py | {
"start": 4963,
"end": 24225
} | class ____(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
'in a generator function or method (e.g. with some "yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %r in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement, remove the else and"
" de-indent all the code inside it",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
"W0136": (
"'continue' discouraged inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause. This will raise a SyntaxWarning "
"starting in Python 3.14.",
),
"W0137": (
"'break' discouraged inside 'finally' clause",
"break-in-finally",
"Emitted when the `break` keyword is found "
"inside a finally clause. This will raise a SyntaxWarning "
"starting in Python 3.14.",
),
}
    @utils.only_required_for_messages("function-redefined")
    def visit_classdef(self, node: nodes.ClassDef) -> None:
        # A class shadowing an earlier definition is reported the same way as
        # a redefined function.
        self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple: nodes.Tuple) -> bool:
starred_count = 0
for elem in assign_tuple.itered():
match elem:
case nodes.Tuple():
return self._too_many_starred_for_tuple(elem)
case nodes.Starred():
starred_count += 1
return starred_count > 1
@utils.only_required_for_messages(
"too-many-star-expressions", "invalid-star-assignment-target"
)
def visit_assign(self, node: nodes.Assign) -> None:
match assign_target := node.targets[0]:
case nodes.Starred():
# Check *a = b
self.add_message("invalid-star-assignment-target", node=node)
case nodes.Tuple():
# Check *a, *b = ...
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.only_required_for_messages("star-needs-assignment-target")
def visit_starred(self, node: nodes.Starred) -> None:
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, nodes.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(node.parent, (nodes.List, nodes.Tuple, nodes.Set, nodes.Dict)):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, nodes.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.only_required_for_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(
(node.is_method() and "method") or "function", node
)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
nodes.Return, skip_klass=(nodes.FunctionDef, nodes.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = {}
for arg in node.args.arguments:
if arg.name in arg_clusters:
self.add_message(
"duplicate-argument-name",
node=arg,
args=(arg.name,),
confidence=HIGH,
)
else:
arg_clusters[arg.name] = arg
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node: nodes.FunctionDef) -> None:
scope_globals = {
name: child
for child in node.nodes_of_class(nodes.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(nodes.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node: nodes.FunctionDef) -> None:
"""Check that a name is both nonlocal and global."""
def same_scope(current: nodes.Global | nodes.Nonlocal) -> bool:
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(nodes.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(nodes.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.only_required_for_messages("return-outside-function")
def visit_return(self, node: nodes.Return) -> None:
if not isinstance(node.frame(), nodes.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.only_required_for_messages("yield-outside-function")
def visit_yield(self, node: nodes.Yield) -> None:
self._check_yield_outside_func(node)
@utils.only_required_for_messages("yield-outside-function")
def visit_yieldfrom(self, node: nodes.YieldFrom) -> None:
self._check_yield_outside_func(node)
@utils.only_required_for_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node: nodes.Continue) -> None:
self._check_in_loop(node, "continue")
@utils.only_required_for_messages("not-in-loop", "break-in-finally")
def visit_break(self, node: nodes.Break) -> None:
self._check_in_loop(node, "break")
@utils.only_required_for_messages("useless-else-on-loop")
def visit_for(self, node: nodes.For) -> None:
self._check_else_on_loop(node)
@utils.only_required_for_messages("useless-else-on-loop")
def visit_while(self, node: nodes.While) -> None:
self._check_else_on_loop(node)
    @utils.only_required_for_messages("nonexistent-operator")
    def visit_unaryop(self, node: nodes.UnaryOp) -> None:
        """Check use of the non-existent ++ and -- operators."""
        # "++x" parses as UnaryOp(+, UnaryOp(+, x)).  Requiring the two signs
        # to be textually adjacent (col_offset + 1) avoids flagging the legal
        # but spaced forms "+ +x" / "- -x".
        if (
            (node.op in "+-")
            and isinstance(node.operand, nodes.UnaryOp)
            and (node.operand.op == node.op)
            and (node.col_offset + 1 == node.operand.col_offset)
        ):
            self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node: nodes.Nonlocal, name: str) -> None:
current_scope = node.scope()
while current_scope.parent is not None:
if not isinstance(current_scope, (nodes.ClassDef, nodes.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
# Search for `name` in the parent scope if:
# `current_scope` is the same scope in which the `nonlocal` name is declared
# or `name` is not in `current_scope.locals`.
if current_scope is node.scope() or name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, nodes.FunctionDef):
self.add_message(
"nonlocal-without-binding", args=(name,), node=node, confidence=HIGH
)
@utils.only_required_for_messages("nonlocal-without-binding")
def visit_nonlocal(self, node: nodes.Nonlocal) -> None:
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.only_required_for_messages("abstract-class-instantiated")
def visit_call(self, node: nodes.Call) -> None:
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
for inferred in infer_all(node.func):
self._check_inferred_class_is_abstract(inferred, node)
def _check_inferred_class_is_abstract(
self, inferred: InferenceResult, node: nodes.Call
) -> None:
if not isinstance(inferred, nodes.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node: nodes.Yield) -> None:
if not isinstance(node.frame(), (nodes.FunctionDef, nodes.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node: nodes.For | nodes.While) -> None:
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(
self, node: nodes.Continue | nodes.Break, node_name: str
) -> None:
"""Check that a node is inside a for or while loop."""
for parent in node.node_ancestors():
if isinstance(parent, (nodes.For, nodes.While)):
if node not in parent.orelse:
return
if isinstance(parent, (nodes.ClassDef, nodes.FunctionDef)):
break
if (
isinstance(parent, nodes.Try)
and node in parent.finalbody
and isinstance(node, nodes.Continue)
):
self.add_message("continue-in-finally", node=node)
if (
isinstance(parent, nodes.Try)
and node in parent.finalbody
and isinstance(node, nodes.Break)
):
self.add_message("break-in-finally", node=node)
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(
self, redeftype: str, node: nodes.Call | nodes.FunctionDef
) -> None:
"""Check for redefinition of a function / method / class name."""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = [
i
for i in parent_frame.locals[node.name]
if not (isinstance(i.parent, nodes.AnnAssign) and i.parent.simple)
]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, nodes.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
if _is_singledispatchmethod_registration(node):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, nodes.If):
match node.parent.test:
case nodes.UnaryOp(op="not", operand=nodes.Name(name=name)) if (
name == node.name
):
# Exempt "if not <func>" cases
return
case nodes.Compare(
left=nodes.Name(name=name),
ops=[["is", nodes.Const(value=None)]],
) if (
name == node.name
):
# Exempt "if <func> is not None" cases
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() in FORWARD_REF_QNAME
):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
| BasicErrorChecker |
python | bokeh__bokeh | src/bokeh/io/notebook.py | {
"start": 4853,
"end": 4987
} | class ____(Protocol):
def __call__(self, resources: Resources, verbose: bool, hide_banner: bool, load_timeout: int) -> None: ...
| Load |
python | scipy__scipy | scipy/_lib/tests/test_array_api.py | {
"start": 899,
"end": 16566
} | class ____:
def test_array_namespace(self):
x, y = np.array([0, 1, 2]), np.array([0, 1, 2])
xp = array_namespace(x, y)
assert 'array_api_compat.numpy' in xp.__name__
def test_asarray(self, xp):
x, y = _asarray([0, 1, 2], xp=xp), _asarray(np.arange(3), xp=xp)
ref = xp.asarray([0, 1, 2])
xp_assert_equal(x, ref)
xp_assert_equal(y, ref)
@pytest.mark.filterwarnings("ignore: the matrix subclass")
def test_raises(self):
msg = "of type `numpy.ma.MaskedArray` are not supported"
with pytest.raises(TypeError, match=msg):
array_namespace(np.ma.array(1), np.array(1))
msg = "of type `numpy.matrix` are not supported"
with pytest.raises(TypeError, match=msg):
array_namespace(np.array(1), np.matrix(1))
msg = "only boolean and numerical dtypes are supported"
with pytest.raises(TypeError, match=msg):
array_namespace([object()])
with pytest.raises(TypeError, match=msg):
array_namespace('abc')
@pytest.mark.skip_xp_backends(np_only=True, reason="Array-likes")
def test_array_likes(self, xp):
"""Test that if all parameters of array_namespace are Array-likes,
the output is array_api_compat.numpy
"""
assert array_namespace([0, 1, 2]) is xp
assert array_namespace((0, 1, 2)) is xp
assert array_namespace(1, 2, 3) is xp
assert array_namespace(1) is xp
assert array_namespace(np.int64(1)) is xp
assert array_namespace([0, 1, 2], 3) is xp
assert array_namespace() is xp
assert array_namespace(None) is xp
assert array_namespace(1, None) is xp
assert array_namespace(None, 1) is xp
# This only works when xp is numpy!
assert array_namespace(np.asarray([1, 2]), [3, 4]) is xp
assert array_namespace(np.int64(1), [3, 4]) is xp
def test_array_and_array_likes_mix(self, xp):
"""Test that if there is at least one Array API object among
the parameters of array_namespace, and all other parameters
are scalars, the output is its namespace.
If there are non-scalar Array-Likes, raise as in array-api-compat.
"""
x = xp.asarray(1)
assert array_namespace(x) is xp
assert array_namespace(x, 1) is xp
assert array_namespace(1, x) is xp
assert array_namespace(None, x) is xp
if is_numpy(xp):
assert array_namespace(x, [1, 2]) is xp
else:
with pytest.raises(TypeError, match="Multiple namespaces"):
array_namespace(x, [1, 2])
with pytest.raises(TypeError, match="Multiple namespaces"):
array_namespace(x, np.int64(1))
with pytest.raises(TypeError, match="Multiple namespaces"):
# Subclass of float; matches array_api_compat behavior
array_namespace(x, np.float64(1))
with pytest.raises(TypeError, match="Multiple namespaces"):
# Subclass of complex; matches array_api_compat behavior
array_namespace(x, np.complex128(1))
def test_array_api_extra_hook(self):
"""Test that the `array_namespace` function used by
array-api-extra has been overridden by scipy
"""
msg = "only boolean and numerical dtypes are supported"
with pytest.raises(TypeError, match=msg):
xpx.atleast_nd("abc", ndim=0)
def test_jax_zero_gradient_array(self):
"""Test array_namespace special case for JAX zero-gradient arrays, which are
numpy arrays but must be treated as JAX arrays.
See matching code and tests in array_api_compat.
"""
jax = pytest.importorskip("jax")
xp = pytest.importorskip("jax.numpy")
# Create numpy array with dtype=jax.float0
jax_zero = jax.vmap(jax.grad(xp.float32, allow_int=True))(xp.arange(4))
assert array_namespace(jax_zero) is xp
def test_void_but_not_jax_zero_gradient_array(self):
"""A void dtype that is not a jax.float0 must not be caught in the
special case for JAX zero-gradient arrays.
"""
void = np.empty(0, dtype=np.dtype([]))
with pytest.raises(TypeError, match="only boolean and numerical dtypes"):
array_namespace(void)
with pytest.raises(TypeError, match="only boolean and numerical dtypes"):
array_namespace([void, void])
def test_copy(self, xp):
for _xp in [xp, None]:
x = xp.asarray([1, 2, 3])
y = xp_copy(x, xp=_xp)
# with numpy we'd want to use np.shared_memory, but that's not specified
# in the array-api
assert id(x) != id(y)
try:
y[0] = 10
except (TypeError, ValueError):
pass
else:
assert x[0] != y[0]
@pytest.mark.parametrize(
"dtype",
["float32", "float64", "complex64", "complex128", "int32", "int64"],
)
@pytest.mark.parametrize(
"data", [[], 1, [1, 2, 3], [[1, 2], [2, 3]]],
)
def test_copy_to_numpy(self, xp, data, dtype):
xp_dtype = getattr(xp, dtype)
np_dtype = getattr(np, dtype)
x = xp.asarray(data, dtype=xp_dtype)
y = _xp_copy_to_numpy(x)
assert isinstance(y, np.ndarray)
assert y.dtype == np_dtype
assert x.shape == y.shape
np.testing.assert_equal(y, np.asarray(data, dtype=np_dtype))
if is_numpy(xp):
# Ensure y is a copy when xp is numpy.
assert id(x) != id(y)
@pytest.mark.parametrize('dtype', ['int32', 'int64', 'float32', 'float64'])
@pytest.mark.parametrize('shape', [(), (3,)])
def test_strict_checks(self, xp, dtype, shape):
# Check that `_strict_check` behaves as expected
dtype = getattr(xp, dtype)
x = xp.broadcast_to(xp.asarray(1, dtype=dtype), shape)
x = x if shape else x[()]
y = np_compat.asarray(1)[()]
kwarg_names = ["check_namespace", "check_dtype", "check_shape", "check_0d"]
options = dict(zip(kwarg_names, [True, False, False, False]))
if is_numpy(xp):
xp_assert_equal(x, y, **options)
else:
with pytest.raises(
AssertionError,
match="Namespace of desired array does not match",
):
xp_assert_equal(x, y, **options)
with pytest.raises(
AssertionError,
match="Namespace of actual and desired arrays do not match",
):
xp_assert_equal(y, x, **options)
options = dict(zip(kwarg_names, [False, True, False, False]))
if y.dtype.name in str(x.dtype):
xp_assert_equal(x, y, **options)
else:
with pytest.raises(AssertionError, match="dtypes do not match."):
xp_assert_equal(x, y, **options)
options = dict(zip(kwarg_names, [False, False, True, False]))
if x.shape == y.shape:
xp_assert_equal(x, y, **options)
else:
with pytest.raises(AssertionError, match="Shapes do not match."):
xp_assert_equal(x, xp.asarray(y), **options)
options = dict(zip(kwarg_names, [False, False, False, True]))
if is_numpy(xp) and x.shape == y.shape:
xp_assert_equal(x, y, **options)
elif is_numpy(xp):
with pytest.raises(AssertionError, match="Array-ness does not match."):
xp_assert_equal(x, y, **options)
@pytest.mark.skip_xp_backends(np_only=True, reason="Scalars only exist in NumPy")
def test_check_scalar(self, xp):
# identity always passes
xp_assert_equal(xp.float64(0), xp.float64(0))
xp_assert_equal(xp.asarray(0.), xp.asarray(0.))
xp_assert_equal(xp.float64(0), xp.float64(0), check_0d=False)
xp_assert_equal(xp.asarray(0.), xp.asarray(0.), check_0d=False)
# Check default convention: 0d-arrays are distinguished from scalars
message = "Array-ness does not match:.*"
with pytest.raises(AssertionError, match=message):
xp_assert_equal(xp.asarray(0.), xp.float64(0))
with pytest.raises(AssertionError, match=message):
xp_assert_equal(xp.float64(0), xp.asarray(0.))
with pytest.raises(AssertionError, match=message):
xp_assert_equal(xp.asarray(42), xp.int64(42))
with pytest.raises(AssertionError, match=message):
xp_assert_equal(xp.int64(42), xp.asarray(42))
# with `check_0d=False`, scalars-vs-0d passes (if values match)
xp_assert_equal(xp.asarray(0.), xp.float64(0), check_0d=False)
xp_assert_equal(xp.float64(0), xp.asarray(0.), check_0d=False)
# also with regular python objects
xp_assert_equal(xp.asarray(0.), 0., check_0d=False)
xp_assert_equal(0., xp.asarray(0.), check_0d=False)
xp_assert_equal(xp.asarray(42), 42, check_0d=False)
xp_assert_equal(42, xp.asarray(42), check_0d=False)
# as an alternative to `check_0d=False`, explicitly expect scalar
xp_assert_equal(xp.float64(0), xp.asarray(0.)[()])
@pytest.mark.skip_xp_backends(np_only=True, reason="Scalars only exist in NumPy")
def test_check_scalar_no_0d(self, xp):
# identity passes, if first argument is not 0d (or check_0d=True)
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0))
xp_assert_equal_no_0d(xp.float64(0), xp.float64(0), check_0d=True)
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.), check_0d=True)
# by default, 0d values are forbidden as the first argument
message = "Result is a NumPy 0d-array.*"
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.))
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0))
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(42))
# Check default convention: 0d-arrays are NOT distinguished from scalars
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.))
xp_assert_equal_no_0d(xp.int64(42), xp.asarray(42))
# opt in to 0d-check remains possible
message = "Array-ness does not match:.*"
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0), check_0d=True)
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.), check_0d=True)
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.asarray(42), xp.int64(0), check_0d=True)
with pytest.raises(AssertionError, match=message):
xp_assert_equal_no_0d(xp.int64(0), xp.asarray(42), check_0d=True)
# scalars-vs-0d passes (if values match) also with regular python objects
xp_assert_equal_no_0d(0., xp.asarray(0.))
xp_assert_equal_no_0d(42, xp.asarray(42))
def test_default_dtype(self, xp):
assert xp_default_dtype(xp) == xp.asarray(1.).dtype
scalars = [1, 1., 1. + 1j]
lists = [[1], [1.], [1. + 1j]]
types = ('int8 int16 int32 int64 '
'uint8 uint16 uint32 uint64 '
'float32 float64 complex64 complex128').split()
arrays = [np.asarray([1], dtype=getattr(np, t)) for t in types]
def convert_type(x, xp):
# Convert NumPy array to xp-array
# Convert string to indicated dtype from xp
# Return Python scalars unchanged
if isinstance(x, np.ndarray):
return xp.asarray(x)
elif isinstance(x, str):
return getattr(xp, x)
return x
def is_inexact(x, xp):
# Determine whether `x` is of inexact (real of complex floating) dtype
x = xp.asarray(x) if np.isscalar(x) or isinstance(x, list) else x
dtype = getattr(x, 'dtype', x)
return xp.isdtype(dtype, ('real floating', 'complex floating'))
@pytest.mark.parametrize('x', scalars + lists + types + arrays)
@pytest.mark.parametrize('y', scalars + lists + types + arrays)
def test_xp_result_type_no_force(x, y, xp):
# When force_floating==False (default), behavior of `xp_result_type`
# should match that of `xp.result_type` on the same arguments after
# converting lists to arrays of type `xp`.
x = convert_type(x, xp)
y = convert_type(y, xp)
x_ref = xp.asarray(x) if isinstance(x, list) else x
y_ref = xp.asarray(y) if isinstance(y, list) else y
try:
dtype_ref = xp.result_type(x_ref, y_ref)
expected_error = None
except Exception as e:
expected_error = (type(e), str(e))
if expected_error is not None:
with pytest.raises(expected_error[0], match=re.escape(expected_error[1])):
xp_result_type(x, y, xp=xp)
return
dtype_res = xp_result_type(x, y, xp=xp)
assert dtype_res == dtype_ref
@pytest.mark.parametrize('x', scalars + lists + types + arrays)
@pytest.mark.parametrize('y', scalars + lists + types + arrays)
def test_xp_result_type_force_floating(x, y, xp):
# When `force_floating==True`, behavior of `xp_result_type`
# should match that of `xp.result_type` with `1.0` appended to the set of
# arguments (after converting lists to arrays of type `xp`).
# If this raises a `TypeError`, which is the case when the result
# type is not defined by the standard, the result type should be
# the result type of any inexact (real or complex floating) arguments
# and the default floating point type.
if (is_torch(xp) and not(isinstance(x, str) or isinstance(y, str))
and np.isscalar(x) and np.isscalar(y)):
pytest.skip("See 3/27/2024 comment at data-apis/array-api-compat#277")
x = convert_type(x, xp)
y = convert_type(y, xp)
x_ref = xp.asarray(x) if isinstance(x, list) else x
y_ref = xp.asarray(y) if isinstance(y, list) else y
expected_error = None
try:
dtype_ref = xp.result_type(x_ref, y_ref, 1.0)
except TypeError:
args = []
if is_inexact(x_ref, xp):
args.append(x_ref)
if is_inexact(y_ref, xp):
args.append(y_ref)
dtype_ref = xp.result_type(*args, xp.asarray(1.0))
except Exception as e:
expected_error = (type(e), str(e))
if expected_error is not None:
with pytest.raises(expected_error[0], match=expected_error[1]):
xp_result_type(x, y, xp=xp)
return
dtype_res = xp_result_type(x, y, force_floating=True, xp=xp)
assert dtype_res == dtype_ref
# Test that the xp_capabilities decorator has been applied to all
# functions and function-likes in the public API. Modules will be
# added to the list of tested_modules below as decorator coverage
# is added on a module by module basis. It remains for future work
# to offer similar functionality to xp_capabilities for classes in
# the public API.
tested_modules = ["scipy.stats"]
def collect_public_functions():
functions = []
for module_name in tested_modules:
module = import_module(module_name)
for name in module.__all__:
obj = getattr(module, name)
if not is_named_function_like_object(obj):
continue
functions.append(pytest.param(obj, id=f"{module_name}.{name}"))
return functions
@pytest.mark.parametrize("func", collect_public_functions())
def test_xp_capabilities_coverage(func):
assert func in xp_capabilities_table
| TestArrayAPI |
python | getsentry__sentry | src/sentry/sentry_metrics/client/kafka.py | {
"start": 1776,
"end": 3626
} | class ____(GenericMetricsBackend):
    def __init__(self) -> None:
        # Resolve the logical ingest topic to its deployment-specific name and
        # build a reusable Arroyo producer bound to that topic.
        logical_topic = Topic.INGEST_PERFORMANCE_METRICS
        topic_defn = get_topic_definition(logical_topic)
        self.kafka_topic = ArroyoTopic(topic_defn["real_topic_name"])
        self.producer: Producer = get_arroyo_producer(
            name="sentry.sentry_metrics.client.kafka",
            topic=logical_topic,
        )
    def counter(
        self,
        use_case_id: UseCaseID,
        org_id: int,
        project_id: int,
        metric_name: str,
        value: int | float,
        tags: dict[str, str],
        unit: str | None,
    ) -> None:
        """
        Emit a counter metric for internal use cases only.

        The metric name is expanded to a counter ("c") MRI for the given use
        case and optional unit before being produced to Kafka.

        Note that, as of now, this function will return
        immediately even if the metric message has not been
        produced to the broker yet.
        """
        counter_metric: IngestMetric = {
            "org_id": org_id,
            "project_id": project_id,
            "name": build_mri(metric_name, "c", use_case_id, unit),
            "value": value,
            # Whole seconds since epoch, as expected by the ingest schema.
            "timestamp": int(datetime.now().timestamp()),
            "tags": tags,
            "retention_days": get_retention_from_org_id(org_id),
            "type": "c",
        }
        self.__produce(counter_metric, use_case_id)
def __produce(self, metric: IngestMetric, use_case_id: UseCaseID):
INGEST_CODEC.validate(metric)
payload = KafkaPayload(
None,
json.dumps(metric).encode("utf-8"),
[
("namespace", use_case_id.value.encode()),
],
)
self.producer.produce(self.kafka_topic, payload)
def close(self) -> None:
"""
Calling this is not required and is mostly for usage in tests
"""
self.producer.close()
| KafkaMetricsBackend |
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 204022,
"end": 204072
} | class ____(TypedDict):
name: str
| TypedDictParent |
python | pytorch__pytorch | test/jit/test_dataclasses.py | {
"start": 390,
"end": 584
} | class ____:
x: float
y: float
norm: Optional[torch.Tensor] = None
def __post_init__(self):
self.norm = (torch.tensor(self.x) ** 2 + torch.tensor(self.y) ** 2) ** 0.5
| Point |
python | pyinstaller__pyinstaller | bootloader/waflib/Build.py | {
"start": 27400,
"end": 30013
} | class ____(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd = 'step'
def __init__(self, **kw):
super(StepContext, self).__init__(**kw)
self.files = Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets = []
if self.targets and self.targets != '*':
targets = self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f = tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher = self.get_matcher(pat)
for tg in g:
if isinstance(tg, Task.Task):
lst = [tg]
else:
lst = tg.tasks
for tsk in lst:
do_exec = False
for node in tsk.inputs:
if matcher(node, output=False):
do_exec = True
break
for node in tsk.outputs:
if matcher(node, output=True):
do_exec = True
break
if do_exec:
ret = tsk.run()
Logs.info('%s -> exit %r', tsk, ret)
def get_matcher(self, pat):
inn = True
out = True
if pat.startswith('in:'):
out = False
pat = pat.replace('in:', '')
elif pat.startswith('out:'):
inn = False
pat = pat.replace('out:', '')
anode = self.root.find_node(pat)
pattern = None
if not anode:
if not pat.startswith('^'):
pat = '^.+?%s' % pat
if not pat.endswith('$'):
pat = '%s$' % pat
pattern = re.compile(pat)
def match(node, output):
if output and not out:
return False
if not output and not inn:
return False
if anode:
return anode == node
else:
return pattern.match(node.abspath())
return match
| StepContext |
python | PyCQA__pylint | tests/functional/a/arguments_renamed.py | {
"start": 715,
"end": 1023
} | class ____(Fruit):
def brew(self, fruit_name: bool): # No warning here
print(f"Brewing a banana named {fruit_name}")
def eat_with_condiment(self, fruit_name: str, condiment: Condiment, error: str): # [arguments-differ]
print(f"Eating a fruit named {fruit_name} with {condiment}")
| Banana |
python | mlflow__mlflow | mlflow/sklearn/utils.py | {
"start": 1101,
"end": 1414
} | class ____(NamedTuple):
name: str
function: Callable[..., Any]
arguments: dict[str, Any]
title: str
# _SklearnMetric represents a metric (e.g, precision_score) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
| _SklearnArtifact |
python | getsentry__sentry | tests/sentry/api/helpers/test_error_upsampling.py | {
"start": 573,
"end": 3999
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = Organization.objects.create(name="test-org")
self.projects = [
self.create_project(organization=self.organization, name="Project 1"),
self.create_project(organization=self.organization, name="Project 2"),
self.create_project(organization=self.organization, name="Project 3"),
]
self.project_ids = [p.id for p in self.projects]
self.snuba_params = SnubaParams(
start=None,
end=None,
projects=self.projects,
)
factory = RequestFactory()
self.request = Request(factory.get("/"))
self.request.GET = QueryDict("")
@patch("sentry.api.helpers.error_upsampling.options")
def test_are_any_projects_error_upsampled(self, mock_options: Mock) -> None:
# Test when all projects are allowlisted
mock_options.get.return_value = self.project_ids
assert are_any_projects_error_upsampled(self.project_ids) is True
# Test when some projects are allowlisted
mock_options.get.return_value = self.project_ids[:-1]
assert are_any_projects_error_upsampled(self.project_ids) is True
# Test when no projects are allowlisted
mock_options.get.return_value = []
assert are_any_projects_error_upsampled(self.project_ids) is False
# Test when no project IDs provided
assert are_any_projects_error_upsampled([]) is False
def test_transform_query_columns_for_error_upsampling(self) -> None:
# Test count() transformation
columns = ["count()", "other_column"]
expected = [
"upsampled_count() as count",
"other_column",
]
assert transform_query_columns_for_error_upsampling(columns) == expected
# Test case insensitivity
columns = ["COUNT()"]
expected = [
"upsampled_count() as count",
]
assert transform_query_columns_for_error_upsampling(columns) == expected
# Test whitespace handling
columns = [" count() "]
expected = [
"upsampled_count() as count",
]
assert transform_query_columns_for_error_upsampling(columns) == expected
def test_is_error_focused_query(self) -> None:
# Test explicit error type
self.request.GET = QueryDict("query=event.type:error")
assert _is_error_focused_query(self.request) is True
# Test explicit transaction type
self.request.GET = QueryDict("query=event.type:transaction")
assert _is_error_focused_query(self.request) is False
# Test empty query
self.request.GET = QueryDict("")
assert _is_error_focused_query(self.request) is False
def test_should_apply_sample_weight_transform(self) -> None:
# Test errors dataset
assert _should_apply_sample_weight_transform(errors, self.request) is True
# Test transactions dataset
assert _should_apply_sample_weight_transform(transactions, self.request) is False
self.request.GET = QueryDict("query=event.type:error")
assert _should_apply_sample_weight_transform(discover, self.request) is True
self.request.GET = QueryDict("query=event.type:transaction")
assert _should_apply_sample_weight_transform(discover, self.request) is False
| ErrorUpsamplingTest |
python | google__pytype | pytype/constant_folding_test.py | {
"start": 8081,
"end": 13086
} | class ____(TypeBuilderTestBase):
"""Test preservation of concrete values."""
def _process(self, src):
src = fmt(src)
_, defs = self.ctx.vm.run_program(src, "", maximum_depth=4)
return defs
def test_simple_list(self):
defs = self._process("""
a = [1, '2', 3]
b = a[1]
""")
a = defs["a"].data[0]
b = defs["b"].data[0]
self.assertPytd(a, "list[Union[int, str]]")
self.assertPytd(b, "str")
self.assertEqual(a.pyval[0].data[0].pyval, 1)
def test_nested_list(self):
defs = self._process("""
a = [[1, '2', 3], [4, 5]]
b, c = a
""")
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
t1 = "list[Union[int, str]]"
t2 = "list[int]"
self.assertPytd(a, f"list[Union[{t2}, {t1}]]")
self.assertPytd(b, t1)
self.assertPytd(c, t2)
def test_long_list(self):
elts = [" [1, 2],", " ['a'],"] * 42
src = ["a = ["] + elts + ["]"]
src += ["b = a[0]", "c = a[1]", "d = [a[72]]"]
defs = self._process("\n".join(src))
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
d = defs["d"].data[0]
t1 = "list[int]"
t2 = "list[str]"
self.assertPytd(a, "list[Union[list[int], list[str]]]")
self.assertPytd(b, t1)
self.assertPytd(c, t2)
self.assertPytd(d, "list[Union[list[int], list[str]]]")
def test_long_list_of_tuples(self):
elts = [" (1, 2),", " ('a', False),"] * 82
src = ["a = ["] + elts + ["]"]
src += ["b = a[0]", "c = a[1]", "d = [a[72]]"]
defs = self._process("\n".join(src))
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
d = defs["d"].data[0]
t1 = "tuple[int, int]"
t2 = "tuple[str, bool]"
self.assertPytd(a, f"list[Union[{t1}, {t2}]]")
self.assertPytd(b, t1)
self.assertPytd(c, t2)
self.assertPytd(d, f"list[Union[{t1}, {t2}]]")
def test_simple_map(self):
defs = self._process("""
a = {'b': 1, 'c': '2'}
b = a['b']
c = a['c']
""")
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
self.assertPytd(a, "dict[str, Union[int, str]]")
self.assertPytd(b, "int")
self.assertPytd(c, "str")
self.assertEqual(a.pyval["b"].data[0].pyval, 1)
def test_boolean(self):
defs = self._process("""
a = {'b': False, 'c': True, 'd': None}
""")
a = defs["a"].data[0]
# pylint: disable=g-generic-assert
self.assertEqual(a.pyval["b"].data[0].pyval, False)
self.assertEqual(a.pyval["c"].data[0].pyval, True)
self.assertEqual(a.pyval["d"].data[0].pyval, None)
# pylint: enable=g-generic-assert
def test_nested_map(self):
defs = self._process("""
a = {'b': [1, '2', 3], 'c': {'x': 4, 'y': True}}
b = a['b']
c = a['c']
d = a['c']['x']
""")
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
d = defs["d"].data[0]
t1 = "list[Union[int, str]]"
t2 = "dict[str, Union[bool, int]]"
self.assertPytd(a, f"dict[str, Union[{t2}, {t1}]]")
self.assertPytd(b, t1)
self.assertPytd(c, t2)
self.assertPytd(d, "int")
# Check the shape of the nested pyvals (their contents need to be unpacked
# from variables).
self.assertEqual(len(a.pyval["b"].data[0].pyval), 3)
self.assertEqual(list(a.pyval["c"].data[0].pyval.keys()), ["x", "y"])
def test_deep_nesting(self):
defs = self._process("""
a = {'b': [1, {'c': 1, 'd': {'x': 4, 'y': ({'p': 4, q: 3}, 1)}}]}
b = a['b'][1]['d']['y'][0]['p']
""")
b = defs["b"].data[0]
self.assertPytd(b, "int")
def test_long_map(self):
elts = [f" 'k{i}': [1, 2]," for i in range(64)]
src = ["a = {"] + elts + ["}"]
defs = self._process("\n".join(src))
a = defs["a"].data[0]
self.assertPytd(a, "dict[str, list[int]]")
def test_long_map_with_tuple_keys(self):
elts = [f" ({i}, True): 'a'," for i in range(64)]
src = ["a = {"] + elts + ["}"]
defs = self._process("\n".join(src))
a = defs["a"].data[0]
self.assertPytd(a, "dict[tuple[int, bool], str]")
self.assertFalse(a.pyval)
def test_nested_long_map(self):
# Elements in the long map should be collapsed to a single type.
# Elements not in the long map should have pyvals (specifically, the
# container with the long map in it should not be collapsed).
elts = [f" 'k{i}': [1, True]," for i in range(64)]
src = ["x = [1, {"] + elts + ["}, {'x': 2}]"]
src += ["a = x[0]", "b = x[1]", "c = x[2]"]
src += ["d = c['x']", "e = [b['random'][1]]"]
defs = self._process("\n".join(src))
a = defs["a"].data[0]
b = defs["b"].data[0]
c = defs["c"].data[0]
d = defs["d"].data[0]
e = defs["e"].data[0]
self.assertPytd(a, "int")
self.assertPytd(b, "dict[str, list[Union[bool, int]]]")
self.assertPytd(c, "dict[str, int]")
self.assertPytd(d, "int")
self.assertPytd(e, "list[Union[bool, int]]")
if __name__ == "__main__":
unittest.main()
| PyvalTest |
python | huggingface__transformers | src/transformers/models/depth_pro/modeling_depth_pro.py | {
"start": 35094,
"end": 36051
} | class ____(nn.Module):
def __init__(self, config: DepthProConfig):
super().__init__()
self.config = config
self.fusion_hidden_size = config.fusion_hidden_size
self.fov_encoder = DepthProFovEncoder(config)
self.conv = nn.Conv2d(
self.fusion_hidden_size, self.fusion_hidden_size // 2, kernel_size=3, stride=2, padding=1
)
self.activation = nn.ReLU(inplace=True)
self.head = DepthProFovHead(config)
def forward(
self,
pixel_values: torch.Tensor,
global_features: torch.Tensor,
) -> torch.Tensor:
fov_features = self.fov_encoder(pixel_values)
global_features = self.conv(global_features)
global_features = self.activation(global_features)
fov_features = fov_features + global_features
fov_output = self.head(fov_features)
fov_output = fov_output.flatten()
return fov_output
| DepthProFovModel |
python | rapidsai__cudf | python/cudf/cudf/options.py | {
"start": 367,
"end": 9483
} | class ____:
default: Any
value: Any
description: str
validator: Callable
_OPTIONS: dict[str, Option] = {}
def _env_get_int(name, default):
try:
return int(os.getenv(name, default))
except (ValueError, TypeError):
return default
def _env_get_bool(name, default):
env = os.getenv(name)
if env is None:
return default
as_a_int = _env_get_int(name, None)
env = env.lower().strip()
if env == "true" or env == "on" or as_a_int:
return True
if env == "false" or env == "off" or as_a_int == 0:
return False
return default
def _register_option(
name: str, default_value: Any, description: str, validator: Callable
):
"""Register an option.
Parameters
----------
name : str
The name of the option.
default_value : Any
The default value of the option.
description : str
A text description of the option.
validator : Callable
Called on the option value to check its validity. Should raise an
error if the value is invalid.
Raises
------
BaseException
Raised by validator if the value is invalid.
"""
validator(default_value)
_OPTIONS[name] = Option(
default_value, default_value, description, validator
)
def get_option(name: str) -> Any:
"""Get the value of option.
Parameters
----------
key : str
The name of the option.
Returns
-------
The value of the option.
Raises
------
KeyError
If option ``name`` does not exist.
"""
try:
return _OPTIONS[name].value
except KeyError:
raise KeyError(f'"{name}" does not exist.')
def set_option(name: str, val: Any):
"""Set the value of option.
Parameters
----------
name : str
The name of the option.
val : Any
The value to set.
Raises
------
KeyError
If option ``name`` does not exist.
BaseException
Raised by validator if the value is invalid.
"""
try:
option = _OPTIONS[name]
except KeyError:
raise KeyError(f'"{name}" does not exist.')
option.validator(val)
option.value = val
def _build_option_description(name, opt):
return (
f"{name}:\n"
f"\t{opt.description}\n"
f"\t[Default: {opt.default}] [Current: {opt.value}]"
)
def describe_option(name: str | None = None):
"""Prints the description of an option.
If `name` is unspecified, prints the description of all available options.
Parameters
----------
name : Optional[str]
The name of the option.
"""
names = _OPTIONS.keys() if name is None else [name]
for name in names:
print(_build_option_description(name, _OPTIONS[name])) # noqa: T201
def _make_contains_validator(valid_options: Container) -> Callable:
"""Return a validator that checks if a value is in `valid_options`."""
def _validator(val):
if val not in valid_options:
raise ValueError(
f"{val} is not a valid option. "
f"Must be one of {set(valid_options)}."
)
return _validator
def _cow_validator(val):
if val not in {False, True}:
raise ValueError(
f"{val} is not a valid option. Must be one of {{False, True}}."
)
def _spill_validator(val):
if val not in {False, True}:
raise ValueError(
f"{val} is not a valid option. Must be one of {{False, True}}."
)
def _integer_validator(val):
try:
int(val)
return True
except ValueError:
raise ValueError(f"{val} is not a valid option. Must be an integer.")
def _integer_and_none_validator(val):
try:
if val is None or int(val):
return
except ValueError:
raise ValueError(
f"{val} is not a valid option. Must be an integer or None."
)
_register_option(
"default_integer_bitwidth",
None,
textwrap.dedent(
"""
Default bitwidth when the dtype of an integer needs to be
inferred. If set to `None`, the API will align dtype with pandas.
APIs that respect this option include:
\t- cudf object constructors
\t- cudf.read_csv and cudf.read_json when `dtype` is not specified.
\t- APIs that require implicit conversion of cudf.RangeIndex to an
\t integer index.
\tValid values are None, 32 or 64. Default is None.
"""
),
_make_contains_validator([None, 32, 64]),
)
_register_option(
"default_float_bitwidth",
None,
textwrap.dedent(
"""
Default bitwidth when the dtype of a float needs to be
inferred. If set to `None`, the API will align dtype with pandas.
APIs that respect this option include:
\t- cudf object constructors
\t- cudf.read_csv and cudf.read_json when `dtype` is not specified.
\tValid values are None, 32 or 64. Default is None.
"""
),
_make_contains_validator([None, 32, 64]),
)
_register_option(
"spill",
_env_get_bool("CUDF_SPILL", False),
textwrap.dedent(
"""
Enables spilling.
\tValid values are True or False. Default is False.
"""
),
_spill_validator,
)
_register_option(
"copy_on_write",
_env_get_bool("CUDF_COPY_ON_WRITE", False),
textwrap.dedent(
"""
If set to `False`, disables copy-on-write.
If set to `True`, enables copy-on-write.
Read more at: :ref:`copy-on-write-user-doc`
\tValid values are True or False. Default is False.
"""
),
_cow_validator,
)
_register_option(
"spill_on_demand",
_env_get_bool("CUDF_SPILL_ON_DEMAND", True),
textwrap.dedent(
"""
Enables spilling on demand using an RMM out-of-memory error handler.
This has no effect if spilling is disabled, see the "spill" option.
\tValid values are True or False. Default is True.
"""
),
_make_contains_validator([False, True]),
)
_register_option(
"spill_device_limit",
_env_get_int("CUDF_SPILL_DEVICE_LIMIT", None),
textwrap.dedent(
"""
Enforce a device memory limit in bytes.
This has no effect if spilling is disabled, see the "spill" option.
\tValid values are any positive integer or None (disabled).
\tDefault is None.
"""
),
_integer_and_none_validator,
)
_register_option(
"spill_stats",
_env_get_int("CUDF_SPILL_STATS", 0),
textwrap.dedent(
"""
If not 0, enables statistics at the specified level:
0 - disabled (no overhead).
1+ - duration and number of bytes spilled (very low overhead).
2+ - a traceback for each time a spillable buffer is exposed
permanently (potential high overhead).
Valid values are any positive integer.
Default is 0 (disabled).
"""
),
_integer_validator,
)
_register_option(
"mode.pandas_compatible",
False,
textwrap.dedent(
"""
If set to `False`, retains `cudf` specific behavior.
If set to `True`, enables pandas compatibility mode,
which will try to match pandas API behaviors in case of
any inconsistency.
\tValid values are True or False. Default is False.
"""
),
_make_contains_validator([False, True]),
)
_register_option(
"memory_profiling",
_env_get_bool("CUDF_MEMORY_PROFILING", False),
textwrap.dedent(
"""
If set to `False`, disables memory profiling.
If set to `True`, enables memory profiling.
Read more at: :ref:`memory-profiling-user-doc`
\tValid values are True or False. Default is False.
"""
),
_make_contains_validator([False, True]),
)
_register_option(
"io.parquet.low_memory",
False,
textwrap.dedent(
"""
If set to `False`, reads entire parquet in one go.
If set to `True`, reads parquet file in chunks.
\tValid values are True or False. Default is False.
"""
),
_make_contains_validator([False, True]),
)
_register_option(
"io.json.low_memory",
False,
textwrap.dedent(
"""
If set to `False`, reads entire json in one go.
If set to `True`, reads json file in chunks.
\tValid values are True or False. Default is False.
"""
),
_make_contains_validator([False, True]),
)
_register_option(
"kvikio_remote_io",
_env_get_bool("CUDF_KVIKIO_REMOTE_IO", False),
textwrap.dedent(
"""
Whether to use KvikIO's remote IO backend or not.
\tWARN: this is experimental and may be removed at any time
\twithout warning or deprecation period.
\tSet KVIKIO_NTHREADS (default is 8) to change the number of
\tconcurrent tcp connections, which is important for good performance.
\tValid values are True or False. Default is False.
"""
),
_make_contains_validator([False, True]),
)
| Option |
python | walkccc__LeetCode | solutions/2101. Detonate the Maximum Bombs/2101.py | {
"start": 0,
"end": 619
} | class ____:
def maximumDetonation(self, bombs: list[list[int]]) -> int:
n = len(bombs)
ans = 0
graph = [[] for _ in range(n)]
for i, (xi, yi, ri) in enumerate(bombs):
for j, (xj, yj, rj) in enumerate(bombs):
if i == j:
continue
if ri**2 >= (xi - xj)**2 + (yi - yj)**2:
graph[i].append(j)
def dfs(u: int, seen: set[int]) -> None:
for v in graph[u]:
if v in seen:
continue
seen.add(v)
dfs(v, seen)
for i in range(n):
seen = set([i])
dfs(i, seen)
ans = max(ans, len(seen))
return ans
| Solution |
python | google__jax | jax/experimental/pallas/ops/tpu/splash_attention/splash_attention_kernel.py | {
"start": 15144,
"end": 15502
} | class ____(enum.IntEnum):
HEAD_DIM_MINOR = enum.auto() # [..., seq_len, head_dim]
SEQ_MINOR = enum.auto() # [..., head_dim, seq_len]
def from_head_minor(vals: tuple[Any, ...], layout: QKVLayout):
if layout == QKVLayout.HEAD_DIM_MINOR:
return vals
return (*vals[:-2], vals[-1], vals[-2])
@dataclasses.dataclass(frozen=True, slots=True)
| QKVLayout |
python | django__django | tests/forms_tests/views.py | {
"start": 360,
"end": 698
} | class ____(UpdateView):
model = Article
success_url = "/"
form_class = ArticleForm
def form_view(request):
class Form(forms.Form):
number = forms.FloatField()
template = Template("<html>{{ form }}</html>")
context = Context({"form": Form()})
return HttpResponse(template.render(context))
| ArticleFormView |
python | kamyu104__LeetCode-Solutions | Python/k-diff-pairs-in-an-array.py | {
"start": 29,
"end": 458
} | class ____(object):
def findPairs(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if k < 0: return 0
result, lookup = set(), set()
for num in nums:
if num-k in lookup:
result.add(num-k)
if num+k in lookup:
result.add(num)
lookup.add(num)
return len(result)
| Solution |
python | django__django | tests/csrf_tests/tests.py | {
"start": 41714,
"end": 52862
} | class ____(CsrfViewMiddlewareTestMixin, SimpleTestCase):
def _set_csrf_cookie(self, req, cookie):
req.COOKIES[settings.CSRF_COOKIE_NAME] = cookie
def _read_csrf_cookie(self, req, resp):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if settings.CSRF_COOKIE_NAME not in resp.cookies:
return False
csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME]
return csrf_cookie.value
def _get_cookies_set(self, req, resp):
return resp._cookies_set
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
req = self._get_request()
resp = ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn("Cookie", resp.get("Vary", ""))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn("Cookie", resp.get("Vary", ""))
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_request()
MAX_AGE = 123
with self.settings(
CSRF_COOKIE_NAME="csrfcookie",
CSRF_COOKIE_DOMAIN=".example.com",
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH="/test/",
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True,
):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get("csrfcookie").get("max-age")
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
CSRF cookie age does not have max age set and therefore uses
session-based cookies.
"""
req = self._get_request()
MAX_AGE = None
with self.settings(
CSRF_COOKIE_NAME="csrfcookie",
CSRF_COOKIE_DOMAIN=".example.com",
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH="/test/",
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True,
):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get("csrfcookie").get("max-age")
self.assertEqual(max_age, "")
def test_csrf_cookie_samesite(self):
req = self._get_request()
with self.settings(
CSRF_COOKIE_NAME="csrfcookie", CSRF_COOKIE_SAMESITE="Strict"
):
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
self.assertEqual(resp.cookies["csrfcookie"]["samesite"], "Strict")
def test_bad_csrf_cookie_characters(self):
"""
If the CSRF cookie has invalid characters in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(
64 * "*", "CSRF cookie has invalid characters."
)
def test_bad_csrf_cookie_length(self):
"""
If the CSRF cookie has an incorrect length in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(16 * "a", "CSRF cookie has incorrect length.")
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_request(cookie="x" * 100000)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
def test_process_view_token_invalid_chars(self):
"""
If the token contains non-alphanumeric characters, it is ignored and a
new token is created.
"""
token = ("!@#" + self._csrf_id_token)[:CSRF_TOKEN_LENGTH]
req = self._get_request(cookie=token)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
self.assertNotEqual(csrf_cookie, token)
def test_masked_unmasked_combinations(self):
"""
All combinations are allowed of (1) masked and unmasked cookies,
(2) masked and unmasked tokens, and (3) tokens provided via POST and
the X-CSRFToken header.
"""
cases = [
(TEST_SECRET, TEST_SECRET, None),
(TEST_SECRET, MASKED_TEST_SECRET2, None),
(TEST_SECRET, None, TEST_SECRET),
(TEST_SECRET, None, MASKED_TEST_SECRET2),
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=post_token,
meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_set_cookie_called_only_once(self):
"""
set_cookie() is called only once when the view is decorated with both
ensure_csrf_cookie and csrf_protect.
"""
req = self._get_POST_request_with_token()
resp = ensured_and_protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
# set_cookie() was called only once and with the expected secret.
cookies_set = self._get_cookies_set(req, resp)
self.assertEqual(cookies_set, [TEST_SECRET])
def test_invalid_cookie_replaced_on_GET(self):
"""
A CSRF cookie with the wrong format is replaced during a GET request.
"""
req = self._get_request(cookie="badvalue")
resp = protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie, msg="No CSRF cookie was sent.")
self.assertEqual(len(csrf_cookie), CSRF_SECRET_LENGTH)
def test_valid_secret_not_replaced_on_GET(self):
"""
Masked and unmasked CSRF cookies are not replaced during a GET request.
"""
cases = [
TEST_SECRET,
MASKED_TEST_SECRET1,
]
for cookie in cases:
with self.subTest(cookie=cookie):
req = self._get_request(cookie=cookie)
resp = protected_view(req)
self.assertContains(resp, "OK")
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertFalse(csrf_cookie, msg="A CSRF cookie was sent.")
def test_masked_secret_accepted_and_replaced(self):
"""
For a view that uses the csrf_token, the csrf cookie is replaced with
the unmasked version if originally masked.
"""
req = self._get_POST_request_with_token(cookie=MASKED_TEST_SECRET1)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
self._check_token_present(resp, csrf_cookie)
def test_bare_secret_accepted_and_not_replaced(self):
"""
The csrf cookie is left unchanged if originally not masked.
"""
req = self._get_POST_request_with_token(cookie=TEST_SECRET)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, TEST_SECRET)
self._check_token_present(resp, csrf_cookie)
@override_settings(
ALLOWED_HOSTS=["www.example.com"],
CSRF_COOKIE_DOMAIN=".example.com",
USE_X_FORWARDED_PORT=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], CSRF_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(
ALLOWED_HOSTS=["www.example.com"], CSRF_COOKIE_DOMAIN=".example.com"
)
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(CSRF_COOKIE_DOMAIN=".example.com", DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META["HTTP_REFERER"] = "http://example.com/"
req.META["SERVER_PORT"] = "443"
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
"Referer checking failed - Referer is insecure while host is secure.",
status_code=403,
)
@override_settings(CSRF_USE_SESSIONS=True, CSRF_COOKIE_DOMAIN=None)
| CsrfViewMiddlewareTests |
python | PrefectHQ__prefect | src/integrations/prefect-redis/prefect_redis/ordering.py | {
"start": 1078,
"end": 1371
} | class ____(Exception):
"""Indicates that an event is currently being processed and should not be processed
until it is finished. This may happen due to Redis Streams redelivering a message."""
def __init__(self, event: ReceivedEvent):
self.event = event
| EventBeingProcessed |
python | getsentry__sentry | src/sentry/issues/endpoints/bases/group_search_view.py | {
"start": 316,
"end": 1430
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read", "org:write", "org:admin"],
"POST": ["org:read", "org:write", "org:admin"],
"PUT": ["org:read", "org:write", "org:admin"],
"DELETE": ["org:read", "org:write", "org:admin"],
}
def has_object_permission(self, request: Request, view: APIView, obj: object) -> bool:
if isinstance(obj, Organization):
return super().has_object_permission(request, view, obj)
if isinstance(obj, GroupSearchView):
# Org members can view or create any GroupSearchView
if request.method == "GET" or request.method == "POST":
return True
# The creator can edit their own GroupSearchView
# Org owners/managers and superusers may edit any GroupSearchView
if (
request.user.id == obj.user_id
or request.access.has_scope("org:write")
or is_active_superuser(request)
):
return True
return False
return True
| GroupSearchViewPermission |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/blob_storage.py | {
"start": 6781,
"end": 28055
} | class ____(
ObjectStorageBlock, WritableFileSystem, WritableDeploymentStorage
):
"""
Represents a container in Azure Blob Storage.
This class provides methods for downloading and uploading files and folders
to and from the Azure Blob Storage container.
Attributes:
container_name: The name of the Azure Blob Storage container.
credentials: The credentials to use for authentication with Azure.
base_folder: A base path to a folder within the container to use
for reading and writing objects.
"""
_block_type_name = "Azure Blob Storage Container"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/54e3fa7e00197a4fbd1d82ed62494cb58d08c96a-250x250.png" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-azure" # noqa
container_name: str = Field(
default=..., description="The name of a Azure Blob Storage container."
)
credentials: AzureBlobStorageCredentials = Field(
default_factory=AzureBlobStorageCredentials,
description="The credentials to use for authentication with Azure.",
)
base_folder: Optional[str] = Field(
default=None,
description=(
"A base path to a folder within the container to use "
"for reading and writing objects."
),
)
def _get_path_relative_to_base_folder(self, path: Optional[str] = None) -> str:
if path is None and self.base_folder is None:
return ""
if path is None:
return self.base_folder
if self.base_folder is None:
return path
return (Path(self.base_folder) / Path(path)).as_posix()
@sync_compatible
async def download_folder_to_path(
self,
from_folder: str,
to_folder: Union[str, Path],
**download_kwargs: Dict[str, Any],
) -> Coroutine[Any, Any, Path]:
"""Download a folder from the container to a local path.
Args:
from_folder: The folder path in the container to download.
to_folder: The local path to download the folder to.
**download_kwargs: Additional keyword arguments passed into
`BlobClient.download_blob`.
Returns:
The local path where the folder was downloaded.
Example:
Download the contents of container folder `folder` from the container
to the local folder `local_folder`:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
block.download_folder_to_path(
from_folder="folder",
to_folder="local_folder"
)
```
"""
self.logger.info(
"Downloading folder from container %s to path %s",
self.container_name,
to_folder,
)
full_container_path = self._get_path_relative_to_base_folder(from_folder)
async with self.credentials as credentials:
async with credentials.get_container_client(
self.container_name
) as container_client:
try:
async for blob in container_client.list_blobs(
name_starts_with=full_container_path
):
blob_path = blob.name
local_path = Path(to_folder) / Path(blob_path).relative_to(
full_container_path
)
local_path.parent.mkdir(parents=True, exist_ok=True)
async with container_client.get_blob_client(
blob_path
) as blob_client:
blob_obj = await blob_client.download_blob(
**download_kwargs
)
with local_path.open(mode="wb") as to_file:
await blob_obj.readinto(to_file)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to download from container"
f" {self.container_name}: {exc.reason}"
) from exc
return Path(to_folder)
@sync_compatible
async def download_object_to_file_object(
self,
from_path: str,
to_file_object: BinaryIO,
**download_kwargs: Dict[str, Any],
) -> Coroutine[Any, Any, BinaryIO]:
"""
Downloads an object from the container to a file object.
Args:
from_path : The path of the object to download within the container.
to_file_object: The file object to download the object to.
**download_kwargs: Additional keyword arguments for the download
operation.
Returns:
The file object that the object was downloaded to.
Example:
Download the object `object` from the container to a file object:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
with open("file.txt", "wb") as f:
block.download_object_to_file_object(
from_path="object",
to_file_object=f
)
```
"""
self.logger.info(
"Downloading object from container %s to file object", self.container_name
)
full_container_path = self._get_path_relative_to_base_folder(from_path)
async with self.credentials as credentials:
async with credentials.get_blob_client(
self.container_name, full_container_path
) as blob_client:
try:
blob_obj = await blob_client.download_blob(**download_kwargs)
await blob_obj.download_to_stream(to_file_object)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to download from container"
f" {self.container_name}: {exc.reason}"
) from exc
return to_file_object
@sync_compatible
async def download_object_to_path(
self,
from_path: str,
to_path: Union[str, Path],
**download_kwargs: Dict[str, Any],
) -> Coroutine[Any, Any, Path]:
"""
Downloads an object from a container to a specified path.
Args:
from_path: The path of the object in the container.
to_path: The path where the object will be downloaded to.
**download_kwargs (Dict[str, Any]): Additional keyword arguments
for the download operation.
Returns:
The path where the object was downloaded to.
Example:
Download the object `object` from the container to the local path
`file.txt`:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
block.download_object_to_path(
from_path="object",
to_path="file.txt"
)
```
"""
self.logger.info(
"Downloading object from container %s to path %s",
self.container_name,
to_path,
)
full_container_path = self._get_path_relative_to_base_folder(from_path)
async with self.credentials as credentials:
async with credentials.get_blob_client(
self.container_name, full_container_path
) as blob_client:
try:
blob_obj = await blob_client.download_blob(**download_kwargs)
path = Path(to_path)
path.parent.mkdir(parents=True, exist_ok=True)
with path.open(mode="wb") as to_file:
await blob_obj.readinto(to_file)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to download from container"
f" {self.container_name}: {exc.reason}"
) from exc
return path
@sync_compatible
async def upload_from_file_object(
self, from_file_object: BinaryIO, to_path: str, **upload_kwargs: Dict[str, Any]
) -> Coroutine[Any, Any, str]:
"""
Uploads an object from a file object to the specified path in the blob
storage container.
Args:
from_file_object: The file object to upload.
to_path: The path in the blob storage container to upload the
object to.
**upload_kwargs: Additional keyword arguments to pass to the
upload_blob method.
Returns:
The path where the object was uploaded to.
Example:
Upload a file object to the container at the path `object`:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
with open("file.txt", "rb") as f:
block.upload_from_file_object(
from_file_object=f,
to_path="object"
)
```
"""
self.logger.info(
"Uploading object to container %s with key %s", self.container_name, to_path
)
full_container_path = self._get_path_relative_to_base_folder(to_path)
async with self.credentials as credentials:
async with credentials.get_blob_client(
self.container_name, full_container_path
) as blob_client:
try:
await blob_client.upload_blob(from_file_object, **upload_kwargs)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to upload from container"
f" {self.container_name}: {exc.reason}"
) from exc
return to_path
@sync_compatible
async def upload_from_path(
self, from_path: Union[str, Path], to_path: str, **upload_kwargs: Dict[str, Any]
) -> Coroutine[Any, Any, str]:
"""
Uploads an object from a local path to the specified destination path in the
blob storage container.
Args:
from_path: The local path of the object to upload.
to_path: The destination path in the blob storage container.
**upload_kwargs: Additional keyword arguments to pass to the
`upload_blob` method.
Returns:
The destination path in the blob storage container.
Example:
Upload a file from the local path `file.txt` to the container
at the path `object`:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
block.upload_from_path(
from_path="file.txt",
to_path="object"
)
```
"""
self.logger.info(
"Uploading object to container %s with key %s", self.container_name, to_path
)
full_container_path = self._get_path_relative_to_base_folder(to_path)
async with self.credentials as credentials:
async with credentials.get_blob_client(
self.container_name, full_container_path
) as blob_client:
try:
with open(from_path, "rb") as f:
await blob_client.upload_blob(f, **upload_kwargs)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to upload to container"
f" {self.container_name}: {exc.reason}"
) from exc
return to_path
@sync_compatible
async def upload_from_folder(
self,
from_folder: Union[str, Path],
to_folder: str,
**upload_kwargs: Dict[str, Any],
) -> Coroutine[Any, Any, str]:
"""
Uploads files from a local folder to a specified folder in the Azure
Blob Storage container.
Args:
from_folder: The path to the local folder containing the files to upload.
to_folder: The destination folder in the Azure Blob Storage container.
**upload_kwargs: Additional keyword arguments to pass to the
`upload_blob` method.
Returns:
The full path of the destination folder in the container.
Example:
Upload the contents of the local folder `local_folder` to the container
folder `folder`:
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
block.upload_from_folder(
from_folder="local_folder",
to_folder="folder"
)
```
"""
self.logger.info(
"Uploading folder to container %s with key %s",
self.container_name,
to_folder,
)
full_container_path = self._get_path_relative_to_base_folder(to_folder)
async with self.credentials as credentials:
async with credentials.get_container_client(
self.container_name
) as container_client:
if not Path(from_folder).is_dir():
raise ValueError(f"{from_folder} is not a directory")
for path in Path(from_folder).rglob("*"):
if path.is_file():
blob_path = Path(full_container_path) / path.relative_to(
from_folder
)
async with container_client.get_blob_client(
blob_path.as_posix()
) as blob_client:
try:
await blob_client.upload_blob(
path.read_bytes(), **upload_kwargs
)
except ResourceNotFoundError as exc:
raise RuntimeError(
"An error occurred when attempting to upload to "
f"container {self.container_name}: {exc.reason}"
) from exc
return full_container_path
@sync_compatible
async def get_directory(
self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> None:
"""
Downloads the contents of a direry from the blob storage to a local path.
Used to enable flow code storage for deployments.
Args:
from_path: The path of the directory in the blob storage.
local_path: The local path where the directory will be downloaded.
"""
await self.download_folder_to_path(from_path, local_path)
@sync_compatible
async def put_directory(
self,
local_path: Optional[str] = None,
to_path: Optional[str] = None,
ignore_file: Optional[str] = None,
) -> None:
"""
Uploads a directory to the blob storage.
Used to enable flow code storage for deployments.
Args:
local_path: The local path of the directory to upload. Defaults to
current directory.
to_path: The destination path in the blob storage. Defaults to
root directory.
ignore_file: The path to a file containing patterns to ignore
during upload.
"""
to_path = "" if to_path is None else to_path
if local_path is None:
local_path = "."
included_files = None
if ignore_file:
with open(ignore_file, "r") as f:
ignore_patterns = f.readlines()
included_files = filter_files(local_path, ignore_patterns)
for local_file_path in Path(local_path).expanduser().rglob("*"):
if (
included_files is not None
and str(local_file_path.relative_to(local_path)) not in included_files
):
continue
elif not local_file_path.is_dir():
remote_file_path = Path(to_path) / local_file_path.relative_to(
local_path
)
with open(local_file_path, "rb") as local_file:
local_file_content = local_file.read()
await self.write_path(
remote_file_path.as_posix(), content=local_file_content
)
@sync_compatible
async def read_path(self, path: str) -> bytes:
"""
Reads the contents of a file at the specified path and returns it as bytes.
Used to enable results storage.
Args:
path: The path of the file to read.
Returns:
The contents of the file as bytes.
"""
file_obj = BytesIO()
await self.download_object_to_file_object(path, file_obj)
return file_obj.getvalue()
@sync_compatible
async def write_path(self, path: str, content: bytes) -> None:
"""
Writes the content to the specified path in the blob storage.
Used to enable results storage.
Args:
path: The path where the content will be written.
content: The content to be written.
"""
await self.upload_from_file_object(BytesIO(content), path)
@sync_compatible
async def list_blobs(self) -> List[str]:
"""
Lists blobs available within the specified Azure container.
Used to introspect your containers.
Returns:
A list of the blobs within your container.
Example:
List the blobs associated with a container.
```python
from prefect_azure import AzureBlobStorageCredentials
from prefect_azure.blob_storage import AzureBlobStorageContainer
credentials = AzureBlobStorageCredentials(
connection_string="connection_string",
)
block = AzureBlobStorageContainer(
container_name="container",
credentials=credentials,
)
block.list_blobs()
```
"""
self.logger.info(
"Listing the blobs within container %s",
self.container_name,
)
async with self.credentials as credentials:
async with credentials.get_container_client(
self.container_name
) as container_client:
blobs = container_client.list_blobs()
filenames = []
async for blob in blobs:
filenames.append(blob.name)
return filenames
| AzureBlobStorageContainer |
python | Pylons__pyramid | tests/test_config/test_assets.py | {
"start": 33976,
"end": 34658
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.config.assets import DirectoryOverride
return DirectoryOverride
def _makeOne(self, path, source):
klass = self._getTargetClass()
return klass(path, source)
def test_it_match(self):
source = DummyAssetSource()
o = self._makeOne('foo/', source)
result = o('foo/something.pt')
self.assertEqual(result, (source, 'something.pt'))
def test_it_no_match(self):
source = DummyAssetSource()
o = self._makeOne('foo/', source)
result = o('baz/notfound.pt')
self.assertEqual(result, None)
| TestDirectoryOverride |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs.py | {
"start": 355,
"end": 1135
} | class ____:
hidden_mutable_default: list[int] = default_function()
class_variable: typing.ClassVar[list[int]] = default_function()
another_class_var: ClassVar[list[int]] = default_function()
fine_path: Path = Path()
fine_date: datetime.date = datetime.date(2042, 1, 1)
fine_timedelta: datetime.timedelta = datetime.timedelta(hours=7)
fine_tuple: tuple[int] = tuple([1])
fine_regex: re.Pattern = re.compile(r".*")
fine_float: float = float("-inf")
fine_int: int = int(12)
fine_complex: complex = complex(1, 2)
fine_str: str = str("foo")
fine_bool: bool = bool("foo")
fine_fraction: Fraction = Fraction(1, 2)
DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES = ImmutableType(40)
DEFAULT_A_FOR_ALL_DATACLASSES = A([1, 2, 3])
@define
| A |
python | readthedocs__readthedocs.org | readthedocs/core/forms.py | {
"start": 2376,
"end": 4983
} | class ____(forms.Form):
"""
Form class that allows raising form errors before form submission.
The base ``Form`` does not support validation errors while the form is
unbound (does not have ``data`` defined). There are cases in our UI where we
want to show errors and/or disabled the form before the user has a chance to
interact with the form -- for example, when a feature is unavailable or
disabled for the user or organization.
This provides the ``clean_prevalidation`` method, which acts much like the
``clean`` method. Any validation errors raised in this method surface as non
field errors in the UI.
"""
def __init__(self, *args, **kwargs):
self._prevalidation_errors = None
super().__init__(*args, **kwargs)
def is_valid(self):
# This differs from ``Form`` in that we don't care if the form is bound
return not self.errors
@property
def is_disabled(self):
return self._prevalidation_errors is not None
def full_clean(self):
"""
Extend full clean method with prevalidation cleaning.
Where :py:method:`forms.Form.full_clean` bails out if there is no bound
data on the form, this method always checks prevalidation no matter
what. This gives errors before submission and after submission.
"""
# Always call prevalidation, ``full_clean`` bails if the form is unbound
self._clean_prevalidation()
super().full_clean()
# ``full_clean`` sets ``self._errors``, so we prepend prevalidation
# errors after calling the parent ``full_clean``
if self._prevalidation_errors is not None:
non_field_errors = []
non_field_errors.extend(self._prevalidation_errors)
non_field_errors.extend(self._errors.get(NON_FIELD_ERRORS, []))
self._errors[NON_FIELD_ERRORS] = non_field_errors
def _clean_prevalidation(self):
"""
Catch validation errors raised by the subclassed ``clean_validation()``.
This wraps ``clean_prevalidation()`` using the same pattern that
:py:method:`form.Form._clean_form` wraps :py:method:`clean`. Validation
errors raised in the subclass method will be eventually added to the
form error list but :py:method:`full_clean`.
"""
try:
self.clean_prevalidation()
except forms.ValidationError as validation_error:
self._prevalidation_errors = [validation_error]
def clean_prevalidation(self):
raise NotImplementedError()
| PrevalidatedForm |
python | pytorch__pytorch | torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py | {
"start": 1220,
"end": 2130
} | class ____(ABC):
def __init__(self, optim_cls: type) -> None:
"""
Initialize the OverlappedOptimizer.
Overlappedoptimizer is a base class that child classes can implement to
specify how different optimizers will register themselves with DDP.
"""
self.optim_cls = optim_cls
@abstractmethod
def register_ddp(self, ddp: DistributedDataParallel) -> None:
"""Registers the overlapped optimizer with DDP."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support overlapped DDP."
)
@abstractmethod
def register_fsdp(self, fsdp: FullyShardedDataParallel) -> None:
"""Registers the overlapped optimizer with FSDP."""
raise NotImplementedError(
f"{self.__class__.__name__} does not support overlapped FSDP."
)
@register_overlapped(Optimizer)
| OverlappedOptimizer |
python | wandb__wandb | wandb/vendor/pygments/lexers/matlab.py | {
"start": 586,
"end": 5816
} | class ____(RegexLexer):
"""
For Matlab source code.
.. versionadded:: 0.10
"""
name = 'Matlab'
aliases = ['matlab']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ("sin", "sind", "sinh", "asin", "asind", "asinh", "cos", "cosd", "cosh",
"acos", "acosd", "acosh", "tan", "tand", "tanh", "atan", "atand", "atan2",
"atanh", "sec", "secd", "sech", "asec", "asecd", "asech", "csc", "cscd",
"csch", "acsc", "acscd", "acsch", "cot", "cotd", "coth", "acot", "acotd",
"acoth", "hypot", "exp", "expm1", "log", "log1p", "log10", "log2", "pow2",
"realpow", "reallog", "realsqrt", "sqrt", "nthroot", "nextpow2", "abs",
"angle", "complex", "conj", "imag", "real", "unwrap", "isreal", "cplxpair",
"fix", "floor", "ceil", "round", "mod", "rem", "sign")
specfun = ("airy", "besselj", "bessely", "besselh", "besseli", "besselk", "beta",
"betainc", "betaln", "ellipj", "ellipke", "erf", "erfc", "erfcx",
"erfinv", "expint", "gamma", "gammainc", "gammaln", "psi", "legendre",
"cross", "dot", "factor", "isprime", "primes", "gcd", "lcm", "rat",
"rats", "perms", "nchoosek", "factorial", "cart2sph", "cart2pol",
"pol2cart", "sph2cart", "hsv2rgb", "rgb2hsv")
elmat = ("zeros", "ones", "eye", "repmat", "rand", "randn", "linspace", "logspace",
"freqspace", "meshgrid", "accumarray", "size", "length", "ndims", "numel",
"disp", "isempty", "isequal", "isequalwithequalnans", "cat", "reshape",
"diag", "blkdiag", "tril", "triu", "fliplr", "flipud", "flipdim", "rot90",
"find", "end", "sub2ind", "ind2sub", "bsxfun", "ndgrid", "permute",
"ipermute", "shiftdim", "circshift", "squeeze", "isscalar", "isvector",
"ans", "eps", "realmax", "realmin", "pi", "i", "inf", "nan", "isnan",
"isinf", "isfinite", "j", "why", "compan", "gallery", "hadamard", "hankel",
"hilb", "invhilb", "magic", "pascal", "rosser", "toeplitz", "vander",
"wilkinson")
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.11 (R2010):
(words((
'break', 'case', 'catch', 'classdef', 'continue', 'else', 'elseif',
'end', 'enumerated', 'events', 'for', 'function', 'global', 'if',
'methods', 'otherwise', 'parfor', 'persistent', 'properties',
'return', 'spmd', 'switch', 'try', 'while'), suffix=r'\b'),
Keyword),
("(" + "|".join(elfun + specfun + elmat) + r')\b', Name.Builtin),
# line continuation with following comment:
(r'\.\.\..*$', Comment),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w)\].])\'+', Operator),
(r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float),
(r'\d+[eEf][+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
(r'(?<![\w)\].])\'', String, 'string'),
(r'[a-zA-Z_]\w*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'blockcomment': [
(r'^\s*%\}', Comment.Multiline, '#pop'),
(r'^.*\n', Comment.Multiline),
(r'.', Comment.Multiline),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Whitespace, Text, Whitespace, Punctuation,
Whitespace, Name.Function, Punctuation, Text,
Punctuation, Whitespace), '#pop'),
# function with no args
(r'(\s*)([a-zA-Z_]\w*)', bygroups(Text, Name.Function), '#pop'),
],
}
def analyse_text(text):
if re.match('^\s*%', text, re.M): # comment
return 0.2
elif re.match('^!\w+', text, re.M): # system cmd
return 0.2
line_re = re.compile('.*?\n')
| MatlabLexer |
python | doocs__leetcode | solution/3300-3399/3365.Rearrange K Substrings to Form Target String/Solution.py | {
"start": 0,
"end": 299
} | class ____:
def isPossibleToRearrange(self, s: str, t: str, k: int) -> bool:
cnt = Counter()
n = len(s)
m = n // k
for i in range(0, n, m):
cnt[s[i : i + m]] += 1
cnt[t[i : i + m]] -= 1
return all(v == 0 for v in cnt.values())
| Solution |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/len_test.py | {
"start": 982,
"end": 2117
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.eager_only_combinations())
def testKnown(self):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
self.assertLen(ds, 10)
@combinations.generate(test_base.eager_only_combinations())
def testInfinite(self):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements).repeat()
with self.assertRaisesRegex(TypeError, "infinite"):
len(ds)
@combinations.generate(test_base.eager_only_combinations())
def testUnknown(self):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements).filter(lambda x: True)
with self.assertRaisesRegex(TypeError, "unknown"):
len(ds)
@combinations.generate(test_base.graph_only_combinations())
def testGraphMode(self):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
with self.assertRaisesRegex(
TypeError,
r"`tf.data.Dataset` only supports `len` in eager mode. Use "
r"`tf.data.Dataset.cardinality\(\)` instead."):
len(ds)
if __name__ == "__main__":
test.main()
| LenTest |
python | walkccc__LeetCode | solutions/2031. Count Subarrays With More Ones Than Zeros/2031.py | {
"start": 414,
"end": 755
} | class ____:
def subarraysWithMoreZerosThanOnes(self, nums: list[int]) -> int:
MOD = 1_000_000_007
ans = 0
prefix = 0
tree = FenwichTree(len(nums))
tree.add(0, 1)
for num in nums:
prefix += -1 if num == 0 else 1
ans += tree.get(prefix - 1)
ans %= MOD
tree.add(prefix, 1)
return ans
| Solution |
python | joke2k__faker | faker/providers/internet/ko_KR/__init__.py | {
"start": 46,
"end": 344
} | class ____(InternetProvider):
free_email_domains = (
"gmail.com",
"daum.net",
"hotmail.com",
"hanmail.net",
"naver.com",
"nate.com",
"live.com",
"dreamwiz.com",
)
tlds = ("com", "com", "com", "kr", "kr", "net", "org")
| Provider |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/components/translation.py | {
"start": 1068,
"end": 3929
} | class ____(AssetSpecUpdateKwargs, Resolvable):
for_sheet: Optional[ResolvedTargetedTableauTranslationFn] = None
for_dashboard: Optional[ResolvedTargetedTableauTranslationFn] = None
for_data_source: Optional[ResolvedTargetedTableauTranslationFn] = None
def resolve_multilayer_translation(context: ResolutionContext, model):
"""The Tableau translation schema supports defining global transforms
as well as per-content-type transforms. This resolver composes the
per-content-type transforms with the global transforms.
"""
info = TranslatorResolvingInfo(
asset_attributes=model,
resolution_context=context,
model_key="translation",
)
def _translation_fn(base_asset_spec: AssetSpec, data: TableauTranslatorData):
processed_spec = info.get_asset_spec(
base_asset_spec,
{
"data": data,
"spec": base_asset_spec,
},
)
nested_translation_fns = resolve_fields(
model=model,
resolved_cls=TableauAssetArgs,
context=context.with_scope(
**{
"data": data,
"spec": processed_spec,
}
),
)
for_sheet = nested_translation_fns.get("for_sheet")
for_dashboard = nested_translation_fns.get("for_dashboard")
for_data_source = nested_translation_fns.get("for_data_source")
if data.content_type == TableauContentType.SHEET and for_sheet:
return for_sheet(processed_spec, data)
if data.content_type == TableauContentType.DASHBOARD and for_dashboard:
return for_dashboard(processed_spec, data)
if data.content_type == TableauContentType.DATA_SOURCE and for_data_source:
return for_data_source(processed_spec, data)
return processed_spec
return _translation_fn
ResolvedMultilayerTranslationFn: TypeAlias = Annotated[
TranslationFn,
Resolver(
resolve_multilayer_translation,
model_field_type=Union[str, TableauAssetArgs.model()],
),
]
def create_tableau_component_translator(component_cls):
"""Creates a translator class for a Tableau component."""
class TableauComponentTranslator(
create_component_translator_cls(component_cls, DagsterTableauTranslator),
ComponentTranslator[component_cls],
):
def __init__(self, component):
self._component = component
def get_asset_spec(self, data: TableauTranslatorData) -> AssetSpec:
base_asset_spec = super().get_asset_spec(data)
if self.component.translation is None:
return base_asset_spec
else:
return self.component.translation(base_asset_spec, data)
return TableauComponentTranslator
| TableauAssetArgs |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_ops_test.py | {
"start": 4406,
"end": 9063
} | class ____(test_util.TensorFlowTestCase):
def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return indices, values
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
return (sparse_tensor.SparseTensor.from_value(indices),
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def _AssertResultsNotSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def testInt32AndFloat32(self):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with test_util.force_cpu():
for indices in (indices_v,
sparse_tensor.SparseTensor.from_value(indices_v)):
for values in (values_v,
sparse_tensor.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat32(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = self.evaluate(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt32AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int32, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat64NonCanonicalOrder(self):
vocab_size = 50
vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
with test_util.force_cpu():
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size_tensor, already_sorted=True)
output = self.evaluate(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testShouldSetLastDimensionInDynamicShape(self):
with ops.Graph().as_default():
shape = constant_op.constant([2, 2], dtype=dtypes.int64)
dynamic_shape = array_ops.placeholder_with_default(shape, shape=[2])
ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[1, 3],
dense_shape=dynamic_shape)
values = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[0.4, 0.7],
dense_shape=dynamic_shape)
merged = sparse_ops.sparse_merge(
sp_ids=ids, sp_values=values, vocab_size=5)
self.assertEqual(5, merged.get_shape()[1])
| SparseMergeTest |
python | agronholm__apscheduler | src/apscheduler/triggers/date.py | {
"start": 273,
"end": 1246
} | class ____(Trigger):
"""
Triggers once on the given date/time.
:param run_time: the date/time to run the job at
"""
run_time: datetime = attrs.field(
converter=as_aware_datetime, validator=instance_of(datetime)
)
_completed: bool = attrs.field(init=False, eq=False, default=False)
def next(self) -> datetime | None:
if not self._completed:
self._completed = True
return self.run_time
else:
return None
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"run_time": self.run_time,
"completed": self._completed,
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
self.run_time = state["run_time"]
self._completed = state["completed"]
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self.run_time}')"
| DateTrigger |
python | getsentry__sentry | src/sentry/dashboards/endpoints/organization_dashboards.py | {
"start": 2123,
"end": 2249
} | class ____(IntEnum):
FRONTEND_SESSION_HEALTH = 1
BACKEND_QUERIES = 2
BACKEND_QUERIES_SUMMARY = 3
| PrebuiltDashboardId |
python | ray-project__ray | python/ray/tune/search/variant_generator.py | {
"start": 16739,
"end": 17305
} | class ____(dict):
def __init__(self, *args, **kwds):
super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)
self.__dict__ = self
def __getattribute__(self, item):
value = dict.__getattribute__(self, item)
if not _is_resolved(value):
raise RecursiveDependencyError(
"`{}` recursively depends on {}".format(item, value)
)
elif isinstance(value, dict):
return _UnresolvedAccessGuard(value)
else:
return value
@DeveloperAPI
| _UnresolvedAccessGuard |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 138940,
"end": 139298
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
widgets: Optional[SqlDashboardWidgetOutput] = Field(
None,
description=(
"Widgets executed in the run. Only SQL query based widgets are listed."
),
)
| SqlDashboardOutput |
python | pytorch__pytorch | torch/_inductor/autoheuristic/artifacts/_MMRankingA100.py | {
"start": 468,
"end": 28044
} | class ____(LearnedHeuristicDecision):
def __init__(self) -> None:
self.choices: list[Choice] = []
self.fill_choices()
def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
return (
metadata.name == self.get_name()
and metadata.shared_memory == 166912
and str(metadata.device_capa) == "(8, 0)"
)
def get_confidence_threshold(self) -> float:
return 0.0
def get_choice(self, idx: int) -> Optional[str]:
if idx < len(self.choices):
return self.choices[idx]
return None
def fill_choices(self) -> None:
self.choices.append('extern_mm')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=128_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=128_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=128_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=1')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=1')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=32_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=16_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=16_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=1_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=1_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=2')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=128_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=32_BLOCK-K=64_BLOCK-N=32_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=128_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=128_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=32_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=16_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=128_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=32_numstages=5_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=2_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=4_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=32_BLOCK-N=64_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=128_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=16_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=32_numstages=5_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=3_numwarps=8')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=4_numwarps=4')
self.choices.append('type=triton_BLOCK-M=64_BLOCK-K=64_BLOCK-N=64_numstages=5_numwarps=4')
def get_name(self) -> str:
return 'mm'
def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:
if context.get_value('arith_intensity') <= 52.6245059967041:
if context.get_value('n') <= 34.0:
if context.get_value('n') <= 18.0:
if context.get_value('k*n') <= 312.0:
return [(0.093, 12), (0.081, 16), (0.081, 148), (0.070, 10), (0.070, 17), (0.070, 149), (0.070, 151), (0.070, 150), (0.070, 14), (0.058, 11), (0.058, 15), (0.058, 13), (0.058, 122), (0.047, 121), (0.035, 123), (0.012, 92)]
else:
if context.get_value('k') <= 40.0:
return [(0.083, 42), (0.083, 46), (0.083, 44), (0.083, 40), (0.083, 128), (0.067, 45), (0.067, 43), (0.067, 41), (0.067, 169), (0.067, 171), (0.067, 168), (0.067, 129), (0.067, 170), (0.033, 103), (0.017, 121)]
else:
return [(0.112, 137), (0.104, 136), (0.101, 0), (0.081, 1), (0.073, 135), (0.069, 67), (0.066, 187), (0.058, 41), (0.050, 71), (0.046, 68), (0.046, 70), (0.031, 44), (0.027, 43), (0.027, 170), (0.019, 189), (0.019, 188), (0.015, 169), (0.015, 171), (0.012, 115), (0.012, 168), (0.012, 69), (0.004, 103)]
else:
if context.get_value('mat1_stride_0') <= 20.0:
return [(0.069, 0), (0.059, 157), (0.059, 22), (0.059, 153), (0.059, 155), (0.059, 25), (0.059, 23), (0.059, 19), (0.044, 21), (0.044, 18), (0.044, 152), (0.044, 158), (0.044, 154), (0.044, 156), (0.044, 20), (0.044, 124), (0.044, 24), (0.030, 125), (0.029, 126), (0.015, 97), (0.015, 95), (0.015, 96), (0.010, 2), (0.010, 75)]
else:
if context.get_value('k') <= 68.0:
return [(0.087, 72), (0.087, 74), (0.087, 73), (0.086, 76), (0.077, 75), (0.067, 192), (0.058, 190), (0.048, 47), (0.048, 193), (0.048, 49), (0.048, 51), (0.048, 191), (0.038, 53), (0.019, 133), (0.019, 50), (0.019, 175), (0.019, 172), (0.019, 48), (0.019, 174), (0.010, 173), (0.010, 177), (0.010, 52), (0.010, 54), (0.010, 178), (0.010, 176)]
else:
return [(0.154, 52), (0.154, 72), (0.102, 75), (0.087, 49), (0.087, 73), (0.086, 51), (0.057, 176), (0.045, 2), (0.038, 191), (0.038, 178), (0.038, 190), (0.029, 173), (0.029, 76), (0.026, 138), (0.013, 139), (0.013, 140), (0.003, 0)]
else:
if context.get_value('k') <= 35.0:
if context.get_value('k') <= 18.0:
if context.get_value('m*n') <= 19505152.0:
return [(0.151, 159), (0.140, 160), (0.129, 164), (0.055, 127), (0.051, 29), (0.044, 161), (0.044, 147), (0.040, 146), (0.040, 31), (0.037, 145), (0.026, 28), (0.022, 90), (0.022, 93), (0.022, 94), (0.022, 100), (0.022, 125), (0.022, 158), (0.022, 157), (0.011, 87), (0.011, 88), (0.011, 89), (0.011, 91), (0.011, 95), (0.011, 96), (0.011, 98), (0.011, 99)]
else:
return [(0.069, 7), (0.069, 5), (0.067, 147), (0.066, 8), (0.061, 145), (0.058, 146), (0.052, 124), (0.049, 29), (0.049, 159), (0.046, 31), (0.043, 157), (0.041, 9), (0.041, 4), (0.040, 6), (0.035, 164), (0.035, 160), (0.026, 158), (0.017, 125), (0.017, 28), (0.017, 32), (0.017, 162), (0.017, 27), (0.017, 30), (0.017, 161), (0.009, 33), (0.009, 26), (0.009, 163), (0.006, 0)]
else:
if context.get_value('n') <= 68.0:
return [(0.101, 182), (0.101, 59), (0.088, 57), (0.076, 184), (0.076, 61), (0.076, 179), (0.076, 62), (0.076, 58), (0.063, 180), (0.063, 60), (0.051, 56), (0.050, 181), (0.025, 130), (0.025, 177), (0.025, 183), (0.013, 178), (0.013, 55)]
else:
return [(0.089, 180), (0.079, 60), (0.066, 35), (0.066, 181), (0.066, 38), (0.066, 58), (0.066, 179), (0.066, 57), (0.062, 184), (0.053, 37), (0.044, 166), (0.040, 55), (0.040, 39), (0.040, 36), (0.040, 165), (0.040, 167), (0.027, 177), (0.027, 34), (0.022, 159)]
else:
if context.get_value('m*n') <= 309760.0:
return [(0.298, 0), (0.097, 140), (0.080, 83), (0.072, 86), (0.044, 84), (0.036, 178), (0.036, 117), (0.036, 82), (0.032, 120), (0.032, 85), (0.028, 119), (0.024, 130), (0.024, 109), (0.020, 108), (0.020, 118), (0.012, 104), (0.012, 116), (0.012, 141), (0.012, 144), (0.008, 105), (0.008, 106), (0.008, 111), (0.008, 114), (0.008, 107), (0.008, 132), (0.004, 101), (0.004, 102), (0.004, 110), (0.004, 112), (0.004, 113), (0.004, 131)]
else:
if context.get_value('n') <= 72.0:
return [(0.227, 77), (0.118, 78), (0.102, 194), (0.086, 80), (0.059, 57), (0.054, 81), (0.049, 196), (0.048, 197), (0.048, 59), (0.043, 79), (0.032, 195), (0.027, 180), (0.022, 3), (0.021, 141), (0.016, 60), (0.016, 142), (0.011, 183), (0.011, 0), (0.011, 144)]
else:
return [(0.140, 186), (0.132, 185), (0.109, 63), (0.085, 65), (0.078, 37), (0.077, 35), (0.062, 197), (0.047, 194), (0.046, 165), (0.046, 57), (0.039, 78), (0.039, 79), (0.039, 66), (0.039, 64), (0.016, 195), (0.008, 159)]
else:
if str(context.get_value('using_tf32')) != 'False':
if context.get_value('m*n') <= 815360.0:
if context.get_value('k') <= 1184.0:
return [(0.218, 140), (0.205, 0), (0.154, 144), (0.115, 141), (0.051, 185), (0.051, 104), (0.039, 78), (0.038, 116), (0.026, 165), (0.026, 130), (0.026, 178), (0.013, 57), (0.013, 195), (0.013, 167), (0.013, 186)]
else:
return [(0.901, 0), (0.030, 144), (0.030, 134), (0.016, 3), (0.006, 78), (0.006, 77), (0.002, 57), (0.002, 194), (0.002, 59), (0.002, 60), (0.002, 143)]
else:
if context.get_value('arith_intensity') <= 187.23922729492188:
if context.get_value('mat1_stride_0') <= 198.0:
return [(0.273, 63), (0.158, 37), (0.152, 35), (0.127, 57), (0.097, 165), (0.053, 185), (0.031, 0), (0.028, 64), (0.014, 60), (0.014, 78), (0.009, 55), (0.008, 134), (0.005, 34), (0.005, 167), (0.005, 179), (0.005, 65), (0.005, 66), (0.005, 186), (0.005, 194), (0.002, 166)]
else:
return [(0.296, 63), (0.235, 0), (0.132, 64), (0.074, 37), (0.069, 78), (0.051, 185), (0.051, 35), (0.030, 57), (0.020, 77), (0.016, 194), (0.008, 66), (0.007, 65), (0.003, 3), (0.003, 165), (0.003, 141), (0.001, 134), (0.001, 166)]
else:
return [(0.405, 0), (0.246, 37), (0.177, 63), (0.145, 35), (0.005, 185), (0.005, 65), (0.005, 64), (0.004, 57), (0.003, 66), (0.002, 165), (0.001, 78), (0.001, 55)]
else:
return [(0.357, 0), (0.112, 165), (0.101, 57), (0.094, 179), (0.086, 64), (0.074, 167), (0.067, 60), (0.064, 159), (0.033, 35), (0.007, 195), (0.002, 180), (0.001, 34), (0.001, 166), (0.001, 78)]
| MMRankingA100 |
python | pypa__setuptools | setuptools/_vendor/more_itertools/recipes.py | {
"start": 7740,
"end": 28591
} | class ____(ValueError):
def __init__(self, details=None):
msg = 'Iterables have different lengths'
if details is not None:
msg += (': index 0 has length {}; index {} has length {}').format(
*details
)
super().__init__(msg)
def _zip_equal_generator(iterables):
for combo in zip_longest(*iterables, fillvalue=_marker):
for val in combo:
if val is _marker:
raise UnequalIterablesError()
yield combo
def _zip_equal(*iterables):
# Check whether the iterables are all the same size.
try:
first_size = len(iterables[0])
for i, it in enumerate(iterables[1:], 1):
size = len(it)
if size != first_size:
raise UnequalIterablesError(details=(first_size, i, size))
# All sizes are equal, we can use the built-in zip.
return zip(*iterables)
# If any one of the iterables didn't have a length, start reading
# them until one runs out.
except TypeError:
return _zip_equal_generator(iterables)
def grouper(iterable, n, incomplete='fill', fillvalue=None):
"""Group elements from *iterable* into fixed-length groups of length *n*.
>>> list(grouper('ABCDEF', 3))
[('A', 'B', 'C'), ('D', 'E', 'F')]
The keyword arguments *incomplete* and *fillvalue* control what happens for
iterables whose length is not a multiple of *n*.
When *incomplete* is `'fill'`, the last group will contain instances of
*fillvalue*.
>>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
When *incomplete* is `'ignore'`, the last group will not be emitted.
>>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
[('A', 'B', 'C'), ('D', 'E', 'F')]
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
>>> it = grouper('ABCDEFG', 3, incomplete='strict')
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnequalIterablesError
"""
args = [iter(iterable)] * n
if incomplete == 'fill':
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == 'strict':
return _zip_equal(*args)
if incomplete == 'ignore':
return zip(*args)
else:
raise ValueError('Expected fill, strict, or ignore')
def roundrobin(*iterables):
"""Yields an item from each iterable, alternating between them.
>>> list(roundrobin('ABC', 'D', 'EF'))
['A', 'D', 'E', 'B', 'F', 'C']
This function produces the same output as :func:`interleave_longest`, but
may perform better for some inputs (in particular when the number of
iterables is small).
"""
# Algorithm credited to George Sakkis
iterators = map(iter, iterables)
for num_active in range(len(iterables), 0, -1):
iterators = cycle(islice(iterators, num_active))
yield from map(next, iterators)
def partition(pred, iterable):
"""
Returns a 2-tuple of iterables derived from the input iterable.
The first yields the items that have ``pred(item) == False``.
The second yields the items that have ``pred(item) == True``.
>>> is_odd = lambda x: x % 2 != 0
>>> iterable = range(10)
>>> even_items, odd_items = partition(is_odd, iterable)
>>> list(even_items), list(odd_items)
([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
If *pred* is None, :func:`bool` is used.
>>> iterable = [0, 1, False, True, '', ' ']
>>> false_items, true_items = partition(None, iterable)
>>> list(false_items), list(true_items)
([0, False, ''], [1, True, ' '])
"""
if pred is None:
pred = bool
t1, t2, p = tee(iterable, 3)
p1, p2 = tee(map(pred, p))
return (compress(t1, map(operator.not_, p1)), compress(t2, p2))
def powerset(iterable):
"""Yields all possible subsets of the iterable.
>>> list(powerset([1, 2, 3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
:func:`powerset` will operate on iterables that aren't :class:`set`
instances, so repeated elements in the input will produce repeated elements
in the output.
>>> seq = [1, 1, 0]
>>> list(powerset(seq))
[(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
For a variant that efficiently yields actual :class:`set` instances, see
:func:`powerset_of_sets`.
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
def unique_everseen(iterable, key=None):
"""
Yield unique elements, preserving order.
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
Sequences with a mix of hashable and unhashable items can be used.
The function will be slower (i.e., `O(n^2)`) for unhashable items.
Remember that ``list`` objects are unhashable - you can use the *key*
parameter to transform the list to a tuple (which is hashable) to
avoid a slowdown.
>>> iterable = ([1, 2], [2, 3], [1, 2])
>>> list(unique_everseen(iterable)) # Slow
[[1, 2], [2, 3]]
>>> list(unique_everseen(iterable, key=tuple)) # Faster
[[1, 2], [2, 3]]
Similarly, you may want to convert unhashable ``set`` objects with
``key=frozenset``. For ``dict`` objects,
``key=lambda x: frozenset(x.items())`` can be used.
"""
seenset = set()
seenset_add = seenset.add
seenlist = []
seenlist_add = seenlist.append
use_key = key is not None
for element in iterable:
k = key(element) if use_key else element
try:
if k not in seenset:
seenset_add(k)
yield element
except TypeError:
if k not in seenlist:
seenlist_add(k)
yield element
def unique_justseen(iterable, key=None):
    """Yield elements in order, skipping runs of consecutive duplicates.

    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']
    """
    if key is None:
        # Without a key, groupby's group value is the element itself.
        return (value for value, _group in groupby(iterable))
    # With a key, the group value is the key; take the first actual
    # element out of each group instead.
    return (next(group) for _value, group in groupby(iterable, key))
def unique(iterable, key=None, reverse=False):
    """Yield the distinct elements of *iterable* in sorted order.

    >>> list(unique([[1, 2], [3, 4], [1, 2]]))
    [[1, 2], [3, 4]]

    *key* and *reverse* are passed through to :func:`sorted`, so the
    elements need not be hashable -- only comparable.

    >>> list(unique('ABBcCAD', str.casefold))
    ['A', 'B', 'c', 'D']
    >>> list(unique('ABBcCAD', str.casefold, reverse=True))
    ['D', 'c', 'B', 'A']
    """
    # Sorting brings equal elements together, so a single pass of
    # unique_justseen removes every duplicate.
    ordered = sorted(iterable, key=key, reverse=reverse)
    return unique_justseen(ordered, key=key)
def iter_except(func, exception, first=None):
    """Call *func* repeatedly, yielding each result, until *exception* is
    raised.

    Converts a call-until-exception API into an iterator -- like
    ``iter(func, sentinel)``, but with an exception as the stop signal.

    >>> l = [0, 1, 2]
    >>> list(iter_except(l.pop, IndexError))
    [2, 1, 0]

    *exception* may also be a tuple of exception types:

    >>> l = [1, 2, 3, '...', 4, 5, 6]
    >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
    [7, 6, 5]

    If *first* is given, ``first()`` is called once and its result
    yielded before the *func* loop begins.
    """
    try:
        if first is not None:
            yield first()
        while True:
            yield func()
    except exception:
        # The designated exception is the normal termination signal.
        return
def first_true(iterable, default=None, pred=None):
    """Return the first value in *iterable* for which *pred* is true,
    or *default* if there is none.

    When *pred* is ``None``, the first truthy value is returned.

    >>> first_true(range(10))
    1
    >>> first_true(range(10), pred=lambda x: x > 5)
    6
    >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
    'missing'
    """
    if pred is None:
        pred = bool
    for item in iterable:
        if pred(item):
            return item
    return default
def random_product(*args, repeat=1):
    """Draw one random item from each of the input iterables.

    >>> random_product('abc', range(4), 'XYZ')  # doctest:+SKIP
    ('c', 3, 'Z')

    With the *repeat* keyword, that many items are drawn from each
    iterable (the whole sequence of pools repeats).

    >>> random_product('abcd', range(4), repeat=2)  # doctest:+SKIP
    ('a', 2, 'd', 3)

    Equivalent to taking a random selection from
    ``itertools.product(*args, repeat=repeat)``.
    """
    # Materialize each pool once, then repeat the whole list of pools.
    pools = [tuple(arg) for arg in args] * repeat
    return tuple(map(choice, pools))
def random_permutation(iterable, r=None):
    """Return a random *r*-length permutation of the elements in *iterable*.

    When *r* is ``None`` (the default), the permutation uses every
    element.

    >>> random_permutation(range(5))  # doctest:+SKIP
    (3, 4, 0, 1, 2)

    Equivalent to taking a random selection from
    ``itertools.permutations(iterable, r)``.
    """
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    # sample() draws r distinct elements in random order -- exactly a
    # (partial) permutation.
    return tuple(sample(pool, r))
def random_combination(iterable, r):
    """Return a random *r*-length subsequence of the elements in *iterable*.

    >>> random_combination(range(5), 3)  # doctest:+SKIP
    (2, 3, 4)

    Equivalent to taking a random selection from
    ``itertools.combinations(iterable, r)``.
    """
    pool = tuple(iterable)
    # Choose r distinct positions and sort them, so the result preserves
    # the pool's original order -- matching what combinations() yields.
    chosen = sorted(sample(range(len(pool)), r))
    return tuple(map(pool.__getitem__, chosen))
def random_combination_with_replacement(iterable, r):
    """Return a random *r*-length subsequence of elements in *iterable*,
    allowing individual elements to be repeated.

    >>> random_combination_with_replacement(range(3), 5)  # doctest:+SKIP
    (0, 0, 1, 2, 2)

    Equivalent to taking a random selection from
    ``itertools.combinations_with_replacement(iterable, r)``.
    """
    pool = tuple(iterable)
    size = len(pool)
    # Positions are drawn independently (replacement allowed), then
    # sorted so the result preserves pool order.
    chosen = sorted(randrange(size) for _ in range(r))
    return tuple(pool[index] for index in chosen)
def nth_combination(iterable, r, index):
    """Equivalent to ``list(combinations(iterable, r))[index]``.
    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`nth_combination` computes the subsequence at
    sort position *index* directly, without computing the previous
    subsequences.
    >>> nth_combination(range(5), 3, 5)
    (0, 3, 4)
    ``ValueError`` will be raised If *r* is negative or greater than the length
    of *iterable*.
    ``IndexError`` will be raised if the given *index* is invalid.
    """
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError
    # Compute c = C(n, r) with the multiplicative formula; using
    # k = min(r, n - r) takes the smaller of the two symmetric loop counts.
    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i
    # Negative indices count back from the end, like sequence indexing.
    if index < 0:
        index += c
    if (index < 0) or (index >= c):
        raise IndexError
    # Select one element per step.  At each step c is rescaled to the
    # number of combinations that begin with the current candidate;
    # skipping a candidate subtracts that count from the remaining index.
    result = []
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])
    return tuple(result)
def prepend(value, iterator):
    """Yield *value* first, then every element of *iterator*.

    >>> value = '0'
    >>> iterator = ['1', '2', '3']
    >>> list(prepend(value, iterator))
    ['0', '1', '2', '3']

    To prepend several values at once, use :func:`itertools.chain` or
    :func:`value_chain` directly.
    """
    # chain() does the lazy concatenation; the one-element tuple wraps
    # the value being prepended.
    return chain((value,), iterator)
def convolve(signal, kernel):
    """Convolve the iterable *signal* with the iterable *kernel*.
    >>> signal = (1, 2, 3, 4, 5)
    >>> kernel = [3, 2, 1]
    >>> list(convolve(signal, kernel))
    [3, 8, 14, 20, 26, 14, 5]
    Note: the input arguments are not interchangeable, as the *kernel*
    is immediately consumed and stored.
    """
    # This implementation intentionally doesn't match the one in the itertools
    # documentation.
    # Reverse the kernel once, so each output value is a straight dot
    # product of the kernel with the sliding window of recent samples.
    kernel = tuple(kernel)[::-1]
    n = len(kernel)
    # Fixed-size window pre-filled with n zeros; maxlen makes old samples
    # fall off the left automatically as new ones are appended.
    window = deque([0], maxlen=n) * n
    # The n - 1 trailing zeros flush the tail of the convolution out of
    # the window after the signal is exhausted.
    for x in chain(signal, repeat(0, n - 1)):
        window.append(x)
        yield _sumprod(kernel, window)
def before_and_after(predicate, it):
    """A variant of :func:`takewhile` that allows complete access to the
    remainder of the iterator.
    >>> it = iter('ABCdEfGhI')
    >>> all_upper, remainder = before_and_after(str.isupper, it)
    >>> ''.join(all_upper)
    'ABC'
    >>> ''.join(remainder) # takewhile() would lose the 'd'
    'dEfGhI'
    Note that the first iterator must be fully consumed before the second
    iterator can generate valid results.
    """
    it = iter(it)
    # Holds the first non-matching element (if any) so the remainder
    # iterator can replay it before continuing with the rest of `it`.
    transition = []
    def true_iterator():
        # Yield matching elements; on the first mismatch, stash that
        # element in `transition` and stop (unlike takewhile, which
        # would discard it).
        for elem in it:
            if predicate(elem):
                yield elem
            else:
                transition.append(elem)
                return
    # Note: this is different from itertools recipes to allow nesting
    # before_and_after remainders into before_and_after again. See tests
    # for an example.
    remainder_iterator = chain(transition, it)
    return true_iterator(), remainder_iterator
def triplewise(iterable):
    """Return overlapping triplets from *iterable*.

    >>> list(triplewise('ABCDE'))
    [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
    """
    # Three independent copies of the stream, advanced by 0, 1, and 2
    # positions; zip() stops when the most-advanced copy runs out, so
    # inputs shorter than three items produce nothing.
    a, b, c = tee(iterable, 3)
    next(b, None)
    next(c, None)
    next(c, None)
    return zip(a, b, c)
def sliding_window(iterable, n):
    """Return a sliding window of width *n* over *iterable*.

    >>> list(sliding_window(range(6), 4))
    [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]

    If *iterable* has fewer than *n* items, nothing is yielded:

    >>> list(sliding_window(range(3), 4))
    []

    For a variant with more features, see :func:`windowed`.
    """
    iterator = iter(iterable)
    # Pre-load the first n - 1 items; each later item completes one
    # window, and maxlen evicts the oldest item automatically.
    window = deque(maxlen=n)
    window.extend(islice(iterator, n - 1))
    for item in iterator:
        window.append(item)
        yield tuple(window)
def subslices(iterable):
    """Return all contiguous non-empty subslices of *iterable*.

    >>> list(subslices('ABC'))
    [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]

    This is similar to :func:`substrings`, but emits items in a different
    order.
    """
    seq = list(iterable)
    # Every (start, stop) pair with start < stop names one non-empty
    # slice; combinations() yields them in lexicographic order.
    boundaries = combinations(range(len(seq) + 1), 2)
    return (seq[start:stop] for start, stop in boundaries)
def polynomial_from_roots(roots):
    """Compute a polynomial's coefficients from its roots.
    >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
    >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
    [1, -4, -17, 60]
    """
    # Each root r contributes a linear factor x - r, i.e. the coefficient
    # pair (1, -r).
    factors = zip(repeat(1), map(operator.neg, roots))
    # Multiplying polynomials is convolving their coefficient sequences;
    # fold all factors together, starting from the constant polynomial 1.
    return list(reduce(convolve, factors, [1]))
def iter_index(iterable, value, start=0, stop=None):
    """Yield the index of each place in *iterable* that *value* occurs,
    beginning with index *start* and ending before index *stop*.
    >>> list(iter_index('AABCADEAF', 'A'))
    [0, 1, 4, 7]
    >>> list(iter_index('AABCADEAF', 'A', 1)) # start index is inclusive
    [1, 4, 7]
    >>> list(iter_index('AABCADEAF', 'A', 1, 7)) # stop index is not inclusive
    [1, 4]
    The behavior for non-scalar *values* matches the built-in Python types.
    >>> list(iter_index('ABCDABCD', 'AB'))
    [0, 4]
    >>> list(iter_index([0, 1, 2, 3, 0, 1, 2, 3], [0, 1]))
    []
    >>> list(iter_index([[0, 1], [2, 3], [0, 1], [2, 3]], [0, 1]))
    [0, 2]
    See :func:`locate` for a more general means of finding the indexes
    associated with particular values.
    """
    # Dispatch on whether the input exposes a sequence-style .index().
    seq_index = getattr(iterable, 'index', None)
    if seq_index is None:
        # Slow path for general iterables
        # Scan element by element; `is` short-circuits the identity case
        # before falling back to __eq__.
        it = islice(iterable, start, stop)
        for i, element in enumerate(it, start):
            if element is value or element == value:
                yield i
    else:
        # Fast path for sequences
        # Let the sequence's own index() do the searching: each hit
        # becomes the next search's starting point, and index() raising
        # ValueError signals there are no more occurrences.
        stop = len(iterable) if stop is None else stop
        i = start - 1
        try:
            while True:
                yield (i := seq_index(value, i + 1, stop))
        except ValueError:
            pass
def sieve(n):
    """Yield the primes less than n.
    >>> list(sieve(30))
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    """
    # 2 is the only even prime; the byte table below tracks odd numbers only.
    if n > 2:
        yield 2
    start = 3
    # data[i] == 1 exactly when i is odd and not yet crossed off: the
    # repeating (0, 1) pattern initially marks every odd index as a
    # candidate.
    data = bytearray((0, 1)) * (n // 2)
    # Only primes up to sqrt(n) need their multiples crossed off.
    limit = math.isqrt(n) + 1
    for p in iter_index(data, 1, start, limit):
        # Everything still marked below p*p is prime; emit it now.
        yield from iter_index(data, 1, start, p * p)
        # Cross off the odd multiples of p, starting at p*p (the step of
        # 2p skips the even multiples, which were never marked).
        data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
        start = p * p
    # Whatever remains marked after the crossing-off passes is prime.
    yield from iter_index(data, 1, start)
def _batched(iterable, n, *, strict=False):
    """Batch data into tuples of length *n*. If the number of items in
    *iterable* is not divisible by *n*:

    * The last batch will be shorter if *strict* is ``False``.
    * :exc:`ValueError` will be raised if *strict* is ``True``.

    >>> list(batched('ABCDEFG', 3))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G',)]

    On Python 3.13 and above, this is an alias for :func:`itertools.batched`.
    """
    if n < 1:
        raise ValueError('n must be at least one')
    iterator = iter(iterable)
    while True:
        # islice takes at most n items; an empty tuple means exhaustion.
        batch = tuple(islice(iterator, n))
        if not batch:
            return
        if strict and len(batch) != n:
            raise ValueError('batched(): incomplete batch')
        yield batch
# sys.hexversion 0x30D00A2 encodes CPython 3.13.0a2 -- presumably the
# first release where itertools.batched accepts the `strict` keyword
# used below; TODO confirm against the CPython changelog.
if hexversion >= 0x30D00A2:
    from itertools import batched as itertools_batched
    def batched(iterable, n, *, strict=False):
        return itertools_batched(iterable, n, strict=strict)
else:
    # Fall back to the pure-Python implementation above and give it the
    # shared docstring.
    batched = _batched
    batched.__doc__ = _batched.__doc__
def transpose(it):
    """Swap the rows and columns of the input matrix.
    >>> list(transpose([(1, 2, 3), (11, 22, 33)]))
    [(1, 11), (2, 22), (3, 33)]
    The caller should ensure that the dimensions of the input are compatible.
    If the input is empty, no output will be produced.
    """
    # _zip_strict is a module-level helper defined elsewhere in this
    # file; by its name, presumably a strict zip that raises on ragged
    # rows rather than truncating -- confirm against its definition.
    return _zip_strict(*it)
def reshape(matrix, cols):
    """Reshape the 2-D input *matrix* to have a column count given by *cols*.
    >>> matrix = [(0, 1), (2, 3), (4, 5)]
    >>> cols = 3
    >>> list(reshape(matrix, cols))
    [(0, 1, 2), (3, 4, 5)]
    """
    # Flatten the matrix row by row, then regroup the flat stream into
    # rows of the requested width.
    return batched(chain.from_iterable(matrix), cols)
def matmul(m1, m2):
    """Multiply two matrices.
    >>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
    [(49, 80), (41, 60)]
    The caller should ensure that the dimensions of the input matrices are
    compatible with each other.
    """
    # The output has as many columns as m2.
    n = len(m2[0])
    # Each output entry is the dot product (via _sumprod) of a row of m1
    # with a column of m2; product() pairs every row with every column in
    # row-major order, and batched() regroups the flat results into rows.
    return batched(starmap(_sumprod, product(m1, transpose(m2))), n)
def factor(n):
    """Yield the prime factors of n.
    >>> list(factor(360))
    [2, 2, 2, 3, 3, 5]
    """
    # Trial division by primes up to sqrt(n); each prime is divided out
    # completely (with multiplicity) as soon as it is found.
    for prime in sieve(math.isqrt(n) + 1):
        while not n % prime:
            yield prime
            n //= prime
        # Early exit once n is fully factored.
        if n == 1:
            return
    # Anything left over is a single prime factor larger than the square
    # root of the original n.
    if n > 1:
        yield n
def polynomial_eval(coefficients, x):
    """Evaluate a polynomial at a specific value.
    Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5:
    >>> coefficients = [1, -4, -17, 60]
    >>> x = 2.5
    >>> polynomial_eval(coefficients, x)
    8.125
    """
    n = len(coefficients)
    if n == 0:
        return x * 0  # coerce zero to the type of x
    # Pair each coefficient with the matching descending power of x
    # (x**(n-1) down to x**0) and take the dot product via _sumprod.
    powers = map(pow, repeat(x), reversed(range(n)))
    return _sumprod(coefficients, powers)
def sum_of_squares(it):
    """Return the sum of the squares of the input values.
    >>> sum_of_squares([10, 20, 30])
    1400
    """
    # Duplicate the stream with tee and take its dot product with itself
    # via the module-level _sumprod helper.
    return _sumprod(*tee(it))
def polynomial_derivative(coefficients):
    """Compute the first derivative of a polynomial.

    Coefficients are given highest power first.  Example: the derivative
    of x^3 - 4 * x^2 - 17 * x + 60:

    >>> polynomial_derivative([1, -4, -17, 60])
    [3, -8, -17]
    """
    # The power rule: the coefficient of x**p becomes coefficient * p.
    # The trailing constant term is dropped because zip() stops at the
    # shorter of the two sequences.
    degree = len(coefficients) - 1
    return [
        coefficient * power
        for coefficient, power in zip(coefficients, range(degree, 0, -1))
    ]
def totient(n):
    """Return the count of natural numbers up to *n* that are coprime with *n*.
    >>> totient(9)
    6
    >>> totient(12)
    4
    """
    # The itertools docs use unique_justseen instead of set; see
    # https://github.com/more-itertools/more-itertools/issues/823
    # Euler's product formula: multiply n by (1 - 1/p) for each distinct
    # prime p dividing n, using exact integer arithmetic (divide before
    # multiplying so intermediate values stay integral).
    for p in set(factor(n)):
        n = n // p * (p - 1)
    return n
| UnequalIterablesError |
python | ray-project__ray | python/ray/data/_internal/metadata_exporter.py | {
"start": 1527,
"end": 2997
} | class ____:
"""Represents a data processing operator in the DAG.
Attributes:
name: The name of the operator.
id: The unique identifier of the operator within the DAG structure, typically
incorporating a position or index (e.g., "ReadParquet_0"). This is used for
referencing operators within the DAG topology.
uuid: The system-generated UUID of the physical operator instance. This is the
internal unique identifier created when the operator instance is initialized
and remains consistent throughout its lifetime.
input_dependencies: List of operator IDs that this operator depends on for input.
sub_stages: List of sub-stages contained within this operator.
args: User-specified arguments associated with the operator, which may
include configuration settings, options, or other relevant data for the operator.
execution_start_time: The timestamp when the operator execution begins.
execution_end_time: The timestamp when the operator execution ends.
state: The state of the operator.
"""
name: str
id: str
uuid: str
execution_start_time: Optional[float]
execution_end_time: Optional[float]
state: str
input_dependencies: List[str] = field(default_factory=list)
sub_stages: List[SubStage] = field(default_factory=list)
args: Dict[str, Any] = field(default_factory=dict)
@dataclass
| Operator |
python | google__jax | jax/experimental/array_serialization/pytree_serialization_utils.py | {
"start": 1652,
"end": 2798
} | class ____(Future[Any]):
"""A wrapper around a Future that makes it look like an async function."""
def __init__(self, future: Future[Any]):
self._future, self.pytree = future, None
def done(self):
return self._future.done()
def result(self, *args, **kw):
return self._future.result(*args, **kw)
def __await__(self):
while not self.done():
yield
return self.result()
def __repr__(self):
return f"PyTreeFuture(done={self.done()}, pytree={self.pytree})"
def serialize_pytreedef(node) -> dict[str, Any]:
builder = flatbuffers.Builder(65536)
exported = _serialize_pytreedef(builder, node)
builder.Finish(exported)
root_repr = base64.b64encode(builder.Output()).decode("utf-8")
leaf_count = node.num_leaves
pytree_repr = {_TREE_REPR_KEY: root_repr,
_LEAF_IDS_KEY: list(range(leaf_count))}
return pytree_repr
def deserialize_pytreedef(pytreedef_repr: dict[str, Any]):
buf = base64.b64decode(pytreedef_repr[_TREE_REPR_KEY])
exp = ser_flatbuf.PyTreeDef.GetRootAs(buf)
treestruct = jax.tree.structure(_deserialize_pytreedef_to_pytree(exp))
return treestruct
| PyTreeFuture |
python | crytic__slither | slither/detectors/reentrancy/reentrancy_eth.py | {
"start": 562,
"end": 8599
} | class ____(Reentrancy):
ARGUMENT = "reentrancy-eth"
HELP = "Reentrancy vulnerabilities (theft of ethers)"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#reentrancy-vulnerabilities"
)
WIKI_TITLE = "Reentrancy vulnerabilities"
# region wiki_description
WIKI_DESCRIPTION = """
Detection of the [reentrancy bug](https://github.com/trailofbits/not-so-smart-contracts/tree/master/reentrancy).
Do not report reentrancies that don't involve Ether (see `reentrancy-no-eth`)"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
function withdrawBalance(){
// send userBalance[msg.sender] Ether to msg.sender
// if msg.sender is a contract, it will call its fallback function
if( ! (msg.sender.call.value(userBalance[msg.sender])() ) ){
throw;
}
userBalance[msg.sender] = 0;
}
```
Bob uses the re-entrancy bug to call `withdrawBalance` two times, and withdraw more than its initial deposit to the contract."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Apply the [`check-effects-interactions pattern`](http://solidity.readthedocs.io/en/v0.4.21/security-considerations.html#re-entrancy)."
STANDARD_JSON = False
def find_reentrancies(self) -> Dict[FindingKey, Set[FindingValue]]:
result: Dict[FindingKey, Set[FindingValue]] = defaultdict(set)
for contract in self.contracts: # pylint: disable=too-many-nested-blocks
variables_used_in_reentrancy = contract.state_variables_used_in_reentrant_targets
for f in contract.functions_and_modifiers_declared:
for node in f.nodes:
# dead code
if not self.KEY in node.context:
continue
if node.context[self.KEY].calls and node.context[self.KEY].send_eth:
if not any(n != node for n in node.context[self.KEY].send_eth):
continue
read_then_written = set()
for c in node.context[self.KEY].calls:
if c == node:
continue
read_then_written |= {
FindingValue(
v,
node,
tuple(sorted(nodes, key=lambda x: x.node_id)),
tuple(
sorted(
variables_used_in_reentrancy[v], key=lambda x: str(x)
)
),
)
for (v, nodes) in node.context[self.KEY].written.items()
if v in node.context[self.KEY].reads_prior_calls[c]
and (f.is_reentrant or v in variables_used_in_reentrancy)
}
if read_then_written:
# calls are ordered
finding_key = FindingKey(
function=node.function,
calls=to_hashable(node.context[self.KEY].calls),
send_eth=to_hashable(node.context[self.KEY].send_eth),
)
result[finding_key] |= set(read_then_written)
return result
def _detect(self) -> List[Output]: # pylint: disable=too-many-branches,too-many-locals
""""""
super()._detect()
reentrancies = self.find_reentrancies()
results = []
result_sorted = sorted(list(reentrancies.items()), key=lambda x: x[0].function.name)
varsWritten: List[FindingValue]
varsWrittenSet: Set[FindingValue]
for (func, calls, send_eth), varsWrittenSet in result_sorted:
calls = sorted(list(set(calls)), key=lambda x: x[0].node_id)
send_eth = sorted(list(set(send_eth)), key=lambda x: x[0].node_id)
varsWritten = sorted(varsWrittenSet, key=lambda x: (x.variable.name, x.node.node_id))
info = ["Reentrancy in ", func, ":\n"]
info += ["\tExternal calls:\n"]
for (call_info, calls_list) in calls:
info += ["\t- ", call_info, "\n"]
for call_list_info in calls_list:
if call_list_info != call_info:
info += ["\t\t- ", call_list_info, "\n"]
if calls != send_eth and send_eth:
info += ["\tExternal calls sending eth:\n"]
for (call_info, calls_list) in send_eth:
info += ["\t- ", call_info, "\n"]
for call_list_info in calls_list:
if call_list_info != call_info:
info += ["\t\t- ", call_list_info, "\n"]
info += ["\tState variables written after the call(s):\n"]
for finding_value in varsWritten:
info += ["\t- ", finding_value.node, "\n"]
for other_node in finding_value.nodes:
if other_node != finding_value.node:
info += ["\t\t- ", other_node, "\n"]
if finding_value.cross_functions:
info += [
"\t",
finding_value.variable,
" can be used in cross function reentrancies:\n",
]
for cross in finding_value.cross_functions:
info += ["\t- ", cross, "\n"]
# Create our JSON result
res = self.generate_result(info)
# Add the function with the re-entrancy first
res.add(func)
# Add all underlying calls in the function which are potentially problematic.
for (call_info, calls_list) in calls:
res.add(call_info, {"underlying_type": "external_calls"})
for call_list_info in calls_list:
if call_list_info != call_info:
res.add(
call_list_info,
{"underlying_type": "external_calls_sending_eth"},
)
# If the calls are not the same ones that send eth, add the eth sending nodes.
if calls != send_eth:
for (call_info, calls_list) in send_eth:
res.add(call_info, {"underlying_type": "external_calls_sending_eth"})
for call_list_info in calls_list:
if call_list_info != call_info:
res.add(
call_list_info,
{"underlying_type": "external_calls_sending_eth"},
)
# Add all variables written via nodes which write them.
for finding_value in varsWritten:
res.add(
finding_value.node,
{
"underlying_type": "variables_written",
"variable_name": finding_value.variable.name,
},
)
for other_node in finding_value.nodes:
if other_node != finding_value.node:
res.add(
other_node,
{
"underlying_type": "variables_written",
"variable_name": finding_value.variable.name,
},
)
# Append our result
results.append(res)
return results
| ReentrancyEth |
python | readthedocs__readthedocs.org | readthedocs/projects/views/public.py | {
"start": 13672,
"end": 15678
} | class ____(SettingsOverrideObject):
_default_class = ProjectDownloadMediaBase
def project_versions(request, project_slug):
"""
Project version list view.
Shows the available versions and lets the user choose which ones to build.
"""
max_inactive_versions = 100
project = get_object_or_404(
Project.objects.public(request.user),
slug=project_slug,
)
versions = project.versions(manager=INTERNAL).public(
user=request.user,
only_active=False,
)
active_versions = versions.filter(active=True)
# Limit inactive versions in case a project has a large number of branches or tags
# Filter inactive versions based on the query string
inactive_versions = versions.filter(active=False)
version_filter = request.GET.get("version_filter", "")
if version_filter:
inactive_versions = inactive_versions.filter(verbose_name__icontains=version_filter)
total_inactive_versions_count = inactive_versions.count()
inactive_versions = inactive_versions[:max_inactive_versions]
# If there's a wiped query string, check the string against the versions
# list and display a success message. Deleting directories doesn't know how
# to fail. :)
wiped = request.GET.get("wipe", "")
wiped_version = versions.filter(slug=wiped)
if wiped and wiped_version.exists():
messages.success(request, "Version wiped: " + wiped)
# Optimize project permission checks
prefetch_related_objects([project], "users")
return render(
request,
"projects/project_version_list.html",
{
"inactive_versions": inactive_versions,
"active_versions": active_versions,
"project": project,
"is_project_admin": AdminPermission.is_admin(request.user, project),
"max_inactive_versions": max_inactive_versions,
"total_inactive_versions_count": total_inactive_versions_count,
},
)
| ProjectDownloadMedia |
python | Lightning-AI__lightning | examples/fabric/build_your_own_trainer/run.py | {
"start": 143,
"end": 2790
} | class ____(L.LightningModule):
def __init__(self) -> None:
super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Conv2d(
in_channels=1,
out_channels=16,
kernel_size=5,
stride=1,
padding=2,
),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2),
torch.nn.Conv2d(16, 32, 5, 1, 2),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
torch.nn.Flatten(),
# fully connected layer, output 10 classes
torch.nn.Linear(32 * 7 * 7, 10),
)
self.loss_fn = torch.nn.CrossEntropyLoss()
def forward(self, x: torch.Tensor):
return self.model(x)
def training_step(self, batch, batch_idx: int):
x, y = batch
logits = self(x)
loss = self.loss_fn(logits, y)
accuracy_train = accuracy(logits.argmax(-1), y, num_classes=10, task="multiclass", top_k=1)
return {"loss": loss, "accuracy": accuracy_train}
def configure_optimizers(self):
optim = torch.optim.Adam(self.parameters(), lr=1e-4)
return {
"optimizer": optim,
"scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode="max", verbose=True),
"monitor": "val_accuracy",
"interval": "epoch",
"frequency": 1,
}
def validation_step(self, *args, **kwargs):
return self.training_step(*args, **kwargs)
def train(model):
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
train_set = MNIST(root="/tmp/data/MNIST", train=True, transform=ToTensor(), download=True)
val_set = MNIST(root="/tmp/data/MNIST", train=False, transform=ToTensor(), download=False)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=64, shuffle=True, pin_memory=torch.cuda.is_available(), num_workers=4
)
val_loader = torch.utils.data.DataLoader(
val_set, batch_size=64, shuffle=False, pin_memory=torch.cuda.is_available(), num_workers=4
)
# MPS backend currently does not support all operations used in this example.
# If you want to use MPS, set accelerator='auto' and also set PYTORCH_ENABLE_MPS_FALLBACK=1
accelerator = "cpu" if torch.backends.mps.is_available() else "auto"
trainer = MyCustomTrainer(
accelerator=accelerator, devices="auto", limit_train_batches=10, limit_val_batches=20, max_epochs=3
)
trainer.fit(model, train_loader, val_loader)
if __name__ == "__main__":
train(MNISTModule())
| MNISTModule |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/url.py | {
"start": 212,
"end": 3341
} | class ____(object):
# Standard files documentation fragment
DOCUMENTATION = r"""
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
type: str
force:
description:
- If V(yes) do not get a cached copy.
type: bool
default: no
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
use_proxy:
description:
- If V(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If V(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: yes
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without O(url_password) for sites that allow empty passwords.
type: str
url_password:
description:
- The password for use in HTTP basic authentication.
- If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
type: str
force_basic_auth:
description:
- Credentials specified with O(url_username) and O(url_password) should be passed in HTTP Header.
type: bool
default: no
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, O(client_key) is not required.
type: path
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If O(client_cert) contains both the certificate and key, this option is not required.
type: path
use_gssapi:
description:
- Use GSSAPI to perform the authentication, typically this is for Kerberos or Kerberos through Negotiate
authentication.
- Requires the Python library L(gssapi,https://github.com/pythongssapi/python-gssapi) to be installed.
- Credentials for GSSAPI can be specified with O(url_username)/O(url_password) or with the GSSAPI env var
E(KRB5CCNAME) that specified a custom Kerberos credential cache.
- NTLM authentication is B(not) supported even if the GSSAPI mech for NTLM has been installed.
type: bool
default: no
version_added: '2.11'
"""
URL_REDIRECT = r'''
options:
follow_redirects:
description:
- Whether or not the URI module should follow redirects.
type: str
default: safe
choices:
all: Will follow all redirects.
none: Will not follow any redirects.
safe: Only redirects doing GET or HEAD requests will be followed.
urllib2: Defer to urllib2 behavior (As of writing this follows HTTP redirects).
'no': (DEPRECATED, removed in 2.22) alias of V(none).
'yes': (DEPRECATED, removed in 2.22) alias of V(all).
'''
| ModuleDocFragment |
python | pola-rs__polars | py-polars/src/polars/interchange/dataframe.py | {
"start": 448,
"end": 7661
} | class ____(InterchangeDataFrame):
"""
A dataframe object backed by a Polars DataFrame.
Parameters
----------
df
The Polars DataFrame backing the dataframe object.
allow_copy
Allow data to be copied during operations on this column. If set to `False`,
a RuntimeError is raised if data would be copied.
"""
version = 0
def __init__(self, df: DataFrame, *, allow_copy: bool = True) -> None:
self._df = df
self._allow_copy = allow_copy
def __dataframe__(
self,
nan_as_null: bool = False, # noqa: FBT001
allow_copy: bool = True, # noqa: FBT001
) -> PolarsDataFrame:
"""
Construct a new dataframe object, potentially changing the parameters.
Parameters
----------
nan_as_null
Overwrite null values in the data with `NaN`.
.. warning::
This functionality has not been implemented and the parameter will be
removed in a future version.
Setting this to `True` will raise a `NotImplementedError`.
allow_copy
Allow memory to be copied to perform the conversion. If set to `False`,
causes conversions that are not zero-copy to fail.
"""
if nan_as_null:
msg = (
"functionality for `nan_as_null` has not been implemented and the"
" parameter will be removed in a future version"
"\n\nUse the default `nan_as_null=False`."
)
raise NotImplementedError(msg)
return PolarsDataFrame(self._df, allow_copy=allow_copy)
@property
def metadata(self) -> dict[str, Any]:
"""The metadata for the dataframe."""
return {}
def num_columns(self) -> int:
"""Return the number of columns in the dataframe."""
return self._df.width
def num_rows(self) -> int:
"""Return the number of rows in the dataframe."""
return self._df.height
def num_chunks(self) -> int:
"""
Return the number of chunks the dataframe consists of.
It is possible for a Polars DataFrame to consist of columns with a varying
number of chunks. This method returns the number of chunks of the first
column.
See Also
--------
polars.dataframe.frame.DataFrame.n_chunks
"""
return self._df.n_chunks("first")
def column_names(self) -> list[str]:
"""Return the column names."""
return self._df.columns
def get_column(self, i: int) -> PolarsColumn:
"""
Return the column at the indicated position.
Parameters
----------
i
Index of the column.
"""
s = self._df.to_series(i)
return PolarsColumn(s, allow_copy=self._allow_copy)
def get_column_by_name(self, name: str) -> PolarsColumn:
"""
Return the column with the given name.
Parameters
----------
name
Name of the column.
"""
s = self._df.get_column(name)
return PolarsColumn(s, allow_copy=self._allow_copy)
def get_columns(self) -> Iterator[PolarsColumn]:
"""Return an iterator yielding the columns."""
for column in self._df.get_columns():
yield PolarsColumn(column, allow_copy=self._allow_copy)
def select_columns(self, indices: Sequence[int]) -> PolarsDataFrame:
"""
Create a new dataframe by selecting a subset of columns by index.
Parameters
----------
indices
Column indices
"""
if not isinstance(indices, Sequence):
msg = "`indices` is not a sequence"
raise TypeError(msg)
if not isinstance(indices, list):
indices = list(indices)
return PolarsDataFrame(
self._df[:, indices],
allow_copy=self._allow_copy,
)
def select_columns_by_name(self, names: Sequence[str]) -> PolarsDataFrame:
"""
Create a new dataframe by selecting a subset of columns by name.
Parameters
----------
names
Column names.
"""
if not isinstance(names, Sequence):
msg = "`names` is not a sequence"
raise TypeError(msg)
return PolarsDataFrame(
self._df.select(names),
allow_copy=self._allow_copy,
)
def get_chunks(self, n_chunks: int | None = None) -> Iterator[PolarsDataFrame]:
    """
    Return an iterator yielding the chunks of the dataframe.

    Parameters
    ----------
    n_chunks
        The number of chunks to return. Must be a multiple of the number of chunks
        in the dataframe. If set to `None` (default), returns all chunks.

    Notes
    -----
    When the columns in the dataframe are chunked unevenly, or when `n_chunks` is
    higher than the number of chunks in the dataframe, a slice must be performed
    that is not on the chunk boundary. This will trigger some compute for columns
    that contain null values and boolean columns.
    """
    total_n_chunks = self.num_chunks()
    chunks = self._get_chunks_from_col_chunks()

    if (n_chunks is None) or (n_chunks == total_n_chunks):
        # Natural chunking requested: yield the chunks unchanged.
        for chunk in chunks:
            yield PolarsDataFrame(chunk, allow_copy=self._allow_copy)

    elif (n_chunks <= 0) or (n_chunks % total_n_chunks != 0):
        msg = (
            "`n_chunks` must be a multiple of the number of chunks of this"
            f" dataframe ({total_n_chunks})"
        )
        raise ValueError(msg)

    else:
        # Split each natural chunk into `subchunks_per_chunk` slices.
        subchunks_per_chunk = n_chunks // total_n_chunks
        for chunk in chunks:
            size = len(chunk)
            step = size // subchunks_per_chunk
            # Round the step up so the final subchunk absorbs the remainder.
            if size % subchunks_per_chunk != 0:
                step += 1
            for start in range(0, step * subchunks_per_chunk, step):
                yield PolarsDataFrame(
                    chunk[start : start + step, :],
                    allow_copy=self._allow_copy,
                )
def _get_chunks_from_col_chunks(self) -> Iterator[DataFrame]:
    """
    Return chunks of this dataframe according to the chunks of the first column.

    If columns are not all chunked identically, they will be rechunked like the
    first column. If copy is not allowed, this raises a RuntimeError.
    """
    # NOTE(review): the code raises CopyNotAllowedError while the docstring says
    # RuntimeError — presumably CopyNotAllowedError subclasses it; confirm.
    col_chunks = self.get_column(0).get_chunks()
    chunk_sizes = [chunk.size() for chunk in col_chunks]
    # Prefix sums give the row offsets at which each chunk starts.
    starts = [0] + list(accumulate(chunk_sizes))

    for i in range(len(starts) - 1):
        start, end = starts[i : i + 2]
        chunk = self._df[start:end, :]
        if not all(x == 1 for x in chunk.n_chunks("all")):
            if not self._allow_copy:
                msg = "unevenly chunked columns must be rechunked"
                raise CopyNotAllowedError(msg)
            # Rechunking copies data; only done when copies are allowed.
            chunk = chunk.rechunk()
        yield chunk
| PolarsDataFrame |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/notifications/daily_summary.py | {
"start": 908,
"end": 8765
} | class ____(SlackNotificationsMessageBuilder):
def __init__(
    self,
    notification: BaseNotification,
    context: Mapping[str, Any],
    recipient: Actor,
) -> None:
    """Store the notification, per-project context map, and recipient."""
    super().__init__(notification, context, recipient)
    # NOTE(review): the base class __init__ presumably assigns these same
    # attributes; the re-assignment below looks redundant — confirm before removing.
    self.notification = notification
    self.context = context
    self.recipient = recipient
def linkify_error_title(self, group):
    """Return the group title as a Slack link, optionally followed by one line of attachment text."""
    link = group.get_absolute_url(
        params={"referrer": self.notification.get_referrer(ExternalProviders.SLACK)}
    )
    title = build_attachment_title(group)
    formatted_title = self.truncate_text(title)
    attachment_text = self.get_attachment_text(group)
    if not attachment_text:
        return f"<{link}|*{escape_slack_text(formatted_title)}*>"
    # Collapse newlines and strip backticks so the text fits inside the
    # single-line inline-code span appended below.
    formatted_attachment_text = attachment_text.replace("\n", " ").replace("`", "")
    return f"<{link}|*{escape_slack_text(formatted_title)}*>\n`{self.truncate_text(formatted_attachment_text)}`"
def linkify_release(self, release, organization):
    """Return a Slack-formatted link to the release page, prefixed with a rocket emoji."""
    path = f"/releases/{release.version}/"
    url = organization.absolute_url(path)
    # parse_release derives a human-readable description from the raw version string.
    release_description = parse_release(release.version, json_loads=orjson.loads).get(
        "description"
    )
    return f":rocket: *<{url}|Release {release_description}>*\n"
def truncate_text(self, text):
    """Shorten ``text`` to MAX_CHARS_ONE_LINE characters, appending an ellipsis when cut."""
    if not text or len(text) <= MAX_CHARS_ONE_LINE:
        return text
    return text[:MAX_CHARS_ONE_LINE] + "..."
def get_attachment_text(self, group):
    """Return the group's attachment text truncated to one display line."""
    attachment_text = build_attachment_text(group)
    return self.truncate_text(attachment_text)
def build_discover_url(self, project):
    """Build a Discover homepage URL pre-filtered to the project's errors over the last 24h."""
    query_params = {
        "field": ["title", "event.type", "project", "user.display", "timestamp"],
        "name": "All Events",
        "project": project.id,
        "query": "event.type:error",
        "sort": "-timestamp",
        "statsPeriod": "24h",
        "yAxis": "count()",
    }
    # doseq expands the list-valued "field" param into repeated query args.
    query_string = urlencode(query_params, doseq=True)
    url = absolute_uri(
        f"/organizations/{project.organization.slug}/discover/homepage/?{query_string}"
    )
    return url
def build(self) -> SlackBlock:
    """Assemble the Slack Block Kit payload for the daily summary notification.

    One section per non-empty project: event counts vs. the 14-day average,
    new-in-release issues, top error/performance issues, and issues that
    escalated or regressed today.
    """
    blocks = []
    subject = self.notification.get_subject()
    blocks.append(
        self.get_markdown_block(
            f"*{subject}*\nYour comprehensive overview for today - key issues, performance insights, and more.",
            ":bell:",
        )
    )
    blocks.append(self.get_divider())
    for project_id, context in self.context.items():
        try:
            project = Project.objects.get(id=project_id)
        except Project.DoesNotExist:
            # Project may have been deleted since the summary was computed.
            continue

        if context.check_if_project_is_empty():
            continue

        project_text = f"Here's what happened in the *{project.slug}* project today:"
        blocks.append(self.get_markdown_block(project_text))

        fields = []
        event_count_text = "*Today’s Event Count*: "
        formatted_total_today = f"{context.total_today:,}"
        if features.has("organizations:discover", project.organization):
            # Link the count through to a pre-filtered Discover query.
            discover_url = self.build_discover_url(project)
            event_count_text += f"<{discover_url}|{formatted_total_today}>"
        else:
            event_count_text += formatted_total_today
        fields.append(self.make_field(event_count_text))

        # Calculate today's event count percentage against 14 day avg
        if context.comparison_period_avg > 0:  # avoid a zerodivisionerror
            percentage_diff = context.total_today / context.comparison_period_avg
            if context.total_today > context.comparison_period_avg:
                percentage_diff_text = (
                    f":warning: {percentage_diff:.0%} higher than last {COMPARISON_PERIOD}d avg"
                )
                fields.append(self.make_field(percentage_diff_text))
            else:
                percentage_diff_text = (
                    f" :tada: {percentage_diff:.0%} lower than last {COMPARISON_PERIOD}d avg"
                )
                fields.append(self.make_field(percentage_diff_text))
        blocks.append(self.get_section_fields_block(fields))

        # Add release info if we have it
        if context.new_in_release:
            fields = []
            for release_id, errors in context.new_in_release.items():
                try:
                    release = Release.objects.get(id=release_id)
                except Release.DoesNotExist:
                    continue

                release_text = self.linkify_release(release, project.organization)
                for error in errors:
                    linked_issue_title = self.linkify_error_title(error)
                    release_text += f"• :new: {linked_issue_title}\n"
                fields.append(self.make_field(release_text))
            blocks.append(self.get_section_fields_block(fields))

        # Add Top 3 Error/Performance Issues
        top_issue_fields = []
        if context.key_errors_by_group:
            top_errors_text = "*Today's Top 3 Error Issues*\n"
            for group, _ in context.key_errors_by_group:
                linked_title = self.linkify_error_title(group)
                top_errors_text += f"• {linked_title}\n"
            top_issue_fields.append(self.make_field(top_errors_text))

        if context.key_performance_issues:
            top_perf_issues_text = "*Today's Top 3 Performance Issues*\n"
            for perf_issue in context.key_performance_issues:
                linked_title = self.linkify_error_title(perf_issue[0])
                top_perf_issues_text += f"• {linked_title}\n"
            top_issue_fields.append(self.make_field(top_perf_issues_text))
        if top_issue_fields:
            blocks.append(self.get_section_fields_block(top_issue_fields))

        # Add regressed and escalated issues
        regressed_escalated_fields = []
        if context.escalated_today:
            escalated_issue_text = "*Issues that escalated today*\n"
            for escalated_issue in context.escalated_today:
                linked_title = self.linkify_error_title(escalated_issue)
                escalated_issue_text += f"• :point_up: {linked_title}\n"
            regressed_escalated_fields.append(self.make_field(escalated_issue_text))

        if context.regressed_today:
            regressed_issue_text = "*Issues that regressed today*\n"
            for regressed_issue in context.regressed_today:
                linked_title = self.linkify_error_title(regressed_issue)
                regressed_issue_text += f"• :recycle: {linked_title}\n"
            regressed_escalated_fields.append(self.make_field(regressed_issue_text))
        if regressed_escalated_fields:
            blocks.append(self.get_section_fields_block(regressed_escalated_fields))

        blocks.append(self.get_divider())

    text = subject
    callback_id_raw = self.notification.get_callback_data()
    callback_id = orjson.dumps(callback_id_raw).decode() if callback_id_raw else None
    footer = self.notification.build_notification_footer(
        self.recipient, ExternalProviders.SLACK
    )
    if footer:
        blocks.append(self.get_context_block(text=footer))
    return self._build_blocks(
        *blocks, fallback_text=text if text else None, callback_id=callback_id
    )
| SlackDailySummaryMessageBuilder |
python | viewflow__viewflow | tests/json/test_json__basics.py | {
"start": 2747,
"end": 2866
class Client(Person):
    """Person record extended with client-specific, JSON-stored fields."""

    # Stored inside the model's JSON column rather than as real DB columns.
    birthdate = jsonstore.DateField()
    business_phone = jsonstore.CharField(max_length=250)
| Client |
python | scipy__scipy | scipy/io/matlab/_mio5_params.py | {
"start": 7448,
"end": 7781
class MatlabFunction(np.ndarray):
    """Subclass for a MATLAB function.

    This is a simple subclass of :class:`numpy.ndarray` meant to be used
    by :func:`scipy.io.loadmat` and should not be directly instantiated.
    """

    def __new__(cls, input_array):
        # View the input as this subclass without copying the underlying data.
        obj = np.asarray(input_array).view(cls)
        return obj
| MatlabFunction |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_test.py | {
"start": 2722,
"end": 3287
class UnbalancedOutputRNNCell(rnn_cell_impl.RNNCell):
  """RNN cell producing (output, new_state) = ((input, concat(input, input)), state + 1).

  The two output components deliberately have mismatched widths (1 and 2),
  which is what tests using this cell exercise.
  """

  @property
  def output_size(self):
    # Two outputs with different widths: 1 and 2.
    return tensor_shape.TensorShape(1), tensor_shape.TensorShape((2))

  @property
  def state_size(self):
    # Scalar state.
    return tensor_shape.TensorShape([])

  def zero_state(self, batch_size, dtype):
    return array_ops.zeros([], dtype=dtypes.int32)

  def call(self, input_, state, scope=None):
    # Second output is the input concatenated with itself along the last axis.
    concatenated = array_ops.concat((input_, input_), axis=-1)
    return (input_, concatenated), state + 1
| UnbalancedOutputRNNCell |
python | scipy__scipy | scipy/linalg/tests/test_special_matrices.py | {
"start": 22181,
"end": 24843
} | class ____:
"""
Test convolution_matrix vs. numpy.convolve for various parameters.
"""
def create_vector(self, n, cpx):
    """Make a complex or real test vector of length n."""
    real_part = np.linspace(-2.5, 2.2, n)
    if not cpx:
        return real_part
    imag_part = np.linspace(-1.5, 3.1, n)
    return real_part + 1j * imag_part
def test_bad_n(self):
    # n must be a positive integer
    with pytest.raises(ValueError, match='n must be a positive integer'):
        convolution_matrix([1, 2, 3], 0)

def test_empty_first_arg(self):
    # first arg must have at least one value
    with pytest.raises(ValueError, match=r'len\(a\)'):
        convolution_matrix([], 4)

def test_bad_mode(self):
    # mode must be in ('full', 'valid', 'same')
    with pytest.raises(ValueError, match='mode.*must be one of'):
        convolution_matrix((1, 1), 4, mode='invalid argument')
@pytest.mark.parametrize('cpx', [False, True])
@pytest.mark.parametrize('na', [1, 2, 9])
@pytest.mark.parametrize('nv', [1, 2, 9])
@pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same'])
def test_against_numpy_convolve(self, cpx, na, nv, mode):
    # Cross-check the matrix product A @ v against np.convolve(v, a)
    # over all combinations of dtype, lengths and mode.
    a = self.create_vector(na, cpx)
    v = self.create_vector(nv, cpx)
    if mode is None:
        y1 = np.convolve(v, a)
        A = convolution_matrix(a, nv)
    else:
        y1 = np.convolve(v, a, mode)
        A = convolution_matrix(a, nv, mode)
    y2 = A @ v
    assert_array_almost_equal(y1, y2)
@pytest.mark.fail_slow(5)  # `leslie` has an import in the function
@pytest.mark.parametrize('f, args', [(circulant, ()),
                                     (companion, ()),
                                     (convolution_matrix, (5, 'same')),
                                     (fiedler, ()),
                                     (fiedler_companion, ()),
                                     (hankel, (np.arange(9),)),
                                     (leslie, (np.arange(9),)),
                                     (toeplitz, (np.arange(9),)),
                                     ])
def test_batch(f, args):
    # Batched (N-D) input should match looping over the trailing axis.
    rng = np.random.default_rng(283592436523456)
    batch_shape = (2, 3)
    m = 10
    A = rng.random(batch_shape + (m,))

    if f in {hankel}:
        # hankel is deprecating multidimensional input; only check the warning.
        message = "Beginning in SciPy 1.19, multidimensional input will be..."
        with pytest.warns(FutureWarning, match=message):
            f(A, *args)
        return

    res = f(A, *args)
    ref = np.asarray([f(a, *args) for a in A.reshape(-1, m)])
    ref = ref.reshape(A.shape[:-1] + ref.shape[-2:])
    assert_allclose(res, ref)
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_data.py | {
"start": 8498,
"end": 14132
} | class ____:
"""A single tensor dumped by TensorFlow Debugger (tfdbg).
Contains metadata about the dumped tensor, including `timestamp`,
`node_name`, `output_slot`, `debug_op`, and path to the dump file
(`file_path`).
This type does not hold the generally space-expensive tensor value (numpy
array). Instead, it points to the file from which the tensor value can be
loaded (with the `get_tensor` method) if needed.
"""
def __init__(self, dump_root, debug_dump_rel_path):
  """`DebugTensorDatum` constructor.

  Args:
    dump_root: (`str`) Debug dump root directory. This path should not include
      the path component that represents the device name (see also below).
    debug_dump_rel_path: (`str`) Path to a debug dump file, relative to the
      `dump_root`. The first item of this relative path is assumed to be
      a path representing the name of the device that the Tensor belongs to.
      See `device_path_to_device_name` for more details on the device path.
      For example, suppose the debug dump root
      directory is `/tmp/tfdbg_1` and the dump file is at
      `/tmp/tfdbg_1/<device_path>/>ns_1/node_a_0_DebugIdentity_123456789`,
      then the value of the debug_dump_rel_path should be
      `<device_path>/ns_1/node_a_0_DebugIdentity_1234456789`.

  Raises:
    ValueError: If the base file name of the dump file does not conform to
      the dump file naming pattern:
      `node_name`_`output_slot`_`debug_op`_`timestamp`
  """
  path_components = os.path.normpath(debug_dump_rel_path).split(os.sep)
  self._device_name = device_path_to_device_name(path_components[0])
  # Base file name encodes: node_name + "_" + slot + "_" + debug_op + "_" + timestamp.
  base = path_components[-1]
  if base.count("_") < 3:
    raise ValueError(
        "Dump file path does not conform to the naming pattern: %s" % base)

  self._extended_timestamp = base.split("_")[-1]
  # It may include an index suffix at the end if file path collision happened
  # due to identical timestamps.
  if "-" in self._extended_timestamp:
    self._timestamp = int(
        self._extended_timestamp[:self._extended_timestamp.find("-")])
  else:
    self._timestamp = int(self._extended_timestamp)

  self._debug_op = base.split("_")[-2]
  self._output_slot = int(base.split("_")[-3])

  # Node name may span multiple path components (name scopes become dirs).
  node_base_name = "_".join(base.split("_")[:-3])
  self._node_name = "/".join(path_components[1:-1] + [node_base_name])

  self._file_path = os.path.join(dump_root, debug_dump_rel_path)
  self._dump_size_bytes = (gfile.Stat(self._file_path).length if
                           gfile.Exists(self._file_path) else None)
def __str__(self):
  # Compact one-line summary: device, tensor, debug op, timestamp.
  return "{DebugTensorDatum (%s) %s:%d @ %s @ %d}" % (self.device_name,
                                                      self.node_name,
                                                      self.output_slot,
                                                      self.debug_op,
                                                      self.timestamp)

def __repr__(self):
  return self.__str__()

def get_tensor(self):
  """Get tensor from the dump (`Event`) file.

  Returns:
    The tensor loaded from the dump (`Event`) file.
  """
  return load_tensor_from_event_file(self.file_path)
# TODO(cais): Add time unit suffix to timestamp and t0 (us).
@property
def timestamp(self):
  """Timestamp of when this tensor value was dumped.

  Returns:
    (`int`) The timestamp in microseconds.
  """
  return self._timestamp

@property
def extended_timestamp(self):
  """Extended timestamp, possibly with an index suffix.

  The index suffix, e.g., "-1", is for disambiguating multiple dumps of the
  same tensor with the same timestamp, which can occur if the dumping events
  are spaced by shorter than the temporal resolution of the timestamps.

  Returns:
    (`str`) The extended timestamp.
  """
  return self._extended_timestamp

@property
def debug_op(self):
  """Name of the debug op.

  Returns:
    (`str`) debug op name (e.g., `DebugIdentity`).
  """
  return self._debug_op

@property
def device_name(self):
  """Name of the device that the tensor belongs to.

  Returns:
    (`str`) device name.
  """
  return self._device_name

@property
def node_name(self):
  """Name of the node from which the tensor value was dumped.

  Returns:
    (`str`) name of the node watched by the debug op.
  """
  return self._node_name

@property
def output_slot(self):
  """Output slot index from which the tensor value was dumped.

  Returns:
    (`int`) output slot index watched by the debug op.
  """
  return self._output_slot

@property
def tensor_name(self):
  """Name of the tensor watched by the debug op.

  Returns:
    (`str`) `Tensor` name, in the form of `node_name`:`output_slot`
  """
  return _get_tensor_name(self.node_name, self.output_slot)

@property
def watch_key(self):
  """Watch key identities a debug watch on a tensor.

  Returns:
    (`str`) A watch key, in the form of `tensor_name`:`debug_op`.
  """
  return _get_tensor_watch_key(self.node_name, self.output_slot,
                               self.debug_op)

@property
def file_path(self):
  """Path to the file which stores the value of the dumped tensor."""
  return self._file_path

@property
def dump_size_bytes(self):
  """Size of the dump file.

  Unit: byte.

  Returns:
    If the dump file exists, size of the dump file, in bytes.
    If the dump file does not exist, None.
  """
  return self._dump_size_bytes
python | getsentry__sentry | tests/sentry/api/test_base.py | {
"start": 1949,
"end": 2656
class DummyErroringEndpoint(Endpoint):
    """Test endpoint whose GET handler raises a configurable exception.

    Used to exercise `handle_exception_with_details` with injected
    handler-context and scope values.
    """

    permission_classes = ()
    # `as_view` requires that any init args passed to it match attributes already on the
    # class, so even though they're really meant to be instance attributes, we have to
    # add them here as class attributes first
    error: Exception = NotImplementedError()
    handler_context_arg: Mapping[str, Any] | None = None
    scope_arg: Scope | None = None

    def get(self, request):
        # Always fail with the configured error.
        raise self.error

    def handle_exception_with_details(self, request, exc, handler_context=None, scope=None):
        # Ignore caller-supplied context/scope and substitute the configured ones.
        return super().handle_exception_with_details(
            request, exc, self.handler_context_arg, self.scope_arg
        )
python | astropy__astropy | astropy/table/__init__.py | {
"start": 872,
"end": 3410
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astropy.table`.
    """

    # Template used when a column name cannot be determined automatically.
    auto_colname = _config.ConfigItem(
        "col{0}",
        "The template that determines the name of a column if it cannot be "
        "determined. Uses new-style (format method) string formatting.",
        aliases=["astropy.table.column.auto_colname"],
    )
    # CSS classes applied to tables rendered in Jupyter notebooks.
    default_notebook_table_class = _config.ConfigItem(
        "table-striped table-bordered table-condensed",
        "The table class to be used in Jupyter notebooks when displaying "
        "tables (and not overridden). See <https://getbootstrap.com/css/#tables "
        "for a list of useful bootstrap classes.",
    )
    # Conditions under which replacing a column via setitem warns.
    replace_warnings = _config.ConfigItem(
        [],
        "List of conditions for issuing a warning when replacing a table "
        "column using setitem, e.g. t['a'] = value. Allowed options are "
        "'always', 'slice', 'refcount', 'attributes'.",
        "string_list",
    )
    replace_inplace = _config.ConfigItem(
        False,
        "Always use in-place update of a table column when using setitem, "
        "e.g. t['a'] = value. This overrides the default behavior of "
        "replacing the column entirely with the new value when possible. "
        "This configuration option will be deprecated and then removed in "
        "subsequent major releases.",
    )
conf = Conf()
# Finally import the formats for the read and write method but delay building
# the documentation until all are loaded. (#5275)
from astropy.io import registry
from . import connect
from .bst import BST
from .groups import ColumnGroups, TableGroups
from .operations import (
TableMergeError,
dstack,
hstack,
join,
join_distance,
join_skycoord,
setdiff,
unique,
vstack,
)
from .serialize import SerializedColumn, represent_mixins_as_columns
from .soco import SCEngine
from .sorted_array import SortedArray
from .table import (
NdarrayMixin,
PprintIncludeExclude,
QTable,
Row,
Table,
TableAttribute,
TableColumns,
TableFormatter,
TableReplaceWarning,
)
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
import astropy.io.ascii.connect
import astropy.io.fits.connect
import astropy.io.misc.connect
import astropy.io.misc.pandas.connect
import astropy.io.votable.connect
from .jsviewer import JSViewer
if optional_deps.HAS_ASDF_ASTROPY:
import asdf_astropy.io.connect
| Conf |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/sample_from_datasets_test.py | {
"start": 1771,
"end": 12599
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def _normalize(self, vec):
return vec / vec.sum()
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       _weights_type_combinations()))
def testSampleFromDatasets(self, weights_type):
  # Statistical check: observed sampling frequencies should match `weights`.
  random_seed.set_random_seed(1619)
  num_samples = 5000
  rand_probs = self._normalize(np.random.random_sample((5,)))

  # Use chi-squared test to assert that the observed distribution matches the
  # expected distribution. Based on the implementation in
  # "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py".
  for probs in [[.85, .05, .1], rand_probs, [1.]]:
    weights = _get_weights_of_type(np.asarray(probs), weights_type)
    classes = len(probs)

    # Create a dataset that samples each integer in `[0, num_datasets)`
    # with probability given by `weights[i]`.
    dataset = dataset_ops.Dataset.sample_from_datasets([
        dataset_ops.Dataset.from_tensors(i).repeat() for i in range(classes)
    ], weights)
    dataset = dataset.take(num_samples)

    next_element = self.getNext(dataset, requires_initialization=True)
    freqs = np.zeros([classes])
    for _ in range(num_samples):
      freqs[self.evaluate(next_element())] += 1
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())

    self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)

@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       _weights_type_combinations()))
def testSampleFromDatasetsStoppingOnEmptyDataset(self, weights_type):
  # Sampling stops when the first dataset is exhausted.
  weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
  datasets = [
      dataset_ops.Dataset.from_tensors(np.int64(-1)),
      dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
      dataset_ops.Dataset.range(10).repeat()
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=True)

  samples_list = self.getIteratorOutput(self.getNext(
      sample_dataset, requires_initialization=True))
  self.assertEqual(samples_list.count(-1), 1)

@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       _weights_type_combinations()))
def testSampleFromDatasetsSkippingEmptyDataset(self, weights_type):
  # Sampling skips the first dataset after it becomes empty.
  weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
  datasets = [
      dataset_ops.Dataset.from_tensors(np.int64(-1)),
      dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
      dataset_ops.Dataset.range(10).repeat()
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=False).take(100)

  samples_list = self.getIteratorOutput(self.getNext(
      sample_dataset, requires_initialization=True))
  self.assertLen(samples_list, 100)
  self.assertEqual(samples_list.count(-1), 1)
@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       _weights_type_combinations()))
def testSampleFromDatasetsWithZeroWeight(self, weights_type):
  # Sampling stops when the second dataset is exhausted.
  weights = _get_weights_of_type(np.asarray([0., 1.]), weights_type)
  datasets = [
      dataset_ops.Dataset.from_tensors(-1).repeat(2),
      dataset_ops.Dataset.from_tensors(1).repeat(2)
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=True)
  self.assertDatasetProduces(sample_dataset, [1, 1],
                             requires_initialization=True)

@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       _weights_type_combinations()))
def testSampleFromEmptyDataset(self, weights_type):
  # An empty dataset with weight 1 ends sampling immediately.
  weights = _get_weights_of_type(np.asarray([1., 0.]), weights_type)
  datasets = [
      dataset_ops.Dataset.range(0),
      dataset_ops.Dataset.range(1).repeat()
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=True)
  self.assertDatasetProduces(sample_dataset, [],
                             requires_initialization=True)

@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsSkippingDatasetsWithZeroWeight(self):
  # Sampling skips the first dataset.
  weights = np.asarray([0., 1.])
  datasets = [
      dataset_ops.Dataset.from_tensors(-1).repeat(),
      dataset_ops.Dataset.from_tensors(1)
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=False)
  self.assertDatasetProduces(sample_dataset, [1])

@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsAllWeightsAreZero(self):
  # Sampling skips both datasets.
  weights = np.asarray([0., 0.])
  datasets = [
      dataset_ops.Dataset.from_tensors(-1).repeat(),
      dataset_ops.Dataset.from_tensors(1).repeat()
  ]
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      datasets, weights=weights, stop_on_empty_dataset=False)
  self.assertDatasetProduces(sample_dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsCardinality(self):
  # Sampling from two infinite datasets yields an infinite dataset.
  ds1 = dataset_ops.Dataset.from_tensors([1.0]).repeat()
  ds2 = dataset_ops.Dataset.from_tensors([2.0]).repeat()
  ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2])
  self.assertEqual(self.evaluate(ds.cardinality()), dataset_ops.INFINITE)

@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsNested(self):
  # Sampling works when elements are themselves datasets (windows).
  ds1 = dataset_ops.Dataset.range(10).window(2)
  ds2 = dataset_ops.Dataset.range(10, 20).window(2)
  ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2],
                                                weights=[0.3, 0.7])
  ds = ds.flat_map(lambda x: x)
  next_element = self.getNext(ds, requires_initialization=True)
  self.evaluate(next_element())
@combinations.generate(
    combinations.times(test_base.eager_only_combinations(),
                       combinations.combine(rerandomize=[None, True, False])))
def testSampleFromDatasetsRerandomizeEachIterationEpochs(self, rerandomize):
  # With rerandomize_each_iteration, two passes over the dataset differ.
  if rerandomize is not None and not tf_compat.forward_compatible(
      2022, 12, 17):
    self.skipTest(
        "target functionality not available due to forward compatibility")
  dataset1 = dataset_ops.Dataset.range(0, 10)
  dataset2 = dataset_ops.Dataset.range(100, 110)
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      [dataset1, dataset2],
      seed=42,
      weights=[0.5, 0.5],
      stop_on_empty_dataset=True,
      rerandomize_each_iteration=rerandomize)

  first_epoch = self.getDatasetOutput(sample_dataset)
  second_epoch = self.getDatasetOutput(sample_dataset)

  if rerandomize:
    self.assertNotEqual(first_epoch, second_epoch)
  else:
    self.assertEqual(first_epoch, second_epoch)

@combinations.generate(
    combinations.times(test_base.default_test_combinations(),
                       combinations.combine(rerandomize=[None, True, False])))
def testSampleFromDatasetsRerandomizeRepeatEpochs(self, rerandomize):
  # Same check, but with the epochs produced via `.repeat(2)`.
  if rerandomize is not None and not tf_compat.forward_compatible(
      2022, 12, 17):
    self.skipTest(
        "target functionality not available due to forward compatibility")
  dataset1 = dataset_ops.Dataset.range(0, 10)
  dataset2 = dataset_ops.Dataset.range(100, 110)
  sample_dataset = dataset_ops.Dataset.sample_from_datasets(
      [dataset1, dataset2],
      seed=42,
      weights=[0.5, 0.5],
      stop_on_empty_dataset=True,
      rerandomize_each_iteration=rerandomize)
  sample_dataset = sample_dataset.repeat(2)
  epochs = self.getDatasetOutput(sample_dataset, requires_initialization=True)
  first_epoch = epochs[:len(epochs) // 2]
  second_epoch = epochs[len(epochs) // 2:]

  if rerandomize:
    self.assertNotEqual(first_epoch, second_epoch)
  else:
    self.assertEqual(first_epoch, second_epoch)

@combinations.generate(
    combinations.times(test_base.v2_eager_only_combinations(),
                       combinations.combine(rerandomize=[None, True, False])))
def testSampleFromDatasetsRerandomizeInsideFunction(self, rerandomize):
  # Same check when the dataset is constructed inside a tf.function.
  if rerandomize is not None and not tf_compat.forward_compatible(
      2022, 12, 17):
    self.skipTest(
        "target functionality not available due to forward compatibility")

  @def_function.function
  def make_dataset():
    dataset1 = dataset_ops.Dataset.range(0, 10)
    dataset2 = dataset_ops.Dataset.range(100, 110)
    sample_dataset = dataset_ops.Dataset.sample_from_datasets(
        [dataset1, dataset2],
        seed=42,
        weights=[0.5, 0.5],
        stop_on_empty_dataset=True,
        rerandomize_each_iteration=rerandomize)
    return sample_dataset

  sample_dataset = make_dataset()
  first_epoch = self.getDatasetOutput(sample_dataset)
  second_epoch = self.getDatasetOutput(sample_dataset)

  if rerandomize:
    self.assertNotEqual(first_epoch, second_epoch)
  else:
    self.assertEqual(first_epoch, second_epoch)
@combinations.generate(test_base.default_test_combinations())
def testErrors(self):
  # Input validation: mismatched weights length, wrong weight dtype,
  # incompatible element types, and empty dataset list.
  with self.assertRaisesRegex(ValueError, r"should have the same length"):
    dataset_ops.Dataset.sample_from_datasets(
        [dataset_ops.Dataset.range(10),
         dataset_ops.Dataset.range(20)],
        weights=[0.25, 0.25, 0.25, 0.25])

  with self.assertRaisesRegex(TypeError, "`tf.float32` or `tf.float64`"):
    dataset_ops.Dataset.sample_from_datasets(
        [dataset_ops.Dataset.range(10),
         dataset_ops.Dataset.range(20)],
        weights=[1, 1])

  with self.assertRaisesRegex(TypeError, "must have compatible"):
    dataset_ops.Dataset.sample_from_datasets([
        dataset_ops.Dataset.from_tensors(0),
        dataset_ops.Dataset.from_tensors(0.0)
    ])

  with self.assertRaisesRegex(
      ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
    dataset_ops.Dataset.sample_from_datasets(datasets=[], weights=[])
| SampleFromDatasetsTest |
python | google__pytype | pytype/directors/parser.py | {
"start": 847,
"end": 948
class Call(LineRange):
    """Tag to identify function calls."""
@dataclasses.dataclass(frozen=True)
| Call |
python | huggingface__transformers | src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py | {
"start": 1726,
"end": 17570
} | class ____(PreTrainedModel):
config: VisionTextDualEncoderConfig
base_model_prefix = "vision_text_dual_encoder"
input_modalities = ("image", "text")
_supports_flash_attn = True
_supports_sdpa = True
def __init__(
    self,
    config: Optional[VisionTextDualEncoderConfig] = None,
    vision_model: Optional[PreTrainedModel] = None,
    text_model: Optional[PreTrainedModel] = None,
):
    r"""
    vision_model (`PreTrainedModel`):
        The vision model to use.
    text_model (`PreTrainedModel`):
        The text model to use.
    """
    if config is None and (vision_model is None or text_model is None):
        raise ValueError("Either a configuration or an vision and a text model has to be provided")

    if config is None:
        # Derive the joint config from the two sub-model configs.
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config)
    else:
        if not isinstance(config, self.config_class):
            raise ValueError(f"config: {config} has to be of type {self.config_class}")

    # initialize with config
    super().__init__(config)

    if vision_model is None:
        if isinstance(config.vision_config, CLIPVisionConfig):
            # CLIP vision towers need the dedicated vision-only wrapper.
            vision_model = CLIPVisionModel(config.vision_config)
        else:
            vision_model = AutoModel.from_config(config.vision_config)

    if text_model is None:
        text_model = AutoModel.from_config(config.text_config)

    self.vision_model = vision_model
    self.text_model = text_model

    # make sure that the individual model's config refers to the shared config
    # so that the updates to the config will be synced
    self.config.vision_config._attn_implementation = self.vision_model.config._attn_implementation
    self.config.text_config._attn_implementation = self.text_model.config._attn_implementation
    self.vision_model.config = self.config.vision_config
    self.text_model.config = self.config.text_config

    self.vision_embed_dim = config.vision_config.hidden_size
    self.text_embed_dim = config.text_config.hidden_size
    self.projection_dim = config.projection_dim

    # Linear projections map both towers into the shared embedding space.
    self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
    self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
    self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

    self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
    self,
    input_ids: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    token_type_ids: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
    r"""
    Returns:
        text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
        applying the projection layer to the pooled output of [`CLIPTextModel`].

    Examples:

    ```python
    >>> import torch
    >>> from transformers import VisionTextDualEncoderModel, AutoTokenizer

    >>> model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian")
    >>> tokenizer = AutoTokenizer.from_pretrained("clip-italian/clip-italian")

    >>> inputs = tokenizer(["una foto di un gatto", "una foto di un cane"], padding=True, return_tensors="pt")
    >>> with torch.inference_mode():
    ...     text_features = model.get_text_features(**inputs)
    ```"""
    text_outputs: BaseModelOutputWithPooling = self.text_model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        token_type_ids=token_type_ids,
    )
    # Project the pooled text representation into the shared embedding space.
    text_features = self.text_projection(text_outputs.pooler_output)

    return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(self, pixel_values: torch.Tensor) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`CLIPVisionModel`].
Examples:
```python
>>> import torch
>>> from transformers import VisionTextDualEncoderModel, AutoImageProcessor
>>> from transformers.image_utils import load_image
>>> model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian")
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs = self.vision_model(pixel_values=pixel_values)
image_features = self.visual_projection(vision_outputs.pooler_output)
return image_features
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
token_type_ids: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], CLIPOutput]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import (
... VisionTextDualEncoderModel,
... VisionTextDualEncoderProcessor,
... AutoImageProcessor,
... AutoTokenizer,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
>>> processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)
>>> model = VisionTextDualEncoderModel.from_vision_text_pretrained(
... "google/vit-base-patch16-224", "google-bert/bert-base-uncased"
... )
>>> # contrastive training
>>> urls = [
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg",
... ]
>>> images = [Image.open(requests.get(url, stream=True).raw) for url in urls]
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="pt", padding=True
... )
>>> outputs = model(
... input_ids=inputs.input_ids,
... attention_mask=inputs.attention_mask,
... pixel_values=inputs.pixel_values,
... return_loss=True,
... )
>>> loss, logits_per_image = outputs.loss, outputs.logits_per_image # this is the image-text similarity score
>>> # save and load from pretrained
>>> model.save_pretrained("vit-bert")
>>> model = VisionTextDualEncoderModel.from_pretrained("vit-bert")
>>> # inference
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
return_dict = return_dict if return_dict is not None else self.config.return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[1] # pooler_output
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1] # pooler_output
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.T
loss = None
if return_loss:
loss = clip_loss(logits_per_text)
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return CLIPOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
@classmethod
def from_vision_text_pretrained(
cls,
vision_model_name_or_path: Optional[str] = None,
text_model_name_or_path: Optional[str] = None,
*model_args,
**kwargs,
) -> PreTrainedModel:
"""
Params:
vision_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the vision model. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). In this case, a configuration
object should be provided as `config` argument.
text_model_name_or_path (`str`, *optional*):
Information necessary to initiate the text model. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). In this case, a configuration
object should be provided as `config` argument.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the text configuration, use the prefix *text_* for each configuration parameter.
- To update the vision configuration, use the prefix *vision_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import VisionTextDualEncoderModel
>>> # initialize a model from pretrained ViT and BERT models. Note that the projection layers will be randomly initialized.
>>> model = VisionTextDualEncoderModel.from_vision_text_pretrained(
... "google/vit-base-patch16-224", "google-bert/bert-base-uncased"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./vit-bert")
>>> # load fine-tuned model
>>> model = VisionTextDualEncoderModel.from_pretrained("./vit-bert")
```"""
kwargs_vision = {
argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_")
}
kwargs_text = {
argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_")
}
# remove vision, text kwargs from kwargs
for key in kwargs_vision:
del kwargs["vision_" + key]
for key in kwargs_text:
del kwargs["text_" + key]
# Load and initialize the vision and text model
vision_model = kwargs_vision.pop("model", None)
if vision_model is None:
if vision_model_name_or_path is None:
raise ValueError(
"If `vision_model` is not defined as an argument, a `vision_model_name_or_path` has to be defined"
)
if "config" not in kwargs_vision:
vision_config = AutoConfig.from_pretrained(vision_model_name_or_path)
if vision_config.model_type == "clip":
kwargs_vision["config"] = vision_config.vision_config
vision_model = CLIPVisionModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision)
# TODO: Should we use the pre-trained projection as well ?
else:
kwargs_vision["config"] = vision_config
vision_model = AutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision)
text_model = kwargs_text.pop("model", None)
if text_model is None:
if text_model_name_or_path is None:
raise ValueError(
"If `text_model` is not defined as an argument, a `text_model_name_or_path` has to be defined"
)
if "config" not in kwargs_text:
text_config = AutoConfig.from_pretrained(text_model_name_or_path)
kwargs_text["config"] = text_config
text_model = AutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text)
# instantiate config with corresponding kwargs
config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config, **kwargs)
# init model
model = cls(config=config, vision_model=vision_model, text_model=text_model)
# the projection layers are always newly initialized when loading the model
# using pre-trained vision and text model.
logger.warning(
"The projection layer and logit scale weights `['visual_projection.weight', 'text_projection.weight',"
" 'logit_scale']` are newly initialized. You should probably TRAIN this model on a down-stream task to be"
" able to use it for predictions and inference."
)
return model
__all__ = ["VisionTextDualEncoderModel"]
| VisionTextDualEncoderModel |
python | doocs__leetcode | solution/0500-0599/0501.Find Mode in Binary Search Tree/Solution.py | {
"start": 192,
"end": 756
} | class ____:
def findMode(self, root: TreeNode) -> List[int]:
def dfs(root):
if root is None:
return
nonlocal mx, prev, ans, cnt
dfs(root.left)
cnt = cnt + 1 if prev == root.val else 1
if cnt > mx:
ans = [root.val]
mx = cnt
elif cnt == mx:
ans.append(root.val)
prev = root.val
dfs(root.right)
prev = None
mx = cnt = 0
ans = []
dfs(root)
return ans
| Solution |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 296173,
"end": 297204
} | class ____(FallbackKernel):
"""
Custom kernel for memory checking that generates direct function calls
TODO - the custom op was erroring with str inputs. should be able to custom op directly.
"""
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
"""Override codegen to write direct function call"""
# Extract our arguments from nontensor_args
wrapper.write_memory_track_allocation_once()
alive_list, dead_list, is_final_step = self.constant_args
alive_repr = repr(alive_list)
dead_repr = repr(dead_list)
if is_final_step:
wrapper.writeline(
"# note: dont currently distinguish between buffers returned and dealloc'd in last step"
)
call = f"check_memory_step(allocated={alive_repr}, freed={dead_repr}, is_final_step={is_final_step})"
else:
call = f"check_memory_step(allocated={alive_repr}, freed={dead_repr})"
wrapper.writeline(call)
@ir_dataclass
| MemoryCheckKernel |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 11561,
"end": 11704
} | class ____(desc_sig_element, _sig_element=True):
"""Node for a string literal in a signature."""
classes = ['s']
| desc_sig_literal_string |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 393440,
"end": 394430
} | class ____(sgqlc.types.Interface):
"""Represents a Git object."""
__schema__ = github_schema
__field_names__ = ("abbreviated_oid", "commit_resource_path", "commit_url", "id", "oid", "repository")
abbreviated_oid = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="abbreviatedOid")
"""An abbreviated version of the Git object ID"""
commit_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="commitResourcePath")
"""The HTTP path for this Git object"""
commit_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="commitUrl")
"""The HTTP URL for this Git object"""
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="oid")
"""The Git object ID"""
repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
"""The Repository the Git object belongs to"""
| GitObject |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 18539,
"end": 19758
} | class ____(Constraint):
"""
Constraint functor that applies a sequence of constraints
`cseq` at the submatrices at dimension `dim`,
each of size `lengths[dim]`, in a way compatible with :func:`torch.cat`.
"""
def __init__(self, cseq, dim=0, lengths=None):
assert all(isinstance(c, Constraint) for c in cseq)
self.cseq = list(cseq)
if lengths is None:
lengths = [1] * len(self.cseq)
self.lengths = list(lengths)
assert len(self.lengths) == len(self.cseq)
self.dim = dim
super().__init__()
@property
def is_discrete(self) -> bool: # type: ignore[override]
return any(c.is_discrete for c in self.cseq)
@property
def event_dim(self) -> int: # type: ignore[override]
return max(c.event_dim for c in self.cseq)
def check(self, value):
assert -value.dim() <= self.dim < value.dim()
checks = []
start = 0
for constr, length in zip(self.cseq, self.lengths):
v = value.narrow(self.dim, start, length)
checks.append(constr.check(v))
start = start + length # avoid += for jit compat
return torch.cat(checks, self.dim)
| _Cat |
python | pydata__xarray | xarray/tests/test_indexing.py | {
"start": 35968,
"end": 36058
} | class ____:
def __array_namespace__(self, version=None):
pass
| ArrayWithNamespace |
python | ray-project__ray | python/ray/train/tests/test_iter_torch_batches_gpu.py | {
"start": 1165,
"end": 1559
} | class ____(ArrowBatchCollateFn):
"""Collate function that returns only the id column as a tensor."""
def __call__(self, batch: pa.Table) -> torch.Tensor:
"""Return only the id column as a tensor."""
assert isinstance(batch, pa.Table)
tensor_dict = arrow_batch_to_tensors(batch, combine_chunks=True)
return tensor_dict["id"]
| SingleTensorArrowBatchCollateFn |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/projection_queries/snippets.py | {
"start": 1652,
"end": 1871
} | class ____(ndb.Model):
A = ndb.IntegerProperty(repeated=True)
B = ndb.StringProperty(repeated=True)
def declare_multiple_valued_property():
entity = Foo(A=[1, 1, 2, 3], B=["x", "y", "x"])
return entity
| Foo |
python | great-expectations__great_expectations | tests/scripts/test_public_api_report.py | {
"start": 12835,
"end": 20959
} | class ____:
def example_method():
pass
@staticmethod
def example_public_staticmethod():
pass
@classmethod
def example_public_classmethod(cls):
pass
@some_other_decorator
@another_decorator
def example_multiple_decorator_public_method(self):
pass
"""
test_path = tmp_path / "test_path.py"
ast_definitions = self._class_and_function_definitions(tree=ast.parse(file_string))
definitions = [
Definition(
name="test_name",
filepath=pathlib.Path(test_path),
ast_definition=ast_definition,
)
for ast_definition in ast_definitions
]
assert not all(
public_api_checker.is_definition_marked_public_api(definition)
for definition in definitions
)
@pytest.fixture
def code_reference_filter(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
)
@pytest.fixture
def code_reference_filter_with_non_default_include_exclude(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_docs_example_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[
IncludeExcludeDefinition(
reason="test",
name="test_name",
filepath=sample_docs_example_python_file_string_filepath,
)
],
excludes=[
IncludeExcludeDefinition(
reason="test",
name="test_name",
filepath=sample_docs_example_python_file_string_filepath,
)
],
)
@pytest.fixture
def code_reference_filter_with_no_include_exclude(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[],
excludes=[],
)
@pytest.fixture
def code_reference_filter_with_exclude_by_file(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_with_definitions_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[],
excludes=[
IncludeExcludeDefinition(
reason="test",
filepath=sample_with_definitions_python_file_string_filepath,
)
],
)
@pytest.fixture
def code_reference_filter_with_references_from_docs_content(
repo_root: pathlib.Path,
empty_docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=empty_docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
references_from_docs_content={"ExampleClass", "ExamplePublicAPIClass"},
)
@pytest.fixture
def code_reference_filter_with_exclude_by_file_and_name(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_with_definitions_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[],
excludes=[
IncludeExcludeDefinition(
reason="test",
name="example_method",
filepath=sample_with_definitions_python_file_string_filepath,
),
IncludeExcludeDefinition(
reason="test",
name="example_module_level_function",
filepath=sample_with_definitions_python_file_string_filepath,
),
],
)
@pytest.fixture
def code_reference_filter_with_include_by_file_and_name_already_included(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_with_definitions_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[
IncludeExcludeDefinition(
reason="test",
name="example_method",
filepath=sample_with_definitions_python_file_string_filepath,
),
IncludeExcludeDefinition(
reason="test",
name="example_module_level_function",
filepath=sample_with_definitions_python_file_string_filepath,
),
],
excludes=[],
)
@pytest.fixture
def code_reference_filter_with_include_by_file_and_name_already_excluded(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_with_definitions_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[
IncludeExcludeDefinition(
reason="test",
name="example_method",
filepath=sample_with_definitions_python_file_string_filepath,
),
IncludeExcludeDefinition(
reason="test",
name="example_module_level_function",
filepath=sample_with_definitions_python_file_string_filepath,
),
],
excludes=[
IncludeExcludeDefinition(
reason="test",
filepath=sample_with_definitions_python_file_string_filepath,
)
],
)
@pytest.fixture
def code_reference_filter_with_include_by_file_and_name_not_used_in_docs_example_exclude_file(
repo_root: pathlib.Path,
docs_example_parser: DocsExampleParser,
code_parser: CodeParser,
public_api_checker: PublicAPIChecker,
sample_with_definitions_python_file_string_filepath: pathlib.Path,
) -> CodeReferenceFilter:
return CodeReferenceFilter(
repo_root=repo_root,
docs_example_parser=docs_example_parser,
code_parser=code_parser,
public_api_checker=public_api_checker,
includes=[
IncludeExcludeDefinition(
reason="test",
name="example_no_usages_in_sample_docs_example_python_file_string",
filepath=sample_with_definitions_python_file_string_filepath,
),
],
excludes=[
IncludeExcludeDefinition(
reason="test",
filepath=sample_with_definitions_python_file_string_filepath,
)
],
)
| ExampleClass |
python | getsentry__sentry | tests/sentry/flags/endpoints/test_logs.py | {
"start": 13192,
"end": 14964
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-flag-log"
def setUp(self) -> None:
super().setUp()
self.flag = FlagAuditLogModel(
action=0,
created_at=datetime.now(timezone.utc),
created_by="a@b.com",
created_by_type=0,
flag="hello",
organization_id=self.organization.id,
tags={"commit_sha": "123"},
)
self.flag.save()
self.login_as(user=self.user)
self.url = reverse(self.endpoint, args=(self.organization.id, self.flag.id))
@property
def features(self) -> dict[str, bool]:
return {}
def test_get(self) -> None:
with self.feature(self.features):
response = self.client.get(self.url)
assert response.status_code == 200
result = response.json()
assert result["data"]["action"] == "created"
assert "createdAt" in result["data"]
assert result["data"]["createdBy"] == "a@b.com"
assert result["data"]["createdByType"] == "email"
assert result["data"]["flag"] == "hello"
assert result["data"]["tags"] == {"commit_sha": "123"}
def test_get_unauthorized_organization(self) -> None:
org = self.create_organization()
url = reverse(self.endpoint, args=(org.id, 123))
with self.feature(self.features):
response = self.client.get(url)
assert response.status_code == 403
def test_get_no_flag(self) -> None:
with self.feature(self.features):
response = self.client.get(reverse(self.endpoint, args=(self.organization.id, 123)))
assert response.status_code == 404
| OrganizationFlagLogDetailsEndpointTestCase |
python | dask__distributed | distributed/utils_test.py | {
"start": 55353,
"end": 55815
} | class ____(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.tasks = worker.state.tasks
def transition(self, key, start, finish, **kwargs):
ts = self.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
| TaskStateMetadataPlugin |
python | pytorch__pytorch | test/distributed/_composable/test_replicate_training.py | {
"start": 9658,
"end": 24823
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(8, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_train_parity_single_group(self):
"""
Tests train parity with DDP for a single FSDP group when sharding
parameters on dim-0.
"""
self.run_subtests(
{
"lin_shapes": [
[(16, 15), (15, 8)],
[(7, 15), (15, 3)],
[(16, 17), (17, 8)],
],
},
self._test_train_parity_single_group,
)
def _test_train_parity_single_group(self, lin_shapes: list[tuple[int, int]]):
torch.manual_seed(42)
model = nn.Sequential(
nn.Linear(*lin_shapes[0]), nn.ReLU(), nn.Linear(*lin_shapes[1])
)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
replicate(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank + 1)
inp = (torch.randn((4, lin_shapes[0][0]), device=device_type.type),)
for iter_idx in range(10):
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(*inp).sum())
losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
for _optim in (ref_optim, optim):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU, "Sleep kernel not supported for HPU")
@compiled_fsdp_test(compile_compute_on_module=Transformer)
def test_train_parity_multi_groups(self):
"""
Tests train parity against DDP when using multiple parameter groups for
communication (for communication and computation overlap plus memory
reduction).
"""
self.run_subtests(
{
"test_device_type": [device_type.type],
"offload_policy": [OffloadPolicy()],
"delay_after_forward": [False, True],
"delay_before_all_gather": [False, True],
"delay_before_reduce_scatter": [False, True],
"delay_before_optim": [False, True],
"unshard_async_op": [False],
},
self._test_train_parity_multi_group,
)
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU, "sleep kernel not supported on HPU")
def test_train_parity_multi_group_cpu_offload_eager(self):
"""
Tests train parity when using multiple parameter groups for
communication and CPU offloading.
"""
self.run_subtests(
{
"offload_policy": [
CPUOffloadPolicy(pin_memory=True),
CPUOffloadPolicy(pin_memory=False),
],
"test_device_type": [device_type.type],
"delay_after_forward": [False, True],
"delay_before_all_gather": [False, True],
"delay_before_reduce_scatter": [False, True],
"delay_before_optim": [False, True],
"unshard_async_op": [False],
},
self._test_train_parity_multi_group,
)
def _test_train_parity_multi_group(
self,
offload_policy: OffloadPolicy,
test_device_type: str,
delay_after_forward: bool,
delay_before_all_gather: bool,
delay_before_reduce_scatter: bool,
delay_before_optim: bool,
unshard_async_op: bool,
):
# Only test individual delays or all four delays to save test time
if (
delay_after_forward
+ delay_before_all_gather
+ delay_before_reduce_scatter
+ delay_before_optim
in (2, 3)
):
return
assert test_device_type in ("cuda", "hpu", "xpu", "cpu"), f"{test_device_type}"
torch.manual_seed(42)
vocab_size = 1024
model_args = ModelArgs(
n_layers=3,
n_heads=4,
vocab_size=vocab_size,
max_seq_len=64,
dropout_p=0,
)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
mesh = init_device_mesh(
test_device_type,
(self.world_size,),
mesh_dim_names=("replicate",),
)
fully_shard_fn = functools.partial(
replicate,
mesh=mesh,
offload_policy=offload_policy,
)
for module in model.modules():
if isinstance(module, TransformerBlock):
fully_shard_fn(module)
fully_shard_fn(model)
if unshard_async_op:
model._set_unshard_async_op(unshard_async_op)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
delay_in_ms = 100
orig_all_gather = dist.all_gather_into_tensor
orig_reduce_scatter = dist.reduce_scatter_tensor
def delayed_all_gather(*args, **kwargs):
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
return orig_all_gather(*args, **kwargs)
def delayed_reduce_scatter(*args, **kwargs):
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
return orig_reduce_scatter(*args, **kwargs)
torch.manual_seed(42 + self.rank + 1)
patch_all_gather_ctx = (
patch_all_gather(delayed_all_gather)
if delay_before_all_gather
else contextlib.nullcontext()
)
patch_reduce_scatter_ctx = (
patch_reduce_scatter(delayed_reduce_scatter)
if delay_before_reduce_scatter
else contextlib.nullcontext()
)
with patch_all_gather_ctx, patch_reduce_scatter_ctx:
for iter_idx in range(10):
inp = torch.randint(0, vocab_size, (3, 64), device=device_type)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
losses.append(_model(inp).sum())
if _model is model and delay_after_forward:
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
losses[-1].backward()
if _model is model and delay_before_optim:
torch.get_device_module(device_type)._sleep(
int(delay_in_ms * get_cycles_per_ms())
)
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
for _optim in (ref_optim, optim):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
def test_non_root_forward_backward(self):
"""
Tests running forward/backward through the root and then through a
non-root. The non-root needs to synchronize streams/queue the callback.
"""
torch.manual_seed(42)
lin_dim = 32
model = nn.Sequential(*[MLP(lin_dim, torch.device("cpu")) for _ in range(3)])
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for mlp in model:
replicate(mlp)
replicate(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
torch.manual_seed(42 + self.rank)
inp = torch.randn((8, lin_dim), device=device_type)
ref_root_loss = ref_model(inp).sum()
ref_root_loss.backward()
for param in ref_model.parameters():
dist.all_reduce(param.grad)
param.grad.detach().div_(self.world_size)
ref_optim.step()
ref_optim.zero_grad()
ref_nonroot_loss = ref_model[0](inp).sum()
ref_nonroot_loss.backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.detach().div_(self.world_size)
ref_optim.step()
root_loss = model(inp).sum()
root_loss.backward()
torch.get_device_module(device_type)._sleep(int(100 * get_cycles_per_ms()))
optim.step()
optim.zero_grad()
nonroot_loss = model[0](inp).sum()
nonroot_loss.backward()
optim.step()
self.assertEqual(ref_root_loss, root_loss)
self.assertEqual(ref_nonroot_loss, nonroot_loss)
self.assertEqual(ref_model(inp).sum(), model(inp).sum())
@skip_if_lt_x_gpu(2)
def test_multi_forward_module(self):
"""
Tests parity when running a module that participates multiple
times in forward.
"""
self._test_multi_forward_module()
def _test_multi_forward_module(self):
class MultiForwardModule(nn.Module):
def __init__(self, device: torch.device):
super().__init__()
self.inner = nn.Linear(4, 4, device=device)
self.outer = nn.Linear(4, 5, device=device)
def forward(self, x):
i = self.inner(x)
j = self.inner(x)
return self.outer(i + j)
torch.manual_seed(42)
model = MultiForwardModule(device=device_type.type)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
replicate(model.inner)
replicate(model)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank)
inp = torch.randn((32, 4), device=device_type.type)
for iter_idx in range(10):
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(inp).sum())
losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
for _optim in (ref_optim, optim):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
def test_explicit_prefetching(self):
torch.manual_seed(42)
model_args = ModelArgs(n_layers=8, dropout_p=0.0)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type)
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
for layer in itertools.chain(model.layers, [model]):
replicate(layer)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
num_to_forward_prefetch = num_to_backward_prefetch = 2
for i, layer in enumerate(model.layers):
if i >= len(model.layers) - num_to_forward_prefetch:
break
layers_to_prefetch = [
model.layers[i + j] for j in range(1, num_to_forward_prefetch + 1)
]
layer.set_modules_to_forward_prefetch(layers_to_prefetch)
for i, layer in enumerate(model.layers):
if i < num_to_backward_prefetch:
continue
layers_to_prefetch = [
model.layers[i - j] for j in range(1, num_to_backward_prefetch + 1)
]
layer.set_modules_to_backward_prefetch(layers_to_prefetch)
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 8), device=device_type.type)
for _ in range(10):
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(inp).sum())
losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
for _optim in (ref_optim, optim):
_optim.zero_grad()
_optim.step()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_HPU, "Sleep is not supported on HPU")
def test_post_optim_event(self):
torch.manual_seed(42)
model_args = ModelArgs(dropout_p=0.0)
model = Transformer(model_args)
ref_model = copy.deepcopy(model).to(device_type.type)
ref_optim = torch.optim.AdamW(ref_model.parameters(), lr=1e-2)
for layer in itertools.chain(model.layers, [model]):
replicate(layer)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
def step_post_hook(
fsdp_module: FSDPModule, opt: torch.optim.Optimizer, args, kwargs
) -> None:
post_optim_event = (
torch.get_device_module(device_type).current_stream().record_event()
)
fsdp_module.set_post_optim_event(post_optim_event)
optim.register_step_post_hook(functools.partial(step_post_hook, model))
torch.manual_seed(42 + self.rank)
inp = torch.randint(0, model_args.vocab_size, (2, 8), device=device_type.type)
# Track all losses and check for equality at the end to avoid a CPU
# sync point after each iteration
ref_losses: list[torch.Tensor] = []
losses: list[torch.Tensor] = []
for _ in range(10):
ref_optim.zero_grad()
ref_losses.append(ref_model(inp).sum())
ref_losses[-1].backward()
for param in ref_model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad)
param.grad.div_(self.world_size)
ref_optim.step()
for _ in range(10):
optim.zero_grad()
losses.append(model(inp).sum())
losses[-1].backward()
optim.step()
# Sleep after the optimizer step to allow CPU to run ahead into the
# next iteration's forward, exercising the post-optim stream sync
torch.get_device_module(device_type)._sleep(int(25 * get_cycles_per_ms()))
for ref_loss, loss in zip(ref_losses, losses):
self.assertEqual(ref_loss, loss)
| TestReplicate1DTrainingCore |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/proxied_state.py | {
"start": 122,
"end": 991
} | class ____(NamedTuple):
"""A class to store the proxied state of a task.
Args:
task_id (str): The id of the task.
proxied (bool): A boolean indicating whether the task is proxied.
"""
task_id: str
proxied: bool
@staticmethod
def from_dict(task_dict: dict[str, Any]) -> "TaskProxiedState":
if set(task_dict.keys()) != {"id", "proxied"}:
raise Exception(
f"Expected 'proxied' and 'id' keys in the task dictionary. Found keys: {task_dict.keys()}"
)
if task_dict["proxied"] not in [True, False]:
raise Exception("Expected 'proxied' key to be a boolean")
return TaskProxiedState(task_id=task_dict["id"], proxied=task_dict["proxied"])
def to_dict(self) -> dict[str, Any]:
return {"id": self.task_id, "proxied": self.proxied}
| TaskProxiedState |
python | Textualize__textual | src/textual/_parser.py | {
"start": 323,
"end": 387
} | class ____(ParseError):
"""Read has timed out."""
| ParseTimeout |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 14149,
"end": 14798
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 1]"):
l_x_ = L_x_
wrap_body_0 = self.wrap_body_0
wrap = torch.ops.higher_order.wrap(wrap_body_0, l_x_); wrap_body_0 = l_x_ = None
getitem: "f32[3]" = wrap[0]; wrap = None
return (getitem,)
class wrap_body_0(torch.nn.Module):
def forward(self, l_x_: "f32[3, 1]"):
view: "f32[3]" = l_x_.view(3); l_x_ = None
add: "f32[3]" = view + 0.5; view = None
return (add,)
""",
)
else:
self.assertExpectedInline(
actual_graph,
"""\
| GraphModule |
python | mwaskom__seaborn | tests/test_distributions.py | {
"start": 1776,
"end": 4278
} | class ____:
rs = np.random.RandomState(0)
x = rs.randn(100)
def test_hist_bins(self):
fd_edges = np.histogram_bin_edges(self.x, "fd")
with pytest.warns(UserWarning):
ax = distplot(self.x)
for edge, bar in zip(fd_edges, ax.patches):
assert pytest.approx(edge) == bar.get_x()
plt.close(ax.figure)
n = 25
n_edges = np.histogram_bin_edges(self.x, n)
with pytest.warns(UserWarning):
ax = distplot(self.x, bins=n)
for edge, bar in zip(n_edges, ax.patches):
assert pytest.approx(edge) == bar.get_x()
def test_elements(self):
with pytest.warns(UserWarning):
n = 10
ax = distplot(self.x, bins=n,
hist=True, kde=False, rug=False, fit=None)
assert len(ax.patches) == 10
assert len(ax.lines) == 0
assert len(ax.collections) == 0
plt.close(ax.figure)
ax = distplot(self.x,
hist=False, kde=True, rug=False, fit=None)
assert len(ax.patches) == 0
assert len(ax.lines) == 1
assert len(ax.collections) == 0
plt.close(ax.figure)
ax = distplot(self.x,
hist=False, kde=False, rug=True, fit=None)
assert len(ax.patches) == 0
assert len(ax.lines) == 0
assert len(ax.collections) == 1
class Norm:
"""Dummy object that looks like a scipy RV"""
def fit(self, x):
return ()
def pdf(self, x, *params):
return np.zeros_like(x)
plt.close(ax.figure)
ax = distplot(
self.x, hist=False, kde=False, rug=False, fit=Norm())
assert len(ax.patches) == 0
assert len(ax.lines) == 1
assert len(ax.collections) == 0
def test_distplot_with_nans(self):
f, (ax1, ax2) = plt.subplots(2)
x_null = np.append(self.x, [np.nan])
with pytest.warns(UserWarning):
distplot(self.x, ax=ax1)
distplot(x_null, ax=ax2)
line1 = ax1.lines[0]
line2 = ax2.lines[0]
assert np.array_equal(line1.get_xydata(), line2.get_xydata())
for bar1, bar2 in zip(ax1.patches, ax2.patches):
assert bar1.get_xy() == bar2.get_xy()
assert bar1.get_height() == bar2.get_height()
| TestDistPlot |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/v1_compat_tests/stack_op_test.py | {
"start": 1065,
"end": 3249
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
# Tests symbolic tensor semantics
def testVariable(self):
with self.session():
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
self.evaluate(v.initializer)
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
self.evaluate(result))
v.assign(38).op.run()
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]],
self.evaluate(result))
@test_util.run_deprecated_v1
# Placeholders are V1 only.
def testPlaceholder(self):
with self.session():
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
result_0.eval(feed_dict={ph_0: 1}))
self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
result_1.eval(feed_dict={ph_1: 1}))
self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
result_1.eval(feed_dict={ph_1: 2}))
@test_util.run_deprecated_v1
# Placeholders and shape inference are only applicable in Graph mode.
def testShapeErrors(self):
# Static shape error.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.session():
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
| AutomaticStackingTest |
python | django-haystack__django-haystack | test_haystack/elasticsearch5_tests/test_backend.py | {
"start": 2950,
"end": 3356
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
name = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return AnotherMockModel
def prepare_text(self, obj):
return "You might be searching for the user %s" % obj.author
| Elasticsearch5AnotherMockModelSearchIndex |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/collaborators/base.py | {
"start": 1261,
"end": 5946
} | class ____(BaseReader):
"""
GitHub repository collaborators reader.
Retrieves the list of collaborators of a GitHub repository and returns a list of documents.
Examples:
>>> reader = GitHubRepositoryCollaboratorsReader("owner", "repo")
>>> colabs = reader.load_data()
>>> print(colabs)
"""
class FilterType(enum.Enum):
"""
Filter type.
Used to determine whether the filter is inclusive or exclusive.
"""
EXCLUDE = enum.auto()
INCLUDE = enum.auto()
def __init__(
self,
github_client: BaseGitHubCollaboratorsClient,
owner: str,
repo: str,
verbose: bool = False,
):
"""
Initialize params.
Args:
- github_client (BaseGitHubCollaboratorsClient): GitHub client.
- owner (str): Owner of the repository.
- repo (str): Name of the repository.
- verbose (bool): Whether to print verbose messages.
Raises:
- `ValueError`: If the github_token is not provided and
the GITHUB_TOKEN environment variable is not set.
"""
super().__init__()
self._owner = owner
self._repo = repo
self._verbose = verbose
# Set up the event loop
try:
self._loop = asyncio.get_running_loop()
except RuntimeError:
# If there is no running loop, create a new one
self._loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._loop)
self._github_client = github_client
def load_data(
self,
) -> List[Document]:
"""
GitHub repository collaborators reader.
Retrieves the list of collaborators in a GitHub repository and converts them to documents.
Each collaborator is converted to a document by doing the following:
- The text of the document is the login.
- The title of the document is also the login.
- The extra_info of the document is a dictionary with the following keys:
- login: str, the login of the user
- type: str, the type of user e.g. "User"
- site_admin: bool, whether the user has admin permissions
- role_name: str, e.g. "admin"
- name: str, the name of the user, if available
- email: str, the email of the user, if available
- permissions: str, the permissions of the user, if available
:return: list of documents
"""
documents = []
page = 1
# Loop until there are no more collaborators
while True:
collaborators: Dict = self._loop.run_until_complete(
self._github_client.get_collaborators(
self._owner, self._repo, page=page
)
)
if len(collaborators) == 0:
print_if_verbose(self._verbose, "No more collaborators found, stopping")
break
print_if_verbose(
self._verbose,
f"Found {len(collaborators)} collaborators in the repo page {page}",
)
page += 1
for collab in collaborators:
extra_info = {
"login": collab["login"],
"type": collab["type"],
"site_admin": collab["site_admin"],
"role_name": collab["role_name"],
}
if collab.get("name") is not None:
extra_info["name"] = collab["name"]
if collab.get("email") is not None:
extra_info["email"] = collab["email"]
if collab.get("permissions") is not None:
extra_info["permissions"] = collab["permissions"]
document = Document(
doc_id=str(collab["login"]),
text=str(collab["login"]), # unsure for this
extra_info=extra_info,
)
documents.append(document)
print_if_verbose(self._verbose, f"Resulted in {len(documents)} documents")
return documents
if __name__ == "__main__":
"""Load all collaborators in the repo labeled as bug."""
github_client = GitHubCollaboratorsClient(verbose=True)
reader = GitHubRepositoryCollaboratorsReader(
github_client=github_client,
owner="moncho",
repo="dry",
verbose=True,
)
documents = reader.load_data()
print(f"Got {len(documents)} documents")
| GitHubRepositoryCollaboratorsReader |
python | google__pytype | pytype/rewrite/abstract/base.py | {
"start": 4029,
"end": 4740
} | class ____(BaseValue):
"""Union of values."""
def __init__(self, ctx: ContextType, options: Sequence[BaseValue]):
super().__init__(ctx)
assert len(options) > 1
flattened_options = []
for o in options:
if isinstance(o, Union):
flattened_options.extend(o.options)
else:
flattened_options.append(o)
self.options = tuple(utils.unique_list(flattened_options))
def __repr__(self):
return ' | '.join(repr(o) for o in self.options)
@property
def _attrs(self):
return (frozenset(self.options),)
def instantiate(self):
return Union(self._ctx, tuple(o.instantiate() for o in self.options))
AbstractVariableType = variables.Variable[BaseValue]
| Union |
python | Netflix__metaflow | metaflow/plugins/catch_decorator.py | {
"start": 196,
"end": 629
} | class ____(MetaflowException):
headline = "Task execution failed but @catch handled it"
def __init__(self, retry_count):
msg = (
"Task execution kept failing over %d attempts. "
"Your code did not raise an exception. Something "
"in the execution environment caused the failure." % retry_count
)
super(FailureHandledByCatch, self).__init__(msg)
| FailureHandledByCatch |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table04.py | {
"start": 315,
"end": 1144
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table04.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13")
worksheet.write("A1", "http://perl.com/")
worksheet.set_comments_author("John")
worksheet.write_comment("H1", "Test1")
worksheet.write_comment("J1", "Test2")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/_inductor/codegen/cpp.py | {
"start": 224527,
"end": 225765
} | class ____:
def __init__(self, code):
self.code = code
self.in_parallel = False
self.num_threads = None
self.stack = contextlib.ExitStack()
def parallel(self, threads):
if self.in_parallel and threads != self.num_threads:
# wrong number of threads
self.close()
if not self.in_parallel:
self.num_threads = threads
self.in_parallel = True
if config.cpp.dynamic_threads:
self.code.writeline("#pragma omp parallel")
else:
self.code.writeline(f"#pragma omp parallel num_threads({threads})")
self.stack.enter_context(self.code.indent())
self.code.writeline(
"int tid = omp_get_thread_num();",
)
def single(self):
if self.in_parallel:
self.code.writeline("#pragma omp single")
return self.in_parallel
def close(self):
self.stack.close()
self.in_parallel = False
def __enter__(self):
self.stack.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stack.__exit__(exc_type, exc_val, exc_tb)
@dataclasses.dataclass
| WorkSharing |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-athena/llama_index/readers/athena/base.py | {
"start": 179,
"end": 2692
} | class ____(BaseReader):
"""
Athena reader.
Follow AWS best practices for security.
AWS discourages hardcoding credentials in code.
We recommend that you use IAM roles instead of IAM user credentials.
If you must use credentials, do not embed them in your code.
Instead, store them in environment variables or in a separate configuration file.
"""
def __init__(
self,
) -> None:
"""Initialize with parameters."""
def create_athena_engine(
self,
aws_access_key: Optional[str] = None,
aws_secret_key: Optional[str] = None,
aws_region: str = None,
s3_staging_dir: str = None,
database: str = None,
workgroup: str = None,
):
"""
Args:
aws_access_key is the AWS access key from aws credential
aws_secret_key is the AWS secret key from aws credential
aws_region is the AWS region
s3_staging_dir is the S3 staging (result bucket) directory
database is the Athena database name
workgroup is the Athena workgroup name.
"""
if not aws_access_key or not aws_secret_key:
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
else:
warnings.warn(
"aws_access_key and aws_secret_key are set. We recommend to use IAM role instead."
)
boto3.client(
"athena",
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region_name=aws_region,
)
conn_str = (
"awsathena+rest://:@athena.{region_name}.amazonaws.com:443/"
"{database}?s3_staging_dir={s3_staging_dir}?work_group={workgroup}"
)
engine = create_engine(
conn_str.format(
region_name=aws_region,
s3_staging_dir=s3_staging_dir,
database=database,
workgroup=workgroup,
)
)
return engine
| AthenaReader |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/isinstance3.py | {
"start": 477,
"end": 926
} | class ____(Generic[_T]):
pass
a = A()
if isinstance(a, A):
pass
# This should generate an error because generic types with
# subscripts are not allowed.
if isinstance(a, A[str]):
pass
# This should generate an error in Python 3.9 and older because
# unions are not allowed, but this error isn't currently caught.
if issubclass(A, Union[A, int]):
pass
if issubclass(A, type(None)):
pass
if issubclass(A, NoneType):
pass
| A |
python | sympy__sympy | sympy/series/limits.py | {
"start": 4365,
"end": 13227
} | class ____(Expr):
"""Represents an unevaluated limit.
Examples
========
>>> from sympy import Limit, sin
>>> from sympy.abc import x
>>> Limit(sin(x)/x, x, 0)
Limit(sin(x)/x, x, 0, dir='+')
>>> Limit(1/x, x, 0, dir="-")
Limit(1/x, x, 0, dir='-')
"""
def __new__(cls, e, z, z0, dir="+"):
e = sympify(e)
z = sympify(z)
z0 = sympify(z0)
if z0 in (S.Infinity, S.ImaginaryUnit*S.Infinity):
dir = "-"
elif z0 in (S.NegativeInfinity, S.ImaginaryUnit*S.NegativeInfinity):
dir = "+"
if(z0.has(z)):
raise NotImplementedError("Limits approaching a variable point are"
" not supported (%s -> %s)" % (z, z0))
if isinstance(dir, str):
dir = Symbol(dir)
elif not isinstance(dir, Symbol):
raise TypeError("direction must be of type basestring or "
"Symbol, not %s" % type(dir))
if str(dir) not in ('+', '-', '+-'):
raise ValueError("direction must be one of '+', '-' "
"or '+-', not %s" % dir)
obj = Expr.__new__(cls)
obj._args = (e, z, z0, dir)
return obj
@property
def free_symbols(self):
e = self.args[0]
isyms = e.free_symbols
isyms.difference_update(self.args[1].free_symbols)
isyms.update(self.args[2].free_symbols)
return isyms
def pow_heuristics(self, e, z, z0):
b1, e1 = e.base, e.exp
if not b1.has(z):
res = limit(e1*log(b1), z, z0)
return exp(res)
ex_lim = limit(e1, z, z0)
base_lim = limit(b1, z, z0)
if base_lim is S.One:
if ex_lim in (S.Infinity, S.NegativeInfinity):
res = limit(e1*(b1 - 1), z, z0)
return exp(res)
if base_lim is S.NegativeInfinity and ex_lim is S.Infinity:
return S.ComplexInfinity
def doit(self, **hints):
"""Evaluates the limit.
Parameters
==========
deep : bool, optional (default: True)
Invoke the ``doit`` method of the expressions involved before
taking the limit.
hints : optional keyword arguments
To be passed to ``doit`` methods; only used if deep is True.
"""
e, z, z0, dir = self.args
if str(dir) == '+-':
r = limit(e, z, z0, dir='+')
l = limit(e, z, z0, dir='-')
if isinstance(r, Limit) and isinstance(l, Limit):
if r.args[0] == l.args[0]:
return self
if r == l:
return l
if r.is_infinite and l.is_infinite:
return S.ComplexInfinity
raise ValueError("The limit does not exist since "
"left hand limit = %s and right hand limit = %s"
% (l, r))
if z0 is S.ComplexInfinity:
raise NotImplementedError("Limits at complex "
"infinity are not implemented")
if z0.is_infinite:
cdir = sign(z0)
cdir = cdir/abs(cdir)
e = e.subs(z, cdir*z)
dir = "-"
z0 = S.Infinity
if hints.get('deep', True):
e = e.doit(**hints)
z = z.doit(**hints)
z0 = z0.doit(**hints)
if e == z:
return z0
if not e.has(z):
return e
if z0 is S.NaN:
return S.NaN
if e.has(*_illegal):
return self
if e.is_Order:
return Order(limit(e.expr, z, z0), *e.args[1:])
cdir = S.Zero
if str(dir) == "+":
cdir = S.One
elif str(dir) == "-":
cdir = S.NegativeOne
def set_signs(expr):
if not expr.args:
return expr
newargs = tuple(set_signs(arg) for arg in expr.args)
if newargs != expr.args:
expr = expr.func(*newargs)
abs_flag = isinstance(expr, Abs)
arg_flag = isinstance(expr, arg)
sign_flag = isinstance(expr, sign)
if abs_flag or sign_flag or arg_flag:
try:
sig = limit(expr.args[0], z, z0, dir)
if sig.is_zero:
sig = limit(1/expr.args[0], z, z0, dir)
except NotImplementedError:
return expr
else:
if sig.is_extended_real:
if (sig < 0) == True:
return (-expr.args[0] if abs_flag else
S.NegativeOne if sign_flag else S.Pi)
elif (sig > 0) == True:
return (expr.args[0] if abs_flag else
S.One if sign_flag else S.Zero)
return expr
if e.has(Float):
# Convert floats like 0.5 to exact SymPy numbers like S.Half, to
# prevent rounding errors which can lead to unexpected execution
# of conditional blocks that work on comparisons
# Also see comments in https://github.com/sympy/sympy/issues/19453
from sympy.simplify.simplify import nsimplify
e = nsimplify(e)
e = set_signs(e)
if e.is_meromorphic(z, z0):
if z0 is S.Infinity:
newe = e.subs(z, 1/z)
# cdir changes sign as oo- should become 0+
cdir = -cdir
else:
newe = e.subs(z, z + z0)
try:
coeff, ex = newe.leadterm(z, cdir=cdir)
except ValueError:
pass
else:
if ex > 0:
return S.Zero
elif ex == 0:
return coeff
if cdir == 1 or not(int(ex) & 1):
return S.Infinity*sign(coeff)
elif cdir == -1:
return S.NegativeInfinity*sign(coeff)
else:
return S.ComplexInfinity
if z0 is S.Infinity:
if e.is_Mul:
e = factor_terms(e)
dummy = Dummy('z', positive=z.is_positive, negative=z.is_negative, real=z.is_real)
newe = e.subs(z, 1/dummy)
# cdir changes sign as oo- should become 0+
cdir = -cdir
newz = dummy
else:
newe = e.subs(z, z + z0)
newz = z
try:
coeff, ex = newe.leadterm(newz, cdir=cdir)
except (ValueError, NotImplementedError, PoleError):
# The NotImplementedError catching is for custom functions
from sympy.simplify.powsimp import powsimp
e = powsimp(e)
if e.is_Pow:
r = self.pow_heuristics(e, z, z0)
if r is not None:
return r
try:
coeff = newe.as_leading_term(newz, cdir=cdir)
if coeff != newe and (coeff.has(exp) or coeff.has(S.Exp1)):
return gruntz(coeff, newz, 0, "-" if re(cdir).is_negative else "+")
except (ValueError, NotImplementedError, PoleError):
pass
else:
if isinstance(coeff, AccumBounds) and ex == S.Zero:
return coeff
if coeff.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN):
return self
if not coeff.has(newz):
if ex.is_positive:
return S.Zero
elif ex == 0:
return coeff
elif ex.is_negative:
if cdir == 1:
return S.Infinity*sign(coeff)
elif cdir == -1:
return S.NegativeInfinity*sign(coeff)*S.NegativeOne**(S.One + ex)
else:
return S.ComplexInfinity
else:
raise NotImplementedError("Not sure of sign of %s" % ex)
# gruntz fails on factorials but works with the gamma function
# If no factorial term is present, e should remain unchanged.
# factorial is defined to be zero for negative inputs (which
# differs from gamma) so only rewrite for non-negative z0.
if z0.is_extended_nonnegative:
e = e.rewrite(factorial, gamma)
l = None
try:
r = gruntz(e, z, z0, dir)
if r is S.NaN or l is S.NaN:
raise PoleError()
except (PoleError, ValueError):
if l is not None:
raise
r = heuristics(e, z, z0, dir)
if r is None:
return self
return r
| Limit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.