| language (1 distinct value) | repo (346 distinct values) | path (string, 6 to 201 chars) | class_span (dict) | source (string, 21 to 2.38M chars) | target (string, 1 to 96 chars) |
|---|---|---|---|---|---|
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 11750,
"end": 11896
} | class ____(_HybridNearBase):
text: Union[str, List[str]]
move_to: Optional[Move] = None
move_away: Optional[Move] = None
| _HybridNearText |
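The row above illustrates the record layout used throughout this dump. A minimal sketch of how its fields fit together, with semantics inferred from the rows shown here rather than from an official schema: `source` holds a class definition with the name masked as `____`, `target` holds the masked name, and `class_span` appears to give the character offsets of the class within the file at `path`.

```python
# Field semantics inferred from the rows in this dump (not an official schema);
# the source string is abbreviated here.
row = {
    "language": "python",
    "repo": "weaviate__weaviate-python-client",
    "path": "weaviate/collections/classes/grpc.py",
    "class_span": {"start": 11750, "end": 11896},
    "source": "class ____(_HybridNearBase):\n    text: Union[str, List[str]]\n    ...",
    "target": "_HybridNearText",
}

# Reconstruct the original class header by filling the blank back in.
reconstructed = row["source"].replace("____", row["target"], 1)
assert reconstructed.startswith(f"class {row['target']}(")
```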
python | getsentry__sentry | src/sentry/integrations/types.py | {
"start": 1061,
"end": 1167
} | class ____(StrEnum):
SEGMENT = "segment"
SQS = "sqs"
SPLUNK = "splunk"
| DataForwarderProviderSlug |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 103045,
"end": 108378
} | class ____(Token):
"""
Token to exactly match a specified string as a keyword, that is,
it must be immediately preceded and followed by whitespace or
non-keyword characters. Compare with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``ident_chars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example:
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> Keyword("start").parse_string("start")
ParseResults(['start'], {})
>>> Keyword("start").parse_string("starting")
Traceback (most recent call last):
ParseException: Expected Keyword 'start', keyword was immediately
followed by keyword character, found 'ing' (at char 5), (line:1, col:6)
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> Keyword("start").parse_string("starting").debug()
Traceback (most recent call last):
ParseException: Expected Keyword "start", keyword was immediately
followed by keyword character, found 'ing' ...
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(
self,
match_string: str = "",
ident_chars: typing.Optional[str] = None,
caseless: bool = False,
**kwargs,
) -> None:
matchString = deprecate_argument(kwargs, "matchString", "")
identChars = deprecate_argument(kwargs, "identChars", None)
super().__init__()
identChars = identChars or ident_chars
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
match_string = matchString or match_string
self.match = match_string
self.matchLen = len(match_string)
self.firstMatchChar = match_string[:1]
if not self.firstMatchChar:
raise ValueError("null string passed to Keyword; use Empty() instead")
self.errmsg = f"Expected {type(self).__name__} {self.name}"
self._may_return_empty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = match_string.upper()
identChars = identChars.upper()
self.ident_chars = set(identChars)
@property
def identChars(self) -> set[str]:
"""
.. deprecated:: 3.3.0
use ident_chars instead.
Property returning the characters being used as keyword characters for this expression.
"""
return self.ident_chars
def _generateDefaultName(self) -> str:
return repr(self.match)
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
errmsg = self.errmsg or ""
errloc = loc
if self.caseless:
if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
if loc == 0 or instring[loc - 1].upper() not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
):
return loc + self.matchLen, self.match
# followed by keyword char
errmsg += ", was immediately followed by keyword character"
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
elif (
instring[loc] == self.firstMatchChar
and self.matchLen == 1
or instring.startswith(self.match, loc)
):
if loc == 0 or instring[loc - 1] not in self.identChars:
if (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
):
return loc + self.matchLen, self.match
# followed by keyword char
errmsg += ", keyword was immediately followed by keyword character"
errloc = loc + self.matchLen
else:
# preceded by keyword char
errmsg += ", keyword was immediately preceded by keyword character"
errloc = loc - 1
# else no match just raise plain exception
raise ParseException(instring, errloc, errmsg, self)
@staticmethod
def set_default_keyword_chars(chars) -> None:
"""
Overrides the default characters used by :class:`Keyword` expressions.
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
# Compatibility synonyms
setDefaultKeywordChars = staticmethod(
replaced_by_pep8("setDefaultKeywordChars", set_default_keyword_chars)
)
| Keyword |
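The Keyword docstring above contrasts `Keyword` with `Literal` and mentions the `ident_chars` and `caseless` options; a short sketch of that behavior using pyparsing's public API (exception wording can vary between versions):

```python
from pyparsing import CaselessKeyword, Keyword, Literal, ParseException

Literal("if").parse_string("ifAndOnlyIf")     # matches the leading 'if'
Keyword("if").parse_string("if(y==2)")        # matches: '(' is not a keyword character
CaselessKeyword("if").parse_string("IF x=1")  # case-insensitive match

try:
    Keyword("start").parse_string("starting")
except ParseException:
    pass  # 'start' is immediately followed by a keyword character

# ident_chars (as in the constructor shown above) controls what counts as a keyword
# character; excluding '_' lets Keyword("if") match even when an underscore follows.
Keyword("if", ident_chars="abcdefghijklmnopqrstuvwxyz").parse_string("if_x")
```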
python | ray-project__ray | python/ray/train/v2/tests/util.py | {
"start": 1143,
"end": 2944
} | class ____(WorkerGroup):
_start_failure = None
_poll_failure = None
# TODO: Clean this up and use Mocks instead.
def __init__(
self,
train_run_context: TrainRunContext,
worker_group_context: WorkerGroupContext,
callbacks=None,
):
self._num_workers = worker_group_context.num_workers
self._worker_group_state = None
self._worker_statuses = {}
def poll_status(self, *args, **kwargs) -> WorkerGroupPollStatus:
if self._poll_failure:
raise self._poll_failure
return WorkerGroupPollStatus(
worker_statuses=self._worker_statuses,
)
def _start(self):
num_workers = self._num_workers
if self._start_failure:
raise self._start_failure
self._worker_group_state = WorkerGroupState(
start_time=time_monotonic(),
workers=[MagicMock() for i in range(num_workers)],
placement_group=MagicMock(),
sync_actor=None,
)
self._worker_statuses = {
i: WorkerStatus(running=True, error=None) for i in range(num_workers)
}
def shutdown(self):
self._worker_group_state = None
def abort(self):
pass
# === Test methods ===
def error_worker(self, worker_index):
status = self._worker_statuses[worker_index]
status.error = RuntimeError(f"Worker {worker_index} failed")
def finish_worker(self, worker_index):
status = self._worker_statuses[worker_index]
status.running = False
@classmethod
def set_start_failure(cls, start_failure):
cls._start_failure = start_failure
@classmethod
def set_poll_failure(cls, poll_failure):
cls._poll_failure = poll_failure
| DummyWorkerGroup |
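A rough sketch of how this test dummy might be driven, assuming it can be constructed directly with mocked context objects (the real fixtures in Ray's test suite may differ); only methods defined above are used:

```python
from unittest.mock import MagicMock

ctx = MagicMock()
ctx.num_workers = 2  # the dummy only reads num_workers from the context
wg = DummyWorkerGroup(train_run_context=MagicMock(), worker_group_context=ctx)

wg._start()
wg.error_worker(0)   # mark worker 0 as failed
assert wg.poll_status().worker_statuses[0].error is not None
wg.finish_worker(1)  # mark worker 1 as finished
assert not wg.poll_status().worker_statuses[1].running

DummyWorkerGroup.set_poll_failure(RuntimeError("injected poll failure"))
try:
    wg.poll_status()
except RuntimeError:
    pass  # failure injected via the classmethod above
```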
python | google__pytype | pytype/abstract/_instances.py | {
"start": 27740,
"end": 29120
} | class ____(_base.BaseValue, mixin.HasSlots):
"""Sequence length for match statements."""
def __init__(
self, sequence: list[cfg.Variable], ctx: "context.Context"
) -> None:
super().__init__("SequenceLength", ctx)
length = 0
splat = False
for var in sequence:
if any(isinstance(x, Splat) for x in var.data):
splat = True
else:
length += 1
self.length = length
self.splat = splat
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__sub__", self.sub_slot)
def __repr__(self) -> str:
splat = "+" if self.splat else ""
return f"SequenceLength[{self.length}{splat}]"
def instantiate(
self,
node: cfg.CFGNode,
container: (
_instance_base.SimpleValue | abstract_utils.DummyContainer | None
) = None,
) -> cfg.Variable:
return self.to_variable(node)
def sub_slot(
self, node: cfg.CFGNode, other_var: cfg.Variable
) -> tuple[cfg.CFGNode, cfg.Variable]:
# We should not get a ConversionError here; this is code generated by the
# compiler from a literal sequence in a concrete match statement
val = abstract_utils.get_atomic_python_constant(other_var, int)
if self.splat:
ret = self.ctx.convert.build_int(node)
else:
ret = self.ctx.convert.constant_to_var(self.length - val, node=node)
return node, ret
| SequenceLength |
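For context, this is the kind of match statement whose compiled form produces the length checks this class models (a plain-Python illustration; the names are only for exposition). A pattern with a `*rest` splat corresponds to `splat=True` and the `+` in the repr above:

```python
def describe(seq):
    match seq:
        case [first, second]:  # requires len(seq) == 2  -> SequenceLength[2]
            return (first, second)
        case [first, *rest]:   # requires len(seq) >= 1  -> SequenceLength[1+]
            return (first, rest)
        case _:
            return None

assert describe([1, 2]) == (1, 2)
assert describe([1, 2, 3]) == (1, [2, 3])
```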
python | mahmoud__glom | glom/matching.py | {
"start": 24488,
"end": 28455
} | class ____:
r"""The :class:`Switch` specifier type routes data processing based on
matching keys, much like the classic switch statement.
Here is a spec which differentiates between lowercase English
vowel and consonant characters:
>>> switch_spec = Match(Switch([(Or('a', 'e', 'i', 'o', 'u'), Val('vowel')),
... (And(str, M, M(T[2:]) == ''), Val('consonant'))]))
The constructor accepts a :class:`dict` of ``{keyspec: valspec}``
or a list of items, ``[(keyspec, valspec)]``. Keys are tried
against the current target in order. If a keyspec raises
:class:`GlomError`, the next keyspec is tried. Once a keyspec
succeeds, the corresponding valspec is evaluated and returned.
Let's try it out:
>>> glom('a', switch_spec)
'vowel'
>>> glom('z', switch_spec)
'consonant'
If no keyspec succeeds, a :class:`MatchError` is raised. Our spec
only works on characters (strings of length 1). Let's try a
non-character, the integer ``3``:
>>> glom(3, switch_spec)
Traceback (most recent call last):
...
glom.matching.MatchError: error raised while processing, details below.
Target-spec trace (most recent last):
- Target: 3
- Spec: Match(Switch([(Or('a', 'e', 'i', 'o', 'u'), Val('vowel')), (And(str, M, (M(T[2:]) == '')), Val('...
+ Spec: Switch([(Or('a', 'e', 'i', 'o', 'u'), Val('vowel')), (And(str, M, (M(T[2:]) == '')), Val('conson...
|\ Spec: Or('a', 'e', 'i', 'o', 'u')
||\ Spec: 'a'
||X glom.matching.MatchError: 3 does not match 'a'
||\ Spec: 'e'
||X glom.matching.MatchError: 3 does not match 'e'
||\ Spec: 'i'
||X glom.matching.MatchError: 3 does not match 'i'
||\ Spec: 'o'
||X glom.matching.MatchError: 3 does not match 'o'
||\ Spec: 'u'
||X glom.matching.MatchError: 3 does not match 'u'
|X glom.matching.MatchError: 3 does not match 'u'
|\ Spec: And(str, M, (M(T[2:]) == ''))
|| Spec: str
|X glom.matching.TypeMatchError: expected type str, not int
glom.matching.MatchError: no matches for target in Switch
.. note::
:class:`~glom.Switch` is one of several *branching* specifier
types in glom. See ":ref:`branched-exceptions`" for details on
interpreting its exception messages.
A *default* value can be passed to the spec to be returned instead
of raising a :class:`MatchError`.
.. note::
Switch implements control flow similar to the switch statement
proposed in `PEP622 <https://www.python.org/dev/peps/pep-0622/>`_.
"""
def __init__(self, cases, default=_MISSING):
if type(cases) is dict:
cases = list(cases.items())
if type(cases) is not list:
raise TypeError(
"expected cases argument to be of format {{keyspec: valspec}}"
" or [(keyspec, valspec)] not: {}".format(type(cases)))
self.cases = cases
# glom.match(cases, Or([(object, object)], dict))
# start dogfooding ^
self.default = default
if not cases:
raise ValueError('expected at least one case in %s, got: %r'
% (self.__class__.__name__, self.cases))
return
def glomit(self, target, scope):
for keyspec, valspec in self.cases:
try:
scope[glom](target, keyspec, scope)
except GlomError as ge:
continue
return scope[glom](target, valspec, chain_child(scope))
if self.default is not _MISSING:
return arg_val(target, self.default, scope)
raise MatchError("no matches for target in %s" % self.__class__.__name__)
def __repr__(self):
return f'{self.__class__.__name__}({bbrepr(self.cases)})'
RAISE = make_sentinel('RAISE') # flag object for "raise on check failure"
| Switch |
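The docstring notes that a `default` may be supplied so that an unmatched target returns a fallback instead of raising `MatchError`; a small sketch reusing the vowel/consonant spec from above (assuming, per the `arg_val` call in `glomit`, that a plain non-spec default is returned as-is):

```python
from glom import glom, Match, Switch, Or, And, M, T, Val

switch_spec = Match(Switch(
    [(Or('a', 'e', 'i', 'o', 'u'), Val('vowel')),
     (And(str, M, M(T[2:]) == ''), Val('consonant'))],
    default='neither',
))

assert glom('a', switch_spec) == 'vowel'
assert glom(3, switch_spec) == 'neither'  # no MatchError thanks to the default
```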
python | huggingface__transformers | src/transformers/models/levit/modeling_levit.py | {
"start": 18387,
"end": 20250
} | class ____(LevitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.patch_embeddings = LevitPatchEmbeddings(config)
self.encoder = LevitEncoder(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embeddings = self.patch_embeddings(pixel_values)
encoder_outputs = self.encoder(
embeddings,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
# global average pooling, (batch_size, seq_length, hidden_sizes) -> (batch_size, hidden_sizes)
pooled_output = last_hidden_state.mean(dim=1)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
Levit Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
| LevitModel |
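A hedged usage sketch for the backbone above via transformers' Auto classes; "facebook/levit-128S" is a published LeViT checkpoint used here only as an example, and the zero image is a stand-in for real input:

```python
import numpy as np
import torch
from transformers import AutoImageProcessor, LevitModel

processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
model = LevitModel.from_pretrained("facebook/levit-128S")

image = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# pooler_output is the mean over the sequence dimension, as in forward() above.
print(outputs.last_hidden_state.shape, outputs.pooler_output.shape)
```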
python | google__jax | jax/experimental/mosaic/gpu/fragmented_array.py | {
"start": 34804,
"end": 126066
} | class ____:
# An array of ir.Value, see checks in init for shapes.
registers: np.ndarray = dataclasses.field(repr=False)
layout: FragmentedLayout
is_signed: bool | None
def __init__(
self,
*,
_registers: np.ndarray,
_layout: FragmentedLayout,
_is_signed: bool | None,
):
"""Initializes a fragmented array.
This is a low-level API. Prefer using classmethods to construct fragmented
arrays instead.
"""
# We need to use ``object.__setattr__`` here because of ``frozen=True``.
object.__setattr__(self, "registers", _registers)
object.__setattr__(self, "layout", _layout)
object.__setattr__(self, "is_signed", _is_signed)
if (_is_signed is not None) != ir.IntegerType.isinstance(self.mlir_dtype):
raise TypeError(
"is_signed must be non-None if and only if the MLIR type is an"
f" integer type, got {_is_signed=} for {self.mlir_dtype}"
)
match self.layout:
# Registers are flat
case WGStridedFragLayout(shape):
[reg_size] = ir.VectorType(_registers.flat[0].type).shape
if (
math.prod(shape)
!= math.prod(_registers.shape) * WARPGROUP_SIZE * reg_size
):
raise ValueError(
f"Invalid register array shape: math.prod({_registers.shape}) *"
f" {WARPGROUP_SIZE} * {reg_size}, want: math.prod({shape})"
)
# Just a single register
case WGSplatFragLayout():
if _registers.size != 1:
raise ValueError(f"Invalid register array shape: {_registers.shape}")
case TiledLayout():
try:
self.layout.shape_from_registers_shape(_registers.shape)
except ValueError:
raise ValueError(
"Register array shape does not match the tiled layout"
) from None
case _:
raise NotImplementedError
@classmethod
def load_strided(
cls,
ref: ir.Value,
*,
is_signed: bool | None = None,
vec_size: int | None = None,
) -> FragmentedArray:
if not ir.MemRefType.isinstance(ref.type):
raise TypeError(ref.type)
ref_ty = ir.MemRefType(ref.type)
shape = tuple(ref_ty.shape)
if vec_size is None:
layout = WGStridedFragLayout.from_shaped_type(ref_ty)
if layout is None:
raise ValueError(
f"{ref_ty} must have a number of elements that is a multiple of"
f" {WARPGROUP_SIZE} (got {math.prod(shape)})"
)
else:
layout = WGStridedFragLayout(shape=shape, vec_size=vec_size)
registers = np.empty(layout.registers_shape(shape), dtype=object)
vec_ty = ir.VectorType.get((layout.vec_size,), ref_ty.element_type)
for _get, update, ref, idx in cls.transfer_strided(ref, layout.vec_size):
update(registers, vector.load(vec_ty, ref, idx))
return cls(_registers=registers, _layout=layout, _is_signed=is_signed)
@classmethod
def splat(
cls, value, shape, layout=None, *, is_signed: bool | None = None
) -> FragmentedArray:
layout = layout or WGSplatFragLayout(shape)
match layout:
case WGSplatFragLayout():
pass
case WGStridedFragLayout() | TiledLayout():
value = vector.broadcast(
layout.registers_element_type(value.type), value
)
case _:
raise NotImplementedError(layout)
return cls(
_registers=np.full(layout.registers_shape(shape), value, dtype=object),
_layout=layout,
_is_signed=is_signed,
)
@staticmethod
def broadcasted_iota(
dtype: ir.Type,
shape: tuple[int, ...],
dimension: int,
layout: FragmentedLayout | None = None,
*,
is_signed: bool | None = None,
) -> FragmentedArray:
"""Creates a broadcasted iota array along the specified dimension."""
if dimension >= len(shape):
raise ValueError(
"`dimension` must be smaller than the rank of the array."
)
def cast(idx: ir.Value) -> ir.Value:
if ir.FloatType.isinstance(dtype):
i32 = ir.IntegerType.get_signless(32)
return arith.uitofp(dtype, arith.index_cast(i32, idx))
return arith.index_cast(dtype, idx)
return mgpu.FragmentedArray.splat(
llvm.mlir_undef(dtype),
shape,
layout,
is_signed=is_signed,
).foreach(
lambda _, idx: cast(idx[dimension]),
create_array=True,
is_signed=is_signed,
)
@property
def shape(self) -> tuple[int, ...]:
match self.layout:
case WGStridedFragLayout(shape):
return shape
case WGSplatFragLayout(shape=shape):
return shape
case TiledLayout():
return self.layout.shape_from_registers_shape(self.registers.shape)
case _:
raise NotImplementedError
@property
def mlir_dtype(self) -> ir.Type:
reg_ty = self.registers.flat[0].type
match self.layout:
case WGStridedFragLayout() | TiledLayout():
return ir.VectorType(reg_ty).element_type
case WGSplatFragLayout():
return reg_ty
case _:
raise NotImplementedError
def to_layout(self, new_layout: FragmentedLayout) -> FragmentedArray:
"""Converts the fragmented array to the given layout."""
i32 = ir.IntegerType.get_signless(32)
c = lambda x: arith.constant(i32, x)
if self.layout == new_layout:
return self
shape = self.shape
bitwidth = utils.bitwidth(self.mlir_dtype)
transpose_pairs = (
(WGMMA_LAYOUT, WGMMA_TRANSPOSED_LAYOUT),
(TCGEN05_LAYOUT, TCGEN05_TRANSPOSED_LAYOUT),
)
if bitwidth in {16, 32} and (
(self.layout, new_layout) in transpose_pairs
or (new_layout, self.layout) in transpose_pairs
):
is_even_row = arith.cmpi(
arith.CmpIPredicate.eq,
arith.remui(arith.divui(utils.thread_idx(), c(4)), c(2)),
c(0),
)
perm = arith.select(is_even_row, c(0x5410), c(0x3276))
tmp_new_regs = []
for reg in self.registers.flat:
reg_ty = reg.type
if bitwidth == 16:
reg = utils.bitcast(reg, i32)
reg_shfl = utils.shfl_bfly(reg, 4)
new_reg = utils.prmt(reg, reg_shfl, perm)
elif bitwidth == 32:
i32_vec = ir.VectorType.get((1,), i32)
regs = [
utils.bitcast(utils.vector_slice(reg, slice(i, i + 1)), i32)
for i in range(2)
]
reg_to_shfl = arith.select(is_even_row, regs[1], regs[0])
reg_shfl = utils.shfl_bfly(reg_to_shfl, 4)
new_reg_low = arith.select(is_even_row, regs[0], reg_shfl)
new_reg_high = arith.select(is_even_row, reg_shfl, regs[1])
new_reg_i32 = utils.vector_concat([
utils.bitcast(new_reg_low, i32_vec),
utils.bitcast(new_reg_high, i32_vec),
])
new_reg = utils.bitcast(new_reg_i32, reg_ty)
else:
raise ValueError(f"Unsupported bitwidth: {bitwidth}")
tmp_new_regs.append(utils.bitcast(new_reg, reg_ty))
new_regs = np.asarray(
tmp_new_regs, dtype=object
).reshape(new_layout.registers_shape(shape))
return FragmentedArray(
_registers=new_regs, _layout=new_layout, _is_signed=self.is_signed
)
if (
isinstance(self.layout, TiledLayout)
and isinstance(new_layout, TiledLayout)
and self.layout == tmem_native_layout(self.layout.vector_length)
and new_layout == tmem_native_layout(new_layout.vector_length)
):
new_registers = np.empty(new_layout.registers_shape(shape), dtype=object)
if self.layout.vector_length > new_layout.vector_length:
ratio = self.layout.vector_length // new_layout.vector_length
new_length = new_layout.vector_length
for idx, reg in np.ndenumerate(self.registers):
for i in range(ratio):
new_reg = utils.vector_slice(
reg, slice(i * new_length, (i + 1) * new_length)
)
new_registers[(idx[0], idx[1] * ratio + i, *idx[2:])] = new_reg
elif self.layout.vector_length < new_layout.vector_length:
ratio = new_layout.vector_length // self.layout.vector_length
for idx in np.ndindex(new_registers.shape):
new_reg = utils.vector_concat([
self.registers[idx[0], idx[1] * ratio + i, *idx[2:]]
for i in range(ratio)
])
new_registers[idx] = new_reg
return FragmentedArray(
_registers=new_registers, _layout=new_layout, _is_signed=self.is_signed,
)
if self.layout == WGMMA_LAYOUT_ACC_32BIT and new_layout == WGMMA_LAYOUT:
new_regs_shape = new_layout.registers_shape(shape)
assert new_regs_shape[-1] == 1
assert self.registers.shape == (*new_regs_shape[:-1], 2, 1)
new_regs = np.empty(new_regs_shape, dtype=object)
for idx in np.ndindex(new_regs_shape[:-1]):
new_regs[(*idx, 0)] = utils.vector_concat([
self.registers[*idx, i, 0] for i in range(2)
])
return FragmentedArray(
_registers=new_regs, _layout=new_layout, _is_signed=self.is_signed,
)
if self.layout == WGMMA_LAYOUT and new_layout == WGMMA_LAYOUT_ACC_32BIT:
new_regs_shape = new_layout.registers_shape(shape)
assert self.registers.shape[-1] == 1
assert new_regs_shape == (*self.registers.shape[:-1], 2, 1)
new_regs = np.empty(new_regs_shape, dtype=object)
for idx, reg in np.ndenumerate(self.registers):
for i in range(2):
new_regs[(*idx[:-1], i, 0)] = utils.vector_slice(reg, slice(i, i + 1))
return FragmentedArray(
_registers=new_regs, _layout=new_layout, _is_signed=self.is_signed,
)
if (
self.layout == WGMMA_LAYOUT_UPCAST_2X
and new_layout == WGMMA_LAYOUT
and (dtype_bitwidth := utils.bitwidth(self.mlir_dtype)) <= 16
):
assert shape[1] % 16 == 0 # Should be implied by the layout
new_registers = np.empty(new_layout.registers_shape(shape), dtype=object)
is_even = arith.cmpi(
arith.CmpIPredicate.eq, arith.remui(utils.thread_idx(), c(2)), c(0)
)
registers = self.registers
if dtype_bitwidth == 4:
if registers.shape[1] % 2:
raise NotImplementedError(
"This relayout implementation requires an even number of column"
" tiles (to pack pairs of them for efficiency)"
)
# We pair up the consecutive column tiles, so each register is 32-bit.
# If this layout originated from a WGMMA_LAYOUT_UPCAST_4X layout,
# LLVM will realize that the paired up vectors actually came from the
# same 32-bit register and it will become a no-op.
col_minor_registers = np.moveaxis(registers, 1, -1)
flat_registers = [
utils.vector_concat((l, h))
for l, h in zip(
col_minor_registers.flat[::2], col_minor_registers.flat[1::2]
)
]
registers = np.asarray(flat_registers, dtype=object).reshape(
*col_minor_registers.shape[:-1], col_minor_registers.shape[-1] // 2
)
registers = np.moveaxis(registers, -1, 1)
for idx, reg in np.ndenumerate(registers):
if dtype_bitwidth == 16:
assert reg.type.shape == [4]
# A single vector is 64-bits, but shuffles are only 32-bit wide.
# We only shuffle the half that needs to go to the other thread.
low = utils.vector_slice(reg, slice(0, 2))
high = utils.vector_slice(reg, slice(2, 4))
to_exchange = arith.select(is_even, high, low)
# Exchange values between even and odd threads.
exchanged = utils.shfl_bfly(to_exchange, 1)
low = arith.select(is_even, low, exchanged)
high = arith.select(is_even, exchanged, high)
new_registers[(idx[0], idx[1] * 2, *idx[2:-1])] = low
new_registers[(idx[0], idx[1] * 2 + 1, *idx[2:-1])] = high
elif dtype_bitwidth == 8:
assert reg.type.shape == [4]
# The vector is 32-bits, so we just shuffle the whole thing and
# use prmt to blend it with the local register.
exchanged = utils.shfl_bfly(reg, 1)
# Consider lanes 0 and 1, because the situation is symmetric for
# each pair. If we feed reg[lane] and exchanged[lane] (which is
# really the same as reg of the other lane) to prmt, we can index
# the elements of the result using the following indices:
# reg[0]: 0 1 2 3 reg[1]: 8 9 10 11
# prmt[0]: 0 1 2 3 4 5 6 7
# prmt[1]: 4 5 6 7 0 1 2 3
# The expected outputs and their respective permutations are:
# out[0]: 0 1 8 9 out[1]: 2 3 10 11
# prmt[0]: 0 1 4 5 prmt[1]: 6 7 2 3
# Note that the patterns still need to be flipped, since we listed
# bytes with LSB on the left, which is the opposite of how the
# numeric constants are spelled in Python (LSB on the right).
perm = arith.select(is_even, c(0x5410), c(0x3276))
blend = utils.prmt(reg, exchanged, perm)
for i in range(2):
reg = utils.vector_slice(blend, slice(i * 2, i * 2 + 2))
new_registers[(idx[0], idx[1] * 2 + i, *idx[2:-1])] = reg
else:
assert dtype_bitwidth == 4
assert reg.type.shape == [8] # We paired up the registers above.
exchanged = utils.shfl_bfly(reg, 1)
# See comment above for a more complete explanation.
# reg[0]: 0 1 2 3 16 17 18 19 reg[1]: 8 9 10 11 24 25 26 27
# prmt[0]: -0- -1- --2-- --3-- -4- --5-- --6-- --7--
# prmt[1]: -4- -5- --6-- --7-- -0- --1-- --2-- --3--
# The expected outputs and their respective permutations are:
# out[0]: 0 1 8 9 16 17 24 25 out[1]: 2 3 10 11 18 19 26 27
# prmt[0]: -0- -4- --2-- --6-- prmt[1]: -5- --1-- --7-- --3--
perm = arith.select(is_even, c(0x6240), c(0x3715))
blend = utils.prmt(reg, exchanged, perm)
for i in range(4):
reg = utils.vector_slice(blend, slice(i * 2, i * 2 + 2))
new_registers[(idx[0], idx[1] * 4 + i, *idx[2:-1])] = reg
assert all(r is not None for r in new_registers)
return FragmentedArray(
_registers=new_registers, _layout=new_layout, _is_signed=self.is_signed,
)
if (
self.layout == WGMMA_LAYOUT_UPCAST_4X
and new_layout == WGMMA_LAYOUT_UPCAST_2X
and utils.bitwidth(self.mlir_dtype) == 4
):
assert shape[0] % 64 == 0 # Should be implied by the layout
assert shape[1] % 32 == 0 # Should be implied by the layout
new_registers = np.empty(new_layout.registers_shape(shape), dtype=object)
i32 = ir.IntegerType.get_signless(32)
c = lambda x: arith.constant(i32, x)
is_01 = arith.cmpi(
arith.CmpIPredicate.ult, arith.remui(utils.thread_idx(), c(4)), c(2)
)
for idx, reg in np.ndenumerate(self.registers):
assert ir.VectorType(reg.type).shape == [8]
# The vector is 32-bits, so we just shuffle the whole thing and
# use prmt to blend it with the local register.
exchanged = utils.shfl_bfly(reg, 2)
# See comments above for conventions. Here we exchange data between
# threads with lane index related by flipping 2nd bit (e.g. 0 and 2).
# reg[0]: 0 1 2 3 4 5 6 7 reg[2]: 16 17 18 19 20 21 22 23
# prmt[0]: -0- -1- -2- -3- --4-- --5-- --6-- --7--
# prmt[1]: -4- -5- -6- -7- --0-- --1-- --2-- --3--
# The expected outputs and their respective permutations are:
# out[0]: 0 1 2 3 16 17 18 19 out[2]: 4 5 6 7 20 21 22 23
# prmt[0]: -0- -1- --4-- --5-- prmt[2]: -6- -7- --2-- --3--
perm = arith.select(is_01, c(0x5410), c(0x3276))
blend = utils.prmt(reg, exchanged, perm)
for i in range(2):
reg = utils.vector_slice(blend, slice(i * 4, i * 4 + 4))
new_registers[(idx[0], idx[1] * 2 + i, *idx[2:-1])] = reg
assert all(r is not None for r in new_registers)
return FragmentedArray(
_registers=new_registers, _layout=new_layout, _is_signed=self.is_signed,
)
if self.layout == WGMMA_LAYOUT_UPCAST_4X and new_layout == WGMMA_LAYOUT:
return self.to_layout(WGMMA_LAYOUT_UPCAST_2X).to_layout(new_layout)
if not isinstance(self.layout, WGSplatFragLayout):
raise NotImplementedError(
f"Cannot convert from {self.layout} to {new_layout}"
)
[reg] = self.registers.flat
return type(self).splat(
reg, self.shape, new_layout, is_signed=self.is_signed
)
def _pointwise(
self, op, *other, output_is_signed: bool | None = None
) -> FragmentedArray:
# If our layout is a splat, then we should either dispatch to a non-splat
# layout, or broadcast ourselves to the output shape first.
if isinstance(self.layout, WGSplatFragLayout):
output_shape = self.shape
for i, o in enumerate(other):
if not isinstance(o, FragmentedArray):
continue
elif not isinstance(o.layout, WGSplatFragLayout):
return o._pointwise(
lambda o, this, *args: op(this, *args[:i], o, *args[i:]),
self,
*other[:i],
*other[i + 1 :],
output_is_signed=output_is_signed,
)
else:
output_shape = np.broadcast_shapes(output_shape, o.shape)
# If we get here then we haven't found any non-splat layout.
if self.shape != output_shape:
return self.broadcast(output_shape)._pointwise(
op, *other, output_is_signed=output_is_signed
)
other_arrs = []
for o in other:
if not isinstance(o, FragmentedArray):
if isinstance(o, (float, int)):
o = utils.c(o, self.mlir_dtype)
elif not isinstance(o, ir.Value):
raise NotImplementedError(o)
o = FragmentedArray.splat(
o, shape=self.shape, layout=self.layout, is_signed=self.is_signed
)
if isinstance(o.layout, WGSplatFragLayout):
if not o.layout.can_broadcast_to(self.shape):
raise ValueError(
f"Cannot broadcast shape {self.shape} to layout {o.layout}")
o = FragmentedArray.splat(
o.registers.flat[0],
shape=self.shape,
layout=self.layout,
is_signed=o.is_signed,
)
else:
if self.layout != o.layout:
raise ValueError("Incompatible FragmentedArray layouts")
if self.registers.shape != o.registers.shape:
raise ValueError("Incompatible FragmentedArray shapes")
other_arrs.append(o)
new_regs = np.empty_like(self.registers)
for idx, reg in np.ndenumerate(self.registers):
new_regs[idx] = op(reg, *(o.registers[idx] for o in other_arrs))
reg_ty = new_regs.flat[0].type
if ir.VectorType.isinstance(reg_ty):
reg_ty = ir.VectorType(reg_ty).element_type
if output_is_signed is None and ir.IntegerType.isinstance(reg_ty):
output_is_signed = self.is_signed
return FragmentedArray(
_registers=new_regs, _layout=self.layout, _is_signed=output_is_signed
)
def __pos__(self):
return self
def __neg__(self):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(arith.negf)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return 0 - self
else:
return NotImplemented
def __add__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(addf, other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(arith.addi, other)
else:
return NotImplemented
def __radd__(self, other):
return self + other
def __mul__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(mulf, other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(arith.muli, other)
else:
return NotImplemented
def __rmul__(self, other):
return self * other
def __sub__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(subf, other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(arith.subi, other)
else:
return NotImplemented
def __rsub__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(lambda s, o: subf(o, s), other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(lambda s, o: arith.subi(o, s), other)
else:
return NotImplemented
def __truediv__(self, other):
if not ir.FloatType.isinstance(self.mlir_dtype):
return NotImplemented
return self._pointwise(arith.divf, other)
def __rtruediv__(self, other):
if not ir.FloatType.isinstance(self.mlir_dtype):
return NotImplemented
return self._pointwise(lambda s, o: arith.divf(o, s), other)
def __floordiv__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(
lambda s, o: mlir_math.floor(arith.divf(s, o)), other
)
elif ir.IntegerType.isinstance(self.mlir_dtype):
if self.is_signed:
return self._pointwise(arith.floordivsi, other)
else:
return self._pointwise(arith.divui, other)
else:
return NotImplemented
def __rfloordiv__(self, other):
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(
lambda s, o: mlir_math.floor(arith.divf(o, s)), other
)
elif ir.IntegerType.isinstance(self.mlir_dtype):
if self.is_signed:
return self._pointwise(lambda s, o: arith.floordivsi(o, s), other)
else:
return self._pointwise(lambda s, o: arith.divui(o, s), other)
else:
return NotImplemented
def __mod__(self, other):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
if self.is_signed:
return self._pointwise(arith.remsi, other)
else:
return self._pointwise(arith.remui, other)
def __rmod__(self, other):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
if self.is_signed:
return self._pointwise(lambda s, o: arith.remsi(o, s), other)
else:
return self._pointwise(lambda s, o: arith.remui(o, s), other)
def __invert__(self):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
return self ^ ~0
def __or__(self, other):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
return self._pointwise(arith.ori, other)
def __ror__(self, other):
return self | other
def __and__(self, other):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
return self._pointwise(arith.andi, other)
def __rand__(self, other):
return self & other
def __xor__(self, other):
if not ir.IntegerType.isinstance(self.mlir_dtype):
return NotImplemented
return self._pointwise(arith.xori, other)
def __rxor__(self, other):
return self ^ other
def __eq__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.OEQ,
si_pred=arith.CmpIPredicate.eq,
ui_pred=arith.CmpIPredicate.eq,
)
def __ne__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.UNE,
si_pred=arith.CmpIPredicate.ne,
ui_pred=arith.CmpIPredicate.ne,
)
def __lt__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.OLT,
si_pred=arith.CmpIPredicate.slt,
ui_pred=arith.CmpIPredicate.ult,
)
def __le__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.OLE,
si_pred=arith.CmpIPredicate.sle,
ui_pred=arith.CmpIPredicate.ule,
)
def __gt__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.OGT,
si_pred=arith.CmpIPredicate.sgt,
ui_pred=arith.CmpIPredicate.ugt,
)
def __ge__(self, other):
return self._compare(
other,
f_pred=arith.CmpFPredicate.OGE,
si_pred=arith.CmpIPredicate.sge,
ui_pred=arith.CmpIPredicate.uge,
)
def _compare(self, other, *, f_pred, si_pred, ui_pred):
if ir.FloatType.isinstance(self.mlir_dtype):
pred = functools.partial(arith.cmpf, f_pred)
elif ir.IntegerType.isinstance(self.mlir_dtype):
if self.is_signed:
pred = functools.partial(arith.cmpi, si_pred)
else:
pred = functools.partial(arith.cmpi, ui_pred)
else:
return NotImplemented
return self._pointwise(pred, other, output_is_signed=False)
def max(self, other) -> FragmentedArray:
if ir.FloatType.isinstance(self.mlir_dtype):
maximumf = arith.maximumf
if ir.F32Type.isinstance(self.mlir_dtype):
maximumf = self._lift_fast_instr("max.NaN.f32")
return self._pointwise(maximumf, other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(
arith.maxsi if self.is_signed else arith.maxui, other
)
else:
raise NotImplementedError
def min(self, other) -> FragmentedArray:
if ir.FloatType.isinstance(self.mlir_dtype):
return self._pointwise(arith.minimumf, other)
elif ir.IntegerType.isinstance(self.mlir_dtype):
return self._pointwise(
arith.minsi if self.is_signed else arith.minui, other
)
else:
raise NotImplementedError
def exp(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx:
dtype = self.mlir_dtype
log2e = arith.constant(dtype, ir.FloatAttr.get(dtype, 1.4426950408889634))
return cast(FragmentedArray, self * log2e).exp2()
return self._pointwise(mlir_math.exp)
def exp2(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx:
if not ir.F32Type.isinstance(self.mlir_dtype):
raise NotImplementedError(self.mlir_dtype)
return self._pointwise(self._lift_fast_instr("ex2.approx.ftz.f32"))
return self._pointwise(mlir_math.exp2)
def log(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx:
dtype = self.mlir_dtype
ln2 = arith.constant(dtype, ir.FloatAttr.get(dtype, 0.6931471805599453))
return self.log2(approx=True) * ln2
return self._pointwise(mlir_math.log)
def log2(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError(self.mlir_dtype)
if approx:
if not ir.F32Type.isinstance(self.mlir_dtype):
raise NotImplementedError(self.mlir_dtype)
return self._pointwise(self._lift_fast_instr("lg2.approx.ftz.f32"))
return self._pointwise(mlir_math.log2)
def sin(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx and self.mlir_dtype != ir.F32Type.get():
raise NotImplementedError
return self._pointwise(
self._lift_fast_instr("sin.approx.f32") if approx else mlir_math.sin
)
def cos(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx and self.mlir_dtype != ir.F32Type.get():
raise NotImplementedError
return self._pointwise(
self._lift_fast_instr("cos.approx.f32") if approx else mlir_math.cos
)
def tanh(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx and self.mlir_dtype != ir.F32Type.get():
raise NotImplementedError
return self._pointwise(
self._lift_fast_instr("tanh.approx.f32") if approx else mlir_math.tanh
)
def rsqrt(self, *, approx: bool = False) -> FragmentedArray:
if not ir.FloatType.isinstance(self.mlir_dtype):
raise NotImplementedError
if approx and self.mlir_dtype != ir.F32Type.get():
raise NotImplementedError
return self._pointwise(
self._lift_fast_instr("rsqrt.approx.f32") if approx else mlir_math.rsqrt
)
@staticmethod
def _lift_fast_instr(
instr: str | Callable[[ir.Value], ir.Value],
) -> Callable[[ir.Value, ir.Value], ir.Value]:
def fast_instr(*args):
f32 = ir.F32Type.get()
arg_ty = args[0].type
assert all(a.type == arg_ty for a in args)
if arg_ty == f32:
if isinstance(instr, str):
args_ptx = ", ".join(f"${i}" for i in range(len(args) + 1))
return llvm.inline_asm(
f32, args, f"{instr} {args_ptx};", "=f" + ",f" * len(args)
)
else:
return instr(*args)
elif ir.VectorType.isinstance(arg_ty):
result = llvm.mlir_undef(arg_ty)
[vec_len] = ir.VectorType(arg_ty).shape
for i in range(vec_len):
vs = [
vector.extract(
a,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([i]),
)
for a in args
]
vr = fast_instr(*vs)
result = vector.insert(
vr,
result,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([i]),
)
return result
else:
raise NotImplementedError(arg_ty)
return fast_instr
def bitcast(
self, elt: ir.Type, *, output_is_signed: bool | None = None
) -> FragmentedArray:
if (output_is_signed is not None) != ir.IntegerType.isinstance(elt):
raise TypeError(
"output_is_signed must be non-None if and only if the MLIR type is an"
f" integer type, got {output_is_signed=} for {elt}"
)
if elt == self.mlir_dtype:
return self
reg_type = self.registers.flat[0].type
if ir.VectorType.isinstance(reg_type):
reg_shape = ir.VectorType(reg_type).shape
ty = ir.VectorType.get(reg_shape, elt)
else:
ty = elt
return self._pointwise(
lambda x: arith.bitcast(ty, x), output_is_signed=output_is_signed
)
def __getitem__(self, idx) -> FragmentedArray:
if not isinstance(self.layout, TiledLayout):
raise NotImplementedError("Only arrays with tiled layouts can be sliced")
base_idx, slice_shape, is_squeezed = utils.parse_indices(idx, self.shape)
if any(isinstance(idx, ir.Value) for idx in base_idx):
raise ValueError("Only slicing with static indices allowed")
if any(is_squeezed):
raise NotImplementedError("Integer indexing not implemented (only slicing allowed)")
base_tile_shape = self.layout.base_tile_shape
if len(base_tile_shape) != len(self.shape):
raise NotImplementedError("Tiling has different rank than array")
if any(b % t for b, t in zip(base_idx, base_tile_shape, strict=True)):
raise ValueError(
"Base indices of array slices must be aligned to the beginning of a"
f" tile. The array uses a tiling of {base_tile_shape}, but your base"
f" indices are {base_idx}. Consider using a different array layout."
)
if any(l % t for l, t in zip(slice_shape, base_tile_shape, strict=True)):
raise ValueError(
"The slice shape must be a multiple of the tile shape. The array"
f" uses a tiling of {base_tile_shape}, but your slice shape is"
f" {slice_shape}. Consider using a different array layout."
)
register_slices = tuple(
slice(b // t, (b + l) // t)
for b, l, t in zip(base_idx, slice_shape, base_tile_shape, strict=True)
)
new_regs = self.registers[register_slices]
return FragmentedArray(
_registers=new_regs, _layout=self.layout, _is_signed=self.is_signed
)
def __setitem__(self, idx: object, value: FragmentedArray) -> None:
if not isinstance(value, FragmentedArray):
raise ValueError(f"Expected a FragmentedArray, got: {value}")
if not isinstance(self.layout, TiledLayout):
raise NotImplementedError("Only arrays with tiled layouts can be sliced")
base_idx, slice_shape, is_squeezed = utils.parse_indices(idx, self.shape)
if any(isinstance(idx, ir.Value) for idx in base_idx):
raise ValueError("Only slicing with static indices allowed")
if any(is_squeezed):
raise NotImplementedError("Integer indexing not implemented (only slicing allowed)")
if value.shape != tuple(slice_shape):
raise ValueError(
f"Slice has shape {tuple(slice_shape)}, but assigned array has shape"
f" {value.shape}"
)
if value.mlir_dtype != self.mlir_dtype:
raise ValueError(
f"Array has dtype {value.mlir_dtype}, but assigned array has dtype"
f" {self.mlir_dtype}"
)
if value.layout != self.layout:
raise ValueError(
f"Array has layout {value.layout}, but assigned array has layout"
f" {self.layout}"
)
base_tile_shape = self.layout.base_tile_shape
if len(base_tile_shape) != len(self.shape):
raise NotImplementedError("Tiling has different rank than array")
if any(
b % t or l % t
for b, l, t in zip(base_idx, slice_shape, base_tile_shape, strict=True)
):
raise NotImplementedError("Only tile aligned slicing supported")
register_slices = tuple(
slice(b // t, (b + l) // t)
for b, l, t in zip(base_idx, slice_shape, base_tile_shape, strict=True)
)
assert self.registers[register_slices].shape == value.registers.shape
self.registers[register_slices] = value.registers
def copy(self) -> FragmentedArray:
return FragmentedArray(
_registers=np.copy(self.registers),
_layout=self.layout,
_is_signed=self.is_signed,
)
# TODO(apaszke): Support JAX dtypes here as well?
def astype(
self, new_dtype: ir.Type, *, is_signed: bool | None = None
) -> FragmentedArray:
i4 = ir.IntegerType.get_signless(4)
i8 = ir.IntegerType.get_signless(8)
i16 = ir.IntegerType.get_signless(16)
i32 = ir.IntegerType.get_signless(32)
bf16 = ir.BF16Type.get()
f32 = ir.F32Type.get()
f8e4m3fn = ir.Float8E4M3FNType.get()
cur_dtype = self.mlir_dtype
if cur_dtype == new_dtype:
if self.is_signed == is_signed:
return self
return FragmentedArray(
_registers=self.registers, _layout=self.layout, _is_signed=is_signed
)
# Otherwise, mypy is unhappy with using ``idx`` for both range and
# np.ndenumerate.
idx: Any
any_reg = self.registers.flat[0]
reg_type = any_reg.type
is_vector_reg = ir.VectorType.isinstance(reg_type)
reg_shape = tuple(ir.VectorType(reg_type).shape) if is_vector_reg else (1,)
[vector_len] = reg_shape # This is meant to be a 1D assertion.
if (new_reg_bitwidth := utils.bitwidth(new_dtype) * vector_len) % 8:
raise ValueError(
"Register bitwidth in target type must be divisible by 8, got"
f" {new_reg_bitwidth}"
)
# If the vector originates from a slice (common after relayouts), we
# can fuse the slicing into the conversion and reuse many
# preprocessing ops (shifts, prmts) across different vectors.
regs_from_32bit_slice = (
isinstance(
_slice_op := getattr(any_reg.owner, "opview", None),
vector.ExtractStridedSliceOp,
)
and utils.bitwidth(_slice_op.source.type) == 32
and _slice_op.strides[0].value == 1
)
def packed_registers(
dst_vector_len: int, *, if_not_sliced: bool
) -> Iterable[tuple[Sequence[tuple[int, ...]], ir.Value]]:
"""Tries to pack registers up to destination vector length."""
if regs_from_32bit_slice and if_not_sliced:
for idx, reg in np.ndenumerate(self.registers):
yield [idx], reg
return
generator = np.ndenumerate(self.registers)
indices = []
regs = []
while True:
try:
for _ in range(max(dst_vector_len // vector_len, 1)):
idx, reg = next(generator)
indices.append(idx)
regs.append(reg)
yield indices, utils.vector_concat(regs)
regs.clear()
indices.clear()
except StopIteration:
break
if regs:
yield indices, utils.vector_concat(regs)
if cur_dtype == i4 and new_dtype == f8e4m3fn:
# The algorithm here is taken from CUTLASS's `NumericArrayConverter`
# specialization for int4 -> f8e4m3, available at
# https://github.com/NVIDIA/cutlass/blob/5c6bca04414e06ce74458ab0a2018e2b8272701c/include/cutlass/numeric_conversion.h#L4982.
# Each call to the function below will upcast 4 contiguous nibbles of
# the input 32-bit register, and whether to select the 4 low nibbles or
# the 4 high nibbles is determined by the `part` argument.
def upcast_to_f8e4m3fn(reg: ir.Value, part: int):
lut = [
0x44403800, # [0, 1, 2, 3] encoded as f8e4m3fn
0x4E4C4A48, # [4, 5, 6, 7] encoded as f8e4m3fn
0xCACCCED0, # [-8, -7, -6, -5] encoded as f8e4m3fn
0xB8C0C4C8, # [-4, -3, -2, -1] encoded as f8e4m3fn
]
sign = arith.shrui(arith.andi(reg, c(0x88888888, i32)), c(1, i32))
# Ignore the sign when indexing into the LUT.
lut_idx = arith.andi(reg, c(0x77777777, i32))
assert 0 <= part < 2
if part == 1:
lut_idx = arith.shrui(lut_idx, c(16, i32))
sign = arith.shrui(sign, c(16, i32))
prmt_sign_pattern = arith.ori(sign, c(0x32103210, i32))
return llvm.inline_asm(
i32,
[lut_idx, prmt_sign_pattern],
f"""
{{
.reg .b32 pos_f8s, neg_f8s;
prmt.b32 pos_f8s, {lut[0]}, {lut[1]}, $1;
prmt.b32 neg_f8s, {lut[2]}, {lut[3]}, $1;
prmt.b32 $0, pos_f8s, neg_f8s, $2;
}}
""",
"=r,r,r",
)
new_registers = np.empty_like(self.registers)
# TODO(apaszke,bchetioui): Using 8 helps some (but not all) cases.
# TODO(apaszke,bchetioui): Add the slice optimization here.
packing_width = 8 if vector_len == 2 else 4
for indices, reg in packed_registers(packing_width, if_not_sliced=False):
[group_size] = ir.VectorType(reg.type).shape
assert group_size % vector_len == 0
int_ty = ir.IntegerType.get_signless(group_size * 4)
reg_as_i32 = utils.bitcast(reg, int_ty)
if int_ty != i32:
reg_as_i32 = arith.extsi(i32, reg_as_i32)
out_i32_regs = [
upcast_to_f8e4m3fn(reg_as_i32, part=part)
for part in range(max(group_size // 4, 1))
]
out_vec_int = utils.vector_concat([
vector.broadcast(ir.VectorType.get((1,), i32), out_i32_reg)
for out_i32_reg in out_i32_regs
])
out_vector_len = len(out_i32_regs) * 4
# Bitcast to i8 first to allow slicing as necessary, since LLVM chokes
# on f8 types.
out_vec = utils.bitcast(
out_vec_int, ir.VectorType.get((out_vector_len,), i8)
)
offset = 0
for idx in indices:
sliced_out_vec = utils.vector_slice(
out_vec, slice(offset, offset + vector_len)
)
new_registers[idx] = utils.bitcast(
sliced_out_vec, ir.VectorType.get((vector_len,), f8e4m3fn)
)
offset += vector_len
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=None
)
if cur_dtype == i4 and self.is_signed and new_dtype == bf16 and vector_len % 2 == 0:
new_registers = np.empty_like(self.registers)
out_vec_ty = ir.VectorType.get((vector_len,), new_dtype)
# We use packed_registers for consistency, even though the packing is not
# really profitable here: the PTX below begins with an op dependent on the
# extracted part and so there are no ops that can be shared across packed
# parts.
for indices, reg in packed_registers(2, if_not_sliced=True):
# The algorithm here is largely the same as CUTLASS's
# NumericArrayConverter specialization for int4 -> bf16 casts.
# We modify it slightly, because we only extract 2 values.
# We first shift the value by 4 bits, to put the high int4 in low bits.
# The prmt then blends the two values together, by putting them into the
# low bits of each 16-bit subword of our register. Then, we use the lop3
# to zero any bits that don't belong to our int4s, and finally use the
# XOR to: (1) set the exponent bits to 0x43 (at which point the mantissa
# represents integer increments) and (2) flip the sign bit. If we
# interpret the 4 bits as uint4 after the flip, then we'll see that
# positive int4s will end up larger than negative int4s, with a bias of
# 8. We use the sub to subtract the base (our initial exponent) and the
# bias coming from flipping the sign bit, which is 136 (0x4308 as bits).
def upcast_i4_to_bf16(reg: ir.Value, reg_shr: ir.Value, part: int):
assert 0 <= part < 4
int_reg = llvm.inline_asm(
i32,
[reg, reg_shr],
f"""
{{
.reg .b32 s<4>;
prmt.b32 s1, $1, $2, 0xF{part + 4}F{part};
lop3.b32 s2, s1, 0x000F000F, 0x43084308, (0xf0 & 0xcc) ^ 0xaa;
mov.b32 s3, 0x43084308;
sub.bf16x2 $0, s2, s3;
}}
""",
"=r,r,r",
)
return utils.bitcast(int_reg, ir.VectorType.get((2,), bf16))
[group_size] = ir.VectorType(reg.type).shape
assert group_size % vector_len == 0
assert group_size * 4 <= 32
int_ty = ir.IntegerType.get_signless(group_size * 4)
# If the vector originates from a slice (common after relayouts), we
# can fuse the slicing into the conversion and prevent LLVM from
# generating a bunch of shifts to align the vector data to the LSB.
# This also lets us share the right shift among more vectors.
out_int_regs: list[ir.Value] = []
if regs_from_32bit_slice:
slice_op = reg.owner.opview
slice_offset = slice_op.offsets[0].value
reg_int = utils.bitcast(slice_op.source, i32)
reg_int_shr = arith.shrui(reg_int, c(4, i32))
assert slice_offset % 2 == 0
out_int_regs.extend(
upcast_i4_to_bf16(reg_int, reg_int_shr, part=slice_offset // 2 + part)
for part in range(group_size // 2)
)
else:
reg_slice_int = utils.bitcast(reg, int_ty)
if int_ty != i32:
reg_slice_int = arith.extsi(i32, reg_slice_int)
reg_slice_int_shr = arith.shrui(reg_slice_int, c(4, i32))
out_int_regs.extend(
upcast_i4_to_bf16(reg_slice_int, reg_slice_int_shr, part=part)
for part in range(group_size // 2)
)
out_reg = utils.vector_concat(out_int_regs)
offset = 0
for idx in indices:
new_registers[idx] = new_reg = utils.vector_slice(
out_reg, slice(offset, offset + vector_len)
)
offset += vector_len
assert new_reg.type == out_vec_ty
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=None
)
if cur_dtype == i4 and self.is_signed and new_dtype == i8 and is_signed:
new_registers = np.empty_like(self.registers)
out_vec_ty = ir.VectorType.get((vector_len,), new_dtype)
for indices, reg in packed_registers(8, if_not_sliced=True):
def upcast_i4_to_i8(reg: ir.Value, first_valid_nibble: int = 0):
# When first_valid_nibble is >0, then only the nibbles in the range
# [first_valid_nibble, 8) will be upcast and placed in the low
# elements of the output vector. All high entries are undefined.
assert first_valid_nibble % 2 == 0
low_prmt = "".join(str(min(first_valid_nibble // 2 + i, 7)) for i in [5, 1, 4, 0])
high_prmt = "".join(str(min(first_valid_nibble // 2 + i, 7)) for i in [7, 3, 6, 2])
# Note: (0xf0 & 0xaa) | (0xcc & ~0xaa) = 0xe4. lop3 acts as a blend.
# Below xN means the value of nibble N, sN means that all 4 bits are
# equal to the sign bit of nibble N, and 00 means an all 0 nibble.
out_struct = llvm.inline_asm(
ir.Type.parse("!llvm.struct<(i32, i32)>"),
[reg],
f"""
{{
.reg .b32 high_even; // $2 is high_odd
.reg .b32 low_odd; // $2 is low_even
.reg .b32 sign_even, sign_odd;
.reg .b32 i8_odd, i8_even;
shl.b32 high_even, $2, 4; // x6x5x4x3x2x1x000
prmt.b32 sign_even, high_even, high_even, 0xba98; // s6s6s4s4s2s2s0s0
prmt.b32 sign_odd, $2, $2, 0xba98; // s7s7s5s5s3s3s1s1
shr.u32 low_odd, $2, 4; // 00x7x6x5x4x3x2x1
lop3.b32 i8_odd, sign_odd, low_odd, 0xf0f0f0f0, 0xe4; // s7x7s5x5s3x3s1x1
lop3.b32 i8_even, sign_even, $2, 0xf0f0f0f0, 0xe4; // s6x6s4x4s2x2s0x0
prmt.b32 $0, i8_even, i8_odd, 0x{low_prmt}; // s3x3s2x2s1x1s0x0
prmt.b32 $1, i8_even, i8_odd, 0x{high_prmt}; // s7x7s6x6s5x5s4x4
}}
""",
"=r,=r,r",
)
i8_vec = ir.VectorType.get((4,), i8)
return utils.vector_concat([
utils.bitcast(llvm.extractvalue(i32, out_struct, (i,)), i8_vec)
for i in range(2)
])
[group_size] = ir.VectorType(reg.type).shape
assert group_size % vector_len == 0
assert group_size * 4 <= 32
int_ty = ir.IntegerType.get_signless(group_size * 4)
if regs_from_32bit_slice:
slice_op = reg.owner.opview
slice_offset = slice_op.offsets[0].value
reg_int = utils.bitcast(slice_op.source, i32)
reg_i8 = upcast_i4_to_i8(reg_int, first_valid_nibble=slice_offset)
else:
reg_slice_int = utils.bitcast(reg, int_ty)
if int_ty != i32:
reg_slice_int = arith.extsi(i32, reg_slice_int)
reg_i8 = upcast_i4_to_i8(reg_slice_int)
# distribute packed registers to original indices
offset = 0
for idx in indices:
new_registers[idx] = new_reg = utils.vector_slice(
reg_i8, slice(offset, offset + vector_len)
)
offset += vector_len
assert new_reg.type == out_vec_ty
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=is_signed
)
if cur_dtype == i8 and self.is_signed and new_dtype == bf16 and vector_len in {2, 4}:
new_registers = np.empty_like(self.registers)
def upcast_i8_to_bf16(reg, high):
# We first embed the s8 into a bf16 with the exponent equal to
# bias + mantissa bits. Then, we zero the msb that didn't fit into the
# mantissa, zero out all bits other than msb, and subtract the last
# two values from each other. This takes advantage of the fact that the
# lsb of the exponent (msb of the second byte) is zero, which allows us
# to losslessly pack the msb there. When 1, it doubles the value of s2,
# making the result negative.
return llvm.inline_asm(
i32,
[reg],
f"""
{{
.reg .b32 s<3>;
prmt.b32 s0, $1, 0x43, {0x4342 if high else 0x4140};
and.b32 s1, s0, 0xff7fff7f;
and.b32 s2, s0, 0xff80ff80;
sub.bf16x2 $0, s1, s2;
}}
""",
"=r,r",
)
empty_vec_32 = llvm.mlir_undef(ir.VectorType.get((vector_len // 2,), i32))
pad_vec_16 = llvm.mlir_undef(ir.VectorType.get((1,), i16))
for idx, reg in np.ndenumerate(self.registers):
if vector_len == 2:
reg_16 = vector.bitcast(ir.VectorType.get((1,), i16), reg)
reg_32 = utils.vector_concat([reg_16, pad_vec_16])
new_reg_32 = upcast_i8_to_bf16(reg_32, high=False)
new_vec_32 = llvm.insertelement(empty_vec_32, new_reg_32, c(0, i32))
elif vector_len == 4:
reg_32 = vector.bitcast(ir.VectorType.get((1,), i32), reg)
low = upcast_i8_to_bf16(reg_32, high=False)
high = upcast_i8_to_bf16(reg_32, high=True)
new_vec_32 = llvm.insertelement(empty_vec_32, low, c(0, i32))
new_vec_32 = llvm.insertelement(new_vec_32, high, c(1, i32))
else:
raise NotImplementedError(vector_len)
new_registers[idx] = vector.bitcast(
ir.VectorType.get((vector_len,), new_dtype), new_vec_32
)
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=is_signed
)
# TODO(bchetioui): handle conversions to/from other float8 types.
if cur_dtype in {bf16, f32} and new_dtype == f8e4m3fn:
if vector_len != 2:
raise NotImplementedError(vector_len)
new_registers = np.empty_like(self.registers)
empty_vec_16 = llvm.mlir_undef(ir.VectorType.get((1,), i16))
for idx, reg in np.ndenumerate(self.registers):
e0 = vector.extract(
reg,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([0]),
)
e1 = vector.extract(
reg,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([1]),
)
# TODO(bchetioui): can we do faster than this?
if cur_dtype == bf16:
e0 = arith.extf(f32, e0)
e1 = arith.extf(f32, e1)
new_reg_16 = llvm.inline_asm(
i16,
[e1, e0],
"cvt.rn.satfinite.e4m3x2.f32 $0, $1, $2;",
"=h,f,f",
)
new_registers[idx] = vector.bitcast(
ir.VectorType.get((2,), f8e4m3fn),
llvm.insertelement(empty_vec_16, new_reg_16, c(0, i32)))
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=is_signed
)
# Generic path.
from_float = ir.FloatType.isinstance(cur_dtype)
to_float = ir.FloatType.isinstance(new_dtype)
from_integer = ir.IntegerType.isinstance(cur_dtype)
to_integer = ir.IntegerType.isinstance(new_dtype)
if from_float and to_float:
cur_ty_width = ir.FloatType(cur_dtype).width
new_ty_width = ir.FloatType(new_dtype).width
if cur_ty_width == new_ty_width:
# There is no instruction to perform conversions between two float types
# of the same width. Go through the next-larger standard type.
# TODO(bchetioui): support conversions between float types of width 8.
# Which larger type to pick will depend on the number of bits in the
# smallest exponent.
if cur_ty_width != 16:
raise NotImplementedError(
"Conversion between float types of width other than 16 not"
" supported"
)
larger_ty = ir.F32Type.get()
match self.layout:
case WGStridedFragLayout() | TiledLayout():
shape = ir.VectorType(self.registers.flat[0].type).shape
upcast_ty = ir.VectorType.get(shape, larger_ty)
case WGSplatFragLayout():
upcast_ty = larger_ty
case _:
raise NotImplementedError(f"Unsupported layout {self.layout}")
convert = lambda ty, x: arith.truncf(ty, arith.extf(upcast_ty, x))
elif ir.FloatType(cur_dtype).width > ir.FloatType(new_dtype).width:
convert = arith.truncf
else:
convert = arith.extf
elif from_integer and to_integer:
if ir.IntegerType(cur_dtype).width > ir.IntegerType(new_dtype).width:
convert = arith.trunci
else:
convert = arith.extsi if self.is_signed else arith.extui
elif from_integer and to_float:
convert = arith.sitofp if self.is_signed else arith.uitofp
elif from_float and to_integer:
convert = arith.fptosi if is_signed else arith.fptoui
else:
raise NotImplementedError(f"Unsupported conversion {cur_dtype} -> {new_dtype}")
new_registers = np.empty_like(self.registers)
match self.layout:
case WGStridedFragLayout() | TiledLayout():
shape = ir.VectorType(self.registers.flat[0].type).shape
new_reg_ty = ir.VectorType.get(shape, new_dtype)
case WGSplatFragLayout():
new_reg_ty = new_dtype
case _:
raise NotImplementedError(f"Unsupported layout {self.layout}")
for idx, reg in np.ndenumerate(self.registers):
new_registers[idx] = convert(new_reg_ty, reg)
return FragmentedArray(
_registers=new_registers, _layout=self.layout, _is_signed=is_signed
)
def reduce(
self,
op: str | Callable[[ir.Value, ir.Value], ir.Value],
axis: int | Sequence[int],
scratch: ir.Value | None = None,
) -> FragmentedArray:
i32 = ir.IntegerType.get_signless(32)
if isinstance(axis, int):
axis = (axis,)
splat_op = None
if isinstance(op, str):
match op:
case "add":
reduced_elems = math.prod(self.shape[a] for a in axis)
if ir.FloatType.isinstance(self.mlir_dtype):
op = addf
splat_op = lambda x: arith.mulf(x, c(reduced_elems, x.type))
elif ir.IntegerType.isinstance(self.mlir_dtype):
op = arith.addi
splat_op = lambda x: arith.muli(x, c(reduced_elems, x.type))
else:
raise NotImplementedError(self.mlir_dtype)
case "max":
if ir.F32Type.isinstance(self.mlir_dtype):
op = self._lift_fast_instr("max.NaN.f32")
elif ir.FloatType.isinstance(self.mlir_dtype):
op = arith.maximumf
elif ir.IntegerType.isinstance(self.mlir_dtype):
op = arith.maxsi if self.is_signed else arith.maxui
else:
raise NotImplementedError(self.mlir_dtype)
splat_op = lambda x: x
case _:
raise ValueError(f"Unrecognized reduction operator: {op}")
assert not isinstance(op, str)
match self.layout:
case WGStridedFragLayout(shape=_, vec_size=vec_size):
if set(axis) != set(range(len(self.shape))):
raise NotImplementedError(
"Warpgroup strided layout only support reductions along all axes"
)
# We reinterpret the data as a tiled layout. We're reducing it all anyway.
layout = TiledLayout(
tiling=Tiling(((128 * vec_size,), (32 * vec_size,), (vec_size,))),
warp_dims=(-3,),
lane_dims=(-2,),
vector_dim=-1,
)
return FragmentedArray(
_registers=self.registers.reshape(
layout.registers_shape((math.prod(self.shape),))
),
_layout=layout,
_is_signed=self.is_signed,
).reduce(op, 0, scratch)
case WGSplatFragLayout():
if splat_op is None:
raise NotImplementedError(
"Splat reductions only supported when the operator is a string"
)
assert not self.registers.shape
return FragmentedArray(
_registers=np.asarray(
splat_op(self.registers.item()), dtype=object
),
_layout=WGSplatFragLayout(
tuple(d for a, d in enumerate(self.shape) if a not in axis)
),
_is_signed=self.is_signed,
)
case TiledLayout():
pass
case _:
raise NotImplementedError(self.layout)
if len(self.layout.base_tile_shape) != len(self.shape):
raise NotImplementedError
if isinstance(axis, int):
axis = (axis,)
layout = self.layout
tiled_tiling_shape = layout.tiled_tiling_shape
reduced_dims = layout.tiling.tile_dimension(axis[0])
for a in axis[1:]:
reduced_dims = tuple(
r or d for r, d in zip(reduced_dims, layout.tiling.tile_dimension(a), strict=True)
)
regs_shape = self.registers.shape
reduced_shape = tuple(
d if r else 1 for r, d in zip(reduced_dims, regs_shape, strict=True)
)
remaining_shape = tuple(
1 if r else d for r, d in zip(reduced_dims, regs_shape)
)
out_regs = np.empty(remaining_shape, dtype=object)
index = ir.IndexType.get()
for out_idx in np.ndindex(remaining_shape):
out_reg = None
for red_idx in np.ndindex(reduced_shape):
src_idx = tuple(o + r for o, r in zip(out_idx, red_idx))
if out_reg is None:
out_reg = self.registers[src_idx]
else:
out_reg = op(out_reg, self.registers[src_idx])
assert out_reg is not None
# Reduce within the vector dimension, if necessary.
if reduced_dims[layout.vector_dim]:
[vec_len] = ir.VectorType(out_reg.type).shape
scalar_out_reg = None
for i in range(vec_len):
scalar = vector.extract(
out_reg,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([i]),
)
scalar_out_reg = (
scalar if scalar_out_reg is None else op(scalar_out_reg, scalar)
)
out_reg = vector.broadcast(
ir.VectorType.get((1,), out_reg.type.element_type), scalar_out_reg
)
# Reduce across warp lanes, if necessary (using warp shuffles).
if any(reduced_dims[d] for d in layout.partitioned_lane_dims):
lane_stride = 1
for d in layout.lane_dims[::-1]: # Iterate minor-to-major
if isinstance(d, Replicated):
lane_stride *= d.times
elif not reduced_dims[d]:
lane_stride *= tiled_tiling_shape[d]
else:
assert lane_stride.bit_count() == 1
reduction_size = tiled_tiling_shape[d]
while reduction_size > 1:
other_out_reg = utils.shfl_bfly(out_reg, lane_stride)
out_reg = op(out_reg, other_out_reg)
lane_stride *= 2
reduction_size //= 2
assert lane_stride == WARP_SIZE, lane_stride
# Reduce across warps in the warpgroup, if necessary.
if any(reduced_dims[d] for d in layout.partitioned_warp_dims):
if scratch is None:
raise ValueError(
"scratch must be provided when cross-warp reduction is required"
)
[vec_len] = ir.VectorType(out_reg.type).shape
scratch_ty = ir.MemRefType(scratch.type)
if scratch_ty.rank != 1:
raise ValueError(f"Expected rank 1 for scratch, got {scratch_ty.rank}")
if scratch_ty.element_type != self.mlir_dtype:
raise ValueError(
f"Expected element type {self.mlir_dtype} for scratch, got"
f" {scratch_ty.element_type}"
)
# TODO(apaszke): All lanes that replicate data can share the same scratch.
# For now we treat the complete reduction as a special case.
reduces_all_dims = set(axis) == set(range(len(self.shape)))
unique_lanes = 1 if reduces_all_dims else 32
if scratch_ty.shape[0] < WARPS_IN_WARPGROUP * unique_lanes * vec_len:
raise ValueError("Insufficient scratch space for cross-warp reduction")
if scratch_ty.get_strides_and_offset()[0] != [1]:
raise ValueError("Expected scratch to be contiguous")
thread_idx = utils.thread_idx()
if reduces_all_dims:
lane_idx = c(0, i32)
else:
lane_idx = arith.remui(thread_idx, c(WARP_SIZE, i32))
warp_idx = arith.divui(
arith.remui(thread_idx, c(WARPGROUP_SIZE, i32)), c(WARP_SIZE, i32)
)
spill_base = arith.muli(lane_idx, c(WARPS_IN_WARPGROUP, i32))
store_idx = arith.index_cast(index, arith.addi(spill_base, warp_idx))
vector.store(
out_reg, scratch, [arith.muli(store_idx, c(vec_len, index))]
)
utils.warpgroup_barrier()
# warp_idx & warp_group_mask gives you the reduction group of the current warp.
if all(isinstance(d, int) and reduced_dims[d] for d in layout.warp_dims):
warp_offsets, warp_group_mask = [*range(WARPS_IN_WARPGROUP)], 0
else:
# 4 only factors non-trivially as 2 * 2, so warp_dims must have exactly two entries.
assert len(layout.warp_dims) == 2
wd0, wd1 = layout.warp_dims
if isinstance(wd0, int) and reduced_dims[wd0]:
warp_offsets, warp_group_mask = [0, 2], 1
else:
assert isinstance(wd1, int) and reduced_dims[wd1]
warp_offsets, warp_group_mask = [0, 1], 2
reg_ty = out_reg.type
out_reg = None
warp_reduction_group = arith.andi(warp_idx, arith.constant(i32, warp_group_mask))
for warp_offset in warp_offsets:
reduced_warp = arith.addi(warp_reduction_group, c(warp_offset, i32))
load_idx = arith.index_cast(
index,
arith.muli(arith.addi(spill_base, reduced_warp), c(vec_len, i32)),
)
part = vector.load(reg_ty, scratch, [load_idx])
out_reg = part if out_reg is None else op(out_reg, part)
utils.warpgroup_barrier() # Make sure everyone is done using scratch.
out_regs[out_idx] = out_reg
# Infer the output layout and reshape the registers accordingly.
reduced_logical_shape = list(self.shape)
for a in sorted(axis, reverse=True):
del reduced_logical_shape[a]
if not reduced_logical_shape: # Complete reduction results in a splat.
reduced_layout: FragmentedLayout = WGSplatFragLayout(())
assert out_regs.size == 1
out_reg = out_regs.flat[0]
assert ir.VectorType(out_reg.type).shape == [1]
out_reg = vector.extract(
out_reg,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([0]),
)
out_regs = np.asarray(out_reg, dtype=object)
else:
reduced_layout = layout.reduce(axis)
out_regs = out_regs.reshape(
reduced_layout.registers_shape(tuple(reduced_logical_shape))
)
return FragmentedArray(
_registers=out_regs, _layout=reduced_layout, _is_signed=self.is_signed
)
def broadcast(self, shape) -> FragmentedArray:
if not isinstance(self.layout, WGSplatFragLayout):
raise NotImplementedError(self.layout)
if self.shape == shape:
return self
if not self.layout.can_broadcast_to(shape):
raise ValueError(f"Can't broadcast {self.shape} to {shape}")
return FragmentedArray(
_registers=self.registers,
_layout=WGSplatFragLayout(shape),
_is_signed=self.is_signed,
)
def reshape(self, shape) -> FragmentedArray:
if self.shape == shape:
return self
if math.prod(shape) != math.prod(self.shape):
raise ValueError(f"Can't reshape {self.shape} to {shape}")
match self.layout:
case WGSplatFragLayout() | WGStridedFragLayout():
new_layout = dataclasses.replace(self.layout, shape=shape)
case _:
raise NotImplementedError(self.layout)
return FragmentedArray(
_registers=self.registers, _layout=new_layout, _is_signed=self.is_signed
)
def broadcast_minor(self, n) -> FragmentedArray:
if len(self.shape) != 1:
raise ValueError("Broadcast minor is only supported for 1D arrays")
if n % 8:
raise ValueError(f"The broadcast dimension must be a multiple of 8, got {n}")
if self.layout == WGMMA_ROW_LAYOUT:
new_layout = WGMMA_LAYOUT
elif self.layout == TCGEN05_ROW_LAYOUT:
new_layout = TCGEN05_LAYOUT
else:
raise NotImplementedError(self.layout)
return self.broadcast_in_dim((self.shape[0], n), (0,), new_layout)
def broadcast_in_dim(
self, shape, source_dimensions, layout: FragmentedLayout
) -> FragmentedArray:
for i, target_dim in enumerate(source_dimensions):
if self.shape[i] != shape[target_dim]:
raise ValueError(
f"Dimension {i} has size {self.shape[i]} in source shape and"
f" {shape[target_dim]} in shape after broadcast"
)
if isinstance(self.layout, WGSplatFragLayout):
if isinstance(layout, WGSplatFragLayout):
if layout.shape != shape:
raise ValueError(
f"Layout shape {layout.shape} does not match broadcast shape {shape}"
)
return FragmentedArray(
_registers=self.registers, _layout=layout, _is_signed=self.is_signed,
)
# TODO: Support splat to other layouts
if not isinstance(self.layout, TiledLayout) or not isinstance(layout, TiledLayout):
raise NotImplementedError(self.layout, layout)
if any(d1 >= d2 for d1, d2 in zip(source_dimensions, source_dimensions[1:])):
raise NotImplementedError("source_dimensions must be strictly increasing")
if len(layout.base_tile_shape) != len(shape):
raise NotImplementedError("Tiling rank different than broadcast result rank")
new_dimensions = sorted(set(range(len(shape))) - set(source_dimensions))
expected_layout = layout.reduce(new_dimensions)
if expected_layout != self.layout:
raise ValueError(
"Source and destination layouts aren't compatible for a broadcast"
)
new_registers_shape = layout.registers_shape(shape)
pre_broadcast_registers_shape = list(new_registers_shape)
for new_dim in new_dimensions:
for i, is_new in enumerate(layout.tiling.tile_dimension(new_dim)):
if is_new:
pre_broadcast_registers_shape[i] = 1
# The broadcast for all dims but the vector_dim amounts to repeating the
# registers along the new dimensions. Along the vector_dim, we actually need
# to extend the vector length to change the type of the registers.
if layout.vector_length != self.layout.vector_length:
assert self.layout.vector_length == 1
registers = np.empty_like(self.registers)
for idx, reg in np.ndenumerate(self.registers):
registers[idx] = utils.vector_concat([reg] * layout.vector_length)
else:
registers = self.registers
new_registers = np.broadcast_to(
registers.reshape(pre_broadcast_registers_shape), new_registers_shape,
)
return FragmentedArray(
_registers=new_registers, _layout=layout, _is_signed=self.is_signed,
)
def select(self, on_true, on_false):
if (
not ir.IntegerType.isinstance(self.mlir_dtype)
or ir.IntegerType(self.mlir_dtype).width != 1
):
raise NotImplementedError
# We change the receiver here, because the return type is defined by
# `on_true` and `on_false` and not the predicate `self`.
return on_true._pointwise(
lambda t, p, f: arith.select(p, t, f), self, on_false,
)
@classmethod
def build(
cls,
shape: tuple[int, ...],
layout: FragmentedLayout,
fn: Callable[..., ir.Value], # ir.Value varargs, one for each dim
*,
is_signed: bool | None = None,
) -> FragmentedArray:
undef = llvm.mlir_undef(ir.IntegerType.get_signless(32))
dummy = cls.splat(undef, shape, layout, is_signed=False)
return dummy.foreach(
lambda _, idx: fn(*idx), create_array=True, is_signed=is_signed
)
def foreach(
self,
fn: Callable[[ir.Value, tuple[ir.Value, ...]], ir.Value | None],
*,
create_array=False,
is_signed=None,
):
"""Call a function for each value and index."""
index = ir.IndexType.get()
new_regs = None
orig_fn = fn
del fn
def wrapped_fn(*args):
nonlocal new_regs
result = orig_fn(*args)
old_reg_type = self.registers.flat[0].type
# Lazily create new_regs once we know the desired output type.
if create_array and new_regs is None:
assert result is not None
if ir.VectorType.isinstance(old_reg_type):
new_reg_type = ir.VectorType.get(old_reg_type.shape, result.type)
else:
new_reg_type = result.type
new_regs = np.full_like(self.registers, llvm.mlir_undef(new_reg_type))
return result
for mlir_idx, reg_idx in zip(self.layout.thread_idxs(self.shape), np.ndindex(self.registers.shape), strict=True):
reg = self.registers[reg_idx]
assert len(mlir_idx) == len(self.shape), (mlir_idx, self.shape)
if ir.VectorType.isinstance(reg.type):
[elems] = ir.VectorType(reg.type).shape
for i in range(elems):
c_i = c(i, index)
val = wrapped_fn(
vector.extract(
reg,
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([i]),
),
(*mlir_idx[:-1], arith.addi(mlir_idx[-1], c_i)),
)
if create_array:
assert new_regs is not None
new_regs[reg_idx] = vector.insert(
val,
new_regs[reg_idx],
dynamic_position=[],
static_position=ir.DenseI64ArrayAttr.get([i]),
)
else:
val = wrapped_fn(reg, mlir_idx)
if create_array:
assert new_regs is not None
new_regs[reg_idx] = val
if create_array:
assert new_regs is not None
return FragmentedArray(_registers=new_regs, _layout=self.layout, _is_signed=is_signed)
def debug_print(self, fmt: str) -> None:
idx_fmt = ", ".join(["{}"] * len(self.shape))
@self.foreach
def _(val, idx):
fmt_str = fmt.format(f"[{idx_fmt}]: {{}}")
utils.debug_print(fmt_str, *idx, val, uniform=False)
def store_untiled(
self, ref: ir.Value | utils.MultimemRef, *, swizzle: int = 16, optimized: bool = True
) -> None:
if not ir.MemRefType.isinstance(ref.type):
raise ValueError(ref)
match self.layout:
case WGSplatFragLayout():
if isinstance(ref, utils.MultimemRef):
raise NotImplementedError("Splat layout does not support multimem")
# All values are the same so swizzle does not affect anything here.
self._store_untiled_splat(ref)
case WGStridedFragLayout():
if swizzle != 16:
raise ValueError("Only TiledLayouts support swizzling")
assert isinstance(self.layout, WGStridedFragLayout)
for get, _update, ref, idx in self.transfer_strided(ref, self.layout.vec_size):
if isinstance(ref, utils.MultimemRef):
ref.store(get(self.registers), idx)
else:
vector.store(get(self.registers), ref, idx)
case TiledLayout():
ref_shape = ir.MemRefType(ref.type).shape
ref = utils.memref_reshape(ref, (*(1 for _ in ref_shape), *ref_shape))
self.store_tiled(ref, swizzle=swizzle, optimized=optimized)
case _:
raise NotImplementedError(self.layout)
@classmethod
def load_reduce_untiled(
cls,
ref: utils.MultimemRef,
layout: TiledLayout | WGStridedFragLayout,
reduction: utils.MultimemReductionOp,
swizzle: int = 16,
is_signed: bool | None = None,
):
ref_ty = ir.MemRefType(ref.type)
shape = tuple(ref_ty.shape)
if isinstance(layout, WGStridedFragLayout):
if swizzle != 16:
raise ValueError("Only TiledLayouts support swizzling")
registers = np.empty(layout.registers_shape(shape), dtype=object)
vec_ty = ir.VectorType.get((layout.vec_size,), ref_ty.element_type)
for _get, update, ref, idx in cls.transfer_strided(ref, layout.vec_size):
ptr = utils.memref_ptr(utils.memref_slice(ref.ref, tuple(idx)))
update(registers, utils.multimem_load_reduce(vec_ty, ptr, reduction, is_signed))
return cls(_registers=registers, _layout=layout, _is_signed=is_signed)
ref = utils.memref_reshape(ref, (*(1 for _ in shape), *shape))
return cls.load_tiled(
ref.ref,
swizzle=swizzle,
is_signed=is_signed,
layout=layout,
optimized=False, # multimem refs are always GMEM refs.
_load_fun=functools.partial(
utils.multimem_load_reduce, reduction=reduction, is_signed=is_signed
),
# multimem_load_reduce supports vectors of narrow floats, so we don't
# need to do any casting.
_narrow_float_as_int=False,
)
@classmethod
def load_untiled(
cls,
ref: ir.Value,
*,
layout: TiledLayout,
swizzle: int = 16,
is_signed: bool | None = None,
optimized: bool = True,
) -> FragmentedArray:
ref_ty = ir.MemRefType(ref.type)
ref = utils.memref_reshape(ref, (*(1 for _ in ref_ty.shape), *ref_ty.shape))
return cls.load_tiled(
ref, swizzle=swizzle, is_signed=is_signed, layout=layout, optimized=optimized
)
def _store_untiled_splat(self, ref: ir.Value):
if math.prod(self.shape) == 1:
c0 = c(0, ir.IndexType.get())
memref.store(
self.registers.flat[0], ref, [c0] * len(ir.MemRefType(ref.type).shape)
)
return
vec_size = 64 // mgpu.bitwidth(self.mlir_dtype)
if np.prod(self.shape) < vec_size * WARPGROUP_SIZE:
vec_size = 1
if np.prod(self.shape) % WARPGROUP_SIZE * vec_size:
raise NotImplementedError(
"Arrays with the splat layout can only be stored when they have a"
f" single element or a multiple of {WARPGROUP_SIZE} elements"
)
fa = FragmentedArray.splat(
self.registers.flat[0],
self.shape,
layout=WGStridedFragLayout(shape=self.shape, vec_size=vec_size),
is_signed=self.is_signed,
)
fa.store_untiled(ref)
def store_tiled(self, ref: ir.Value | utils.MultimemRef, swizzle: int | None, optimized: bool = True):
if not isinstance(self.layout, TiledLayout):
raise NotImplementedError(self.layout)
layout, shape = self.layout, self.shape
# Note that the loop below will "race" for layouts that replicate data.
# However, in that case all of the racing writes store the same data, which
# is ok in the CUDA memory model.
if isinstance(ref, utils.MultimemRef):
stores = self.transfer_tiled(ref.ref, swizzle, layout, shape, optimized)
for get, _update, _idx, ptr in stores:
utils.multimem_store(ptr, get(self.registers))
else:
stores = self.transfer_tiled(ref, swizzle, layout, shape, optimized)
for get, _update, _idx, ptr in stores:
reg = get(self.registers)
reg_ty = ir.VectorType(reg.type)
element_bitwidth = utils.bitwidth(reg_ty.element_type)
if ir.FloatType.isinstance(reg_ty.element_type) and element_bitwidth <= 8:
narrow_int = ir.IntegerType.get_signless(element_bitwidth)
reg = vector.bitcast(ir.VectorType.get(reg_ty.shape, narrow_int), reg)
llvm.store(reg, ptr)
@classmethod
def load_tiled(
cls,
ref,
swizzle: int | None,
*,
is_signed: bool | None = None,
layout: FragmentedLayout = WGMMA_LAYOUT,
optimized: bool = True,
_load_fun: Callable[[ir.VectorType, ir.Value], ir.Value] = llvm.load,
_narrow_float_as_int: bool = True,
) -> FragmentedArray:
if not isinstance(layout, TiledLayout):
raise NotImplementedError(layout)
ref_ty = ir.MemRefType(ref.type)
dtype = ref_ty.element_type
tiled_shape = ref_ty.shape
if len(tiled_shape) % 2:
raise ValueError("Tiled reference must have even rank")
if len(tiled_shape) < 2:
raise ValueError("Tiled reference must have at least two dimensions")
tiling = Tiling((tiled_shape[len(tiled_shape) // 2 :],))
shape = tiling.untile_shape(tiled_shape)
reg_ty = ir.VectorType.get((layout.vector_length,), dtype)
zero = vector.broadcast(reg_ty, c(0, dtype))
registers = np.full(layout.registers_shape(shape), zero, dtype=object)
is_narrow_float = ir.FloatType.isinstance(dtype) and utils.bitwidth(dtype) <= 8
narrow_int = ir.IntegerType.get_signless(utils.bitwidth(dtype))
# Narrow floats are not supported by LLVM, so we need to transfer them as
# narrow ints and bitcast back to the desired type.
transfer_ty = ir.VectorType.get(
(layout.vector_length,),
narrow_int if is_narrow_float and _narrow_float_as_int else dtype
)
loads = cls.transfer_tiled(ref, swizzle, layout, shape, optimized)
for _get, update, _idx, ptr in loads:
loaded_reg = _load_fun(transfer_ty, ptr)
if is_narrow_float and _narrow_float_as_int:
loaded_reg = vector.bitcast(reg_ty, loaded_reg)
update(registers, loaded_reg)
return cls(_registers=registers, _layout=layout, _is_signed=is_signed)
@classmethod
def transfer_strided(self, ref: ir.Value, vec_size: int):
ref_ty = ir.MemRefType(ref.type)
layout = WGStridedFragLayout(shape=tuple(ref_ty.shape), vec_size=vec_size)
try:
# Flattening the reference potentially produces simpler PTX but
# if the ref is not already 1D and has strided dimensions
# flattening won't work.
ref = mgpu.memref_fold(ref, 0, len(ref_ty.shape))
except ValueError:
if vec_size > 1:
ref_ty = ir.MemRefType(ref.type)
shape = ref_ty.shape
strides, _ = ref_ty.get_strides_and_offset()
# Try to fold contiguous dimension pairs.
for i in reversed(range(len(shape) - 1)):
if strides[i] == shape[i+1] * strides[i+1]:
ref = mgpu.memref_fold(ref, i, 2)
ref_ty = ir.MemRefType(ref.type)
shape = ref_ty.shape
strides, _ = ref_ty.get_strides_and_offset()
has_contiguous_dim = False
for size, stride in zip(shape, strides):
if stride == 1:
has_contiguous_dim = True
if size % vec_size != 0:
raise ValueError(
"The contiguous dimension of the reference must be a"
f" multiple of the layout's vector size (got {size} and"
f" vector size {vec_size})"
) from None
elif size > 1:
if stride % vec_size != 0:
raise ValueError(
"Non-contiguous dimension of the reference must have strides"
" that are multiples of the layout's vector size (got"
f" {stride} and vector size {vec_size})"
) from None
if not has_contiguous_dim:
raise ValueError(
"The reference must have a contiguous dimension when vec_size > 1"
)
layout = WGStridedFragLayout(shape=tuple(ref_ty.shape), vec_size=vec_size)
idx_gen = layout.thread_idxs(tuple(ref_ty.shape))
else:
idx_gen = map(lambda x: [x], layout.linear_thread_idxs())
for i, vec_idx in enumerate(idx_gen):
def update(registers, reg, _i=i):
registers[_i] = reg
def get(registers, _i=i):
return registers[_i]
yield get, update, ref, vec_idx
@staticmethod
def transfer_tiled(
ref: ir.Value,
swizzle: int | None,
layout: TiledLayout,
shape: tuple[int, ...],
optimized: bool = True,
):
"""Generate a transfer schedule for a tiled layout.
Given a ref with one level tiling applied to it (we assume all dimensions
have been tiled), this function generates an iterable describing a good
schedule for swizzled SMEM loads/stores.
At each step, the iterable yields a tuple of four values:
* a function that takes a register array and returns the register to be
stored at the current address
* a function that takes a register array and a register loaded from the
current address, and updates the register array with that register
* a function that returns the base logical index of the transfer (only
supported for trivial transfer plans)
* the current address for load/store instructions
"""
# TODO(apaszke): Use ldmatrix/stmatrix when possible.
c = lambda x: arith.constant(ir.IntegerType.get_signless(32), x)
tiling = layout.tiling
ref_ty = ir.MemRefType(ref.type)
dtype = ref_ty.element_type
if ref_ty.rank % 2:
raise ValueError("Tiled reference must have even rank")
ref_logical_rank = ref_ty.rank // 2
ref_tiling_shape = tuple(ref_ty.shape[ref_logical_rank:])
ref_tiling = Tiling((ref_tiling_shape,))
ref_strides, _ = ref_ty.get_strides_and_offset()
if (ref_logical_shape := ref_tiling.untile_shape(tuple(ref_ty.shape))) != shape:
raise ValueError(
f"The reference has untiled shape of {ref_logical_shape} while the"
f" register array has shape {shape}"
)
nested_ref_shape = tuple(
(ref_ty.shape[i], ref_ty.shape[i + ref_logical_rank])
if ref_ty.shape[i + ref_logical_rank] != 1 else (ref_ty.shape[i],)
for i in range(ref_logical_rank)
)
nested_ref_strides = tuple(
(ref_strides[i], ref_strides[i + ref_logical_rank])
if ref_ty.shape[i + ref_logical_rank] != 1 else (ref_strides[i],)
for i in range(ref_logical_rank)
)
tiled_nested_shape, tiled_nested_strides = tiling.tile_nested_shape_strides(
nested_ref_shape, nested_ref_strides
)
# Not sure if this is strictly required for all data types, but it certainly
# is for sub-byte types (else we might not increment the pointer by whole bytes).
if any(
any(s % layout.vector_length and d != 1 for s, d in zip(ss, ds))
for i, (ss, ds) in enumerate_negative(list(zip(tiled_nested_strides, tiled_nested_shape)))
if i != layout.vector_dim
):
raise ValueError(
"Tiled strides must be a multiple of the vector length, except for the"
" vector dimension"
)
if tiled_nested_strides[layout.vector_dim] != (1,):
raise ValueError(
"Vectorized dimension should not require further tiling and have a"
" stride of 1"
)
tiles_shape = list(tiled_nested_shape)
tiles_strides = list(tiled_nested_strides)
for d in (*layout.partitioned_warp_dims, *layout.partitioned_lane_dims, layout.vector_dim):
# We could avoid repeating the singleton dimensions, but it simplifies the
# code below that computes the register index for a given tile.
tiles_shape[d] = (1,) * len(tiles_shape[d])
tiles_strides[d] = (0,) * len(tiles_strides[d])
tiles_shape = list(itertools.chain.from_iterable(tiles_shape))
tiles_strides = list(itertools.chain.from_iterable(tiles_strides))
warp_shape = list(itertools.chain.from_iterable(
(d.times,) if isinstance(d, Replicated) else tiled_nested_shape[d] for d in layout.warp_dims
))
warp_strides = list(itertools.chain.from_iterable(
(0,) if isinstance(d, Replicated) else tiled_nested_strides[d] for d in layout.warp_dims
))
lane_shape = list(itertools.chain.from_iterable(
(d.times,) if isinstance(d, Replicated) else tiled_nested_shape[d] for d in layout.lane_dims
))
lane_strides = list(itertools.chain.from_iterable(
(0,) if isinstance(d, Replicated) else tiled_nested_strides[d] for d in layout.lane_dims
))
vector_length = layout.vector_length
element_bits = mgpu.bitwidth(dtype)
if (vector_length * element_bits) % 8 != 0:
raise ValueError(
f"Vector length ({vector_length}) must be a multiple of bytes,"
f" but has {vector_length * element_bits} bits"
)
transfer_bytes = (vector_length * element_bits) // 8
if swizzle not in {16, 32, 64, 128}:
raise ValueError("Only swizzled transfers supported")
# We will be computing the offsets in units of vectors, not elements,
# to better support sub-byte types.
swizzle_tile_transfers = 16 // transfer_bytes
swizzle_group_transfers = 128 // transfer_bytes
swizzle_groups_per_block = swizzle // 16
swizzle_block_transfers = swizzle_groups_per_block * swizzle_group_transfers
if ir.FloatType.isinstance(dtype) and element_bits <= 8:
narrow_int = ir.IntegerType.get_signless(element_bits)
transfer_dtype = ir.VectorType.get((vector_length,), narrow_int)
else:
transfer_dtype = ir.VectorType.get((vector_length,), dtype)
if ref_ty.memory_space is None:
llvm_memory_space = None
elif utils.is_smem_ref(ref_ty):
llvm_memory_space = 3
else:
raise ValueError(f"Unsupported memory space: {ref_ty.memory_space}")
if optimized:
if llvm_memory_space != 3:
raise NotImplementedError("Only optimized transfers to SMEM supported")
plan = plan_tiled_transfer(
tiles_shape, tiles_strides,
warp_shape, warp_strides,
lane_shape, lane_strides,
vector_length, element_bits, swizzle
)
else:
plan = TrivialTransferPlan()
tiles_strides_transfer = [s // vector_length for s in tiles_strides]
# Technically we should keep the vector_dim stride set to 1, but its shape
# is 1 so it does not matter.
dyn_tiled_strides = [
c(s // vector_length)
for s in itertools.chain.from_iterable(
tiled_nested_strides[-layout.tiled_tiling_rank :]
)
]
# This expands a tiled index into a finer-grained index that accounts for
# the fact that some tiled dims are tiled further in the nested shape.
def expand_nested_dims(idxs: Sequence[ir.Value]) -> list[ir.Value]:
assert len(idxs) == layout.tiled_tiling_rank
new_idxs = []
for idx, dim_shape in zip(idxs, tiled_nested_shape[-layout.tiled_tiling_rank :]):
if dim_shape == (1,):
new_idxs.append(idx)
continue
dim_strides = utils.get_contiguous_strides(dim_shape)
for i, (size, stride) in enumerate(zip(dim_shape, dim_strides)):
new_idx = arith.divui(idx, c(stride))
if i != 0: # No need to apply rem to the first dim.
new_idx = arith.remui(new_idx, c(size))
new_idxs.append(new_idx)
assert len(new_idxs) == sum(map(len, tiled_nested_shape[-layout.tiled_tiling_rank :]))
return new_idxs
# All offsets are in units of transfer_dtype.
lane_offset = utils.dyn_dot(expand_nested_dims(layout.lane_indices()), dyn_tiled_strides)
warp_offset = utils.dyn_dot(expand_nested_dims(layout.warp_indices()), dyn_tiled_strides)
dyn_offset = arith.addi(lane_offset, warp_offset)
ptr = utils.memref_ptr(ref, memory_space=llvm_memory_space)
_as_consts = lambda consts: [c(const) for const in consts.tolist()]
# This has bits set only for the offset bits that influence swizzling.
swizzle_mask = swizzle_block_transfers - swizzle_tile_transfers
for tile_idx in np.ndindex(*tiles_shape):
indices = np.asarray([f(tile_idx) for f in plan.tile_index_transforms])
const_offset = np.dot(indices, tiles_strides_transfer)
# We split the offset into a part that interacts with swizzling and a
# part that doesn't. This lets us generate better code because constant
# offsets can be fused into load and store instructions.
const_offset_swizzle = const_offset & swizzle_mask
const_offset_no_swizzle = const_offset - const_offset_swizzle
offset_pre_swizzle = arith.addi(
dyn_offset, plan.select(_as_consts(const_offset_swizzle))
)
swizzle_group = arith.remui(
arith.divui(offset_pre_swizzle, c(swizzle_group_transfers)),
c(swizzle_groups_per_block),
)
swizzle_bits = arith.muli(swizzle_group, c(swizzle_tile_transfers))
offset = arith.xori(offset_pre_swizzle, swizzle_bits)
reg_ptr = utils.getelementptr(ptr, [offset], transfer_dtype)
offset_no_swizzle = plan.select(_as_consts(const_offset_no_swizzle))
reg_ptr = utils.getelementptr(reg_ptr, [offset_no_swizzle], transfer_dtype)
# Here, registers are organized in an array with shape obtained by tiling
# the logical data bounds. But, the reference was tiled and so each
# logical tiled dimension can map to multiple dims in tiled_shape.
# The transform below maps this potentially higher-rank representation
# back to the lower-rank representation used by the register arrays.
def mem_idx_to_reg_idx(idx):
reg_tiled_idx = []
base_idx = 0
for dim_shape in tiled_nested_shape:
dim_strides = utils.get_contiguous_strides(dim_shape)
dim_idxs = idx[base_idx:base_idx + len(dim_shape)]
base_idx += len(dim_shape)
reg_tiled_idx.append(sum(i * s for i, s in zip(dim_idxs, dim_strides)))
return tuple(reg_tiled_idx)
reg_idxs = [mem_idx_to_reg_idx(idx) for idx in indices.tolist()]
def get_register(regs, reg_idxs=reg_idxs):
# f8 data types are not handled by the LLVM dialect, so we need to
# transfer them as i8 and bitcast them back to f8.
return plan.select([regs[reg_idx] for reg_idx in reg_idxs])
def update_registers(regs, new, reg_idxs=reg_idxs):
# TODO(apaszke): If the staggering forms a permutation with a small
# cycle length, then instead of blending at each step we could construct
# a small routing network (kind of like a sorting network) to fix up
# each cycle separately after all the loads are performed.
# This would be especially useful for dims that are powers of two and
# staggered by another power of 2, since all cycles are of length 2 (and
# we could save half the selects).
for i, reg_idx in enumerate(reg_idxs):
regs[reg_idx] = plan.select_if_group(i, regs[reg_idx], new)
def get_base_index():
if not isinstance(plan, TrivialTransferPlan):
raise NotImplementedError(
"Base index computation only supported for trivial transfer plans"
)
if any(len(t) != 1 for t in tiled_nested_shape):
raise NotImplementedError("Tiling too complicated")
return tiling.untile_indices(indices.tolist()[0])
yield get_register, update_registers, get_base_index, reg_ptr
def tree_flatten(self):
aux = self.layout, self.registers.shape, self.is_signed
return list(self.registers.flat), aux
@classmethod
def tree_unflatten(cls, aux, flat_registers):
layout, reg_shape, is_signed = aux
registers = np.asarray(flat_registers, dtype=object).reshape(reg_shape)
return cls(_registers=registers, _layout=layout, _is_signed=is_signed)
IndexTransform: TypeAlias = Callable[[tuple[int, ...]], tuple[int, ...]]
| FragmentedArray |
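# Illustrative sketch (not part of the module above): a pure-Python model of the
# xor/butterfly shuffle reduction that FragmentedArray.reduce performs across warp
# lanes via utils.shfl_bfly. The lane count and `op` below are assumptions for the demo.
def butterfly_reduce(values, op):
    stride = 1
    while stride < len(values):  # len(values) is assumed to be a power of two
        values = [op(v, values[i ^ stride]) for i, v in enumerate(values)]
        stride *= 2
    return values  # every "lane" now holds the full reduction
print(butterfly_reduce(list(range(8)), lambda a, b: a + b))  # [28, 28, ..., 28]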
python | django__django | tests/template_tests/filter_tests/test_escapejs.py | {
"start": 1071,
"end": 2460
} | class ____(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
escapejs_filter("\"double quotes\" and 'single quotes'"),
"\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027",
)
def test_backslashes(self):
self.assertEqual(
escapejs_filter(r"\ : backslashes, too"), "\\u005C : backslashes, too"
)
def test_whitespace(self):
self.assertEqual(
escapejs_filter("and lots of whitespace: \r\n\t\v\f\b"),
"and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008",
)
def test_script(self):
self.assertEqual(
escapejs_filter(r"<script>and this</script>"),
"\\u003Cscript\\u003Eand this\\u003C/script\\u003E",
)
def test_paragraph_separator(self):
self.assertEqual(
escapejs_filter("paragraph separator:\u2029and line separator:\u2028"),
"paragraph separator:\\u2029and line separator:\\u2028",
)
def test_lazy_string(self):
append_script = lazy(lambda string: r"<script>this</script>" + string, str)
self.assertEqual(
escapejs_filter(append_script("whitespace: \r\n\t\v\f\b")),
"\\u003Cscript\\u003Ethis\\u003C/script\\u003E"
"whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008",
)
| FunctionTests |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/oscon/schedule_v5.py | {
"start": 944,
"end": 2233
} | class ____(Record):
def __repr__(self):
try:
return f'<{self.__class__.__name__} {self.name!r}>'
except AttributeError:
return super().__repr__()
# tag::SCHEDULE5_CACHED_PROPERTY[]
@cached_property
def venue(self):
key = f'venue.{self.venue_serial}'
return self.__class__.fetch(key)
# end::SCHEDULE5_CACHED_PROPERTY[]
# tag::SCHEDULE5_PROPERTY_OVER_CACHE[]
@property # <1>
@cache # <2>
def speakers(self):
spkr_serials = self.__dict__['speakers']
fetch = self.__class__.fetch
return [fetch(f'speaker.{key}')
for key in spkr_serials]
# end::SCHEDULE5_PROPERTY_OVER_CACHE[]
def load(path=JSON_PATH):
records = {}
with open(path) as fp:
raw_data = json.load(fp)
for collection, raw_records in raw_data['Schedule'].items():
record_type = collection[:-1]
cls_name = record_type.capitalize()
cls = globals().get(cls_name, Record)
if inspect.isclass(cls) and issubclass(cls, Record):
factory = cls
else:
factory = Record
for raw_record in raw_records:
key = f'{record_type}.{raw_record["serial"]}'
records[key] = factory(**raw_record)
return records
| Event |
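# Illustrative sketch (hypothetical Demo class, not from the schedule module above):
# the two caching idioms used by Event.venue and Event.speakers behave the same for
# repeated reads, but @cached_property stores the value in the instance __dict__,
# while @property over @cache keys a module-level cache on `self`.
from functools import cache, cached_property
class Demo:
    @cached_property
    def venue_like(self):
        print("computing venue_like")
        return 1
    @property
    @cache
    def speakers_like(self):
        print("computing speakers_like")
        return [2, 3]
d = Demo()
d.venue_like; d.venue_like        # "computing venue_like" printed once
d.speakers_like; d.speakers_like  # "computing speakers_like" printed once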
python | pandas-dev__pandas | pandas/core/computation/scope.py | {
"start": 2691,
"end": 10204
} | class ____:
"""
Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = ["level", "resolvers", "scope", "target", "temps"]
level: int
scope: DeepChainMap
resolvers: DeepChainMap
temps: dict
def __init__(
self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None
) -> None:
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self._update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
scope_global = self.scope.new_child(
(global_dict if global_dict is not None else frame.f_globals).copy()
)
self.scope = DeepChainMap(scope_global)
if not isinstance(local_dict, Scope):
scope_local = self.scope.new_child(
(local_dict if local_dict is not None else frame.f_locals).copy()
)
self.scope = DeepChainMap(scope_local)
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __repr__(self) -> str:
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})"
@property
def has_resolvers(self) -> bool:
"""
Return whether we have any extra scope.
For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key: str, is_local: bool):
"""
Resolve a variable name in a possibly local context.
Parameters
----------
key : str
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError as err:
raise UndefinedVariableError(key, is_local) from err
def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:
"""
Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes: list[str]) -> None:
"""
Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, f"f_{scope}")
self.scope = DeepChainMap(self.scope.new_child(d))
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def _update(self, level: int) -> None:
"""
Update the current scope by going back `level` levels.
Parameters
----------
level : int
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=["locals"])
finally:
del stack[:], stack
def add_tmp(self, value) -> str:
"""
Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
str
The name of the temporary variable created.
"""
name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}"
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self) -> int:
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self) -> DeepChainMap:
"""
Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
| Scope |
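# Illustrative usage sketch (public pandas API only, values are assumptions): Scope is
# what lets DataFrame.query()/eval() resolve column names via resolvers and
# '@'-prefixed names via the caller's local variables.
import pandas as pd
df = pd.DataFrame({"a": [1, 2, 3]})
threshold = 1
print(df.query("a > @threshold"))  # 'a' comes from the frame, 'threshold' from locals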
python | fastai__fastai | fastai/text/data.py | {
"start": 5734,
"end": 7688
} | class ____(ItemTransform):
def encodes(self,samples, pad_idx=1, pad_fields=0, pad_first=False, backwards=False):
"Function that collect `samples` and adds padding"
self.pad_idx = pad_idx
pad_fields = L(pad_fields)
max_len_l = pad_fields.map(lambda f: max([len(s[f]) for s in samples]))
if backwards: pad_first = not pad_first
def _f(field_idx, x):
if field_idx not in pad_fields: return x
idx = pad_fields.items.index(field_idx) #TODO: remove items if L.index is fixed
sl = slice(-len(x), sys.maxsize) if pad_first else slice(0, len(x))
pad = x.new_zeros(max_len_l[idx]-x.shape[0])+pad_idx
x1 = torch.cat([pad, x] if pad_first else [x, pad])
if backwards: x1 = x1.flip(0)
return retain_type(x1, x)
return [tuple(map(lambda idxx: _f(*idxx), enumerate(s))) for s in samples]
def decodes(self, o:TensorText):
pad_idx = self.pad_idx if hasattr(self,'pad_idx') else 1
return o[o != pad_idx]
pad_input=Pad_Input()
# %% ../../nbs/31_text.data.ipynb 44
def pad_chunk(x,pad_idx=1, pad_first=True, seq_len=72, pad_len=10):
"Pad `x` by adding padding by chunks of size `seq_len`"
l = pad_len - x.shape[0]
pad_chunk = x.new_zeros((l//seq_len) * seq_len) + pad_idx
pad_res = x.new_zeros(l % seq_len) + pad_idx
x1 = torch.cat([pad_chunk, x, pad_res]) if pad_first else torch.cat([x, pad_chunk, pad_res])
return retain_type(x1, x)
# %% ../../nbs/31_text.data.ipynb 47
@delegates(pad_chunk)
def pad_input_chunk(samples, n_inp=1,**kwargs):
"Pad `samples` by adding padding by chunks of size `seq_len`"
max_len = max([len(s[n]) for s in samples for n in range(n_inp)])
padeds = [[pad_chunk(s[n],pad_len=max_len,**kwargs) for n in range(n_inp) ] for s in samples]
return [(*p, *s[n_inp:]) for p,s in zip(padeds,samples)]
# %% ../../nbs/31_text.data.ipynb 52
| Pad_Input |
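# Minimal stand-alone sketch (torch only, names are illustrative) of the padding idea
# behind Pad_Input/pad_chunk above: left- or right-pad a 1D tensor with pad_idx.
import torch
def pad_to(x, length, pad_idx=1, pad_first=False):
    pad = x.new_zeros(length - x.shape[0]) + pad_idx
    return torch.cat([pad, x] if pad_first else [x, pad])
print(pad_to(torch.tensor([5, 6, 7]), 5))  # tensor([5, 6, 7, 1, 1])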
python | kamyu104__LeetCode-Solutions | Python/4sum.py | {
"start": 2647,
"end": 3575
} | class ____(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums, result, lookup = sorted(nums), [], collections.defaultdict(list)
for i in xrange(0, len(nums) - 1):
for j in xrange(i + 1, len(nums)):
lookup[nums[i] + nums[j]].append([i, j])
for i in lookup.keys():
if target - i in lookup:
for x in lookup[i]:
for y in lookup[target - i]:
[a, b], [c, d] = x, y
if a != c and a != d and \
b != c and b != d:
quad = sorted([nums[a], nums[b], nums[c], nums[d]])
if quad not in result:
result.append(quad)
return sorted(result)
| Solution3 |
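# Illustrative Python 3 restatement of the pair-sum idea used by Solution3 above
# (the function name and sample input are assumptions, not from the original file):
import collections, itertools
def four_sum(nums, target):
    nums = sorted(nums)
    pair_sums = collections.defaultdict(list)
    for i, j in itertools.combinations(range(len(nums)), 2):
        pair_sums[nums[i] + nums[j]].append((i, j))
    result = set()
    for s, pairs in pair_sums.items():
        for a, b in pairs:
            for c, d in pair_sums.get(target - s, []):
                if len({a, b, c, d}) == 4:
                    result.add(tuple(sorted((nums[a], nums[b], nums[c], nums[d]))))
    return sorted(map(list, result))
print(four_sum([1, 0, -1, 0, -2, 2], 0))  # [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]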
python | anthropics__anthropic-sdk-python | tests/api_resources/beta/test_messages.py | {
"start": 443,
"end": 19382
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_create_overload_1(self, client: Anthropic) -> None:
message = client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
)
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None:
message = client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
container={
"id": "id",
"skills": [
{
"skill_id": "x",
"type": "anthropic",
"version": "x",
}
],
},
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
service_tier="auto",
stop_sequences=["string"],
stream=False,
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
temperature=1,
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
top_k=5,
top_p=0.7,
betas=["string"],
)
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_raw_response_create_overload_1(self, client: Anthropic) -> None:
response = client.beta.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessage, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_streaming_response_create_overload_1(self, client: Anthropic) -> None:
with client.beta.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessage, message, path=["response"])
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_create_overload_2(self, client: Anthropic) -> None:
message_stream = client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
)
message_stream.response.close()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None:
message_stream = client.beta.messages.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
container={
"id": "id",
"skills": [
{
"skill_id": "x",
"type": "anthropic",
"version": "x",
}
],
},
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
service_tier="auto",
stop_sequences=["string"],
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
temperature=1,
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
top_k=5,
top_p=0.7,
betas=["string"],
)
message_stream.response.close()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_raw_response_create_overload_2(self, client: Anthropic) -> None:
response = client.beta.messages.with_raw_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_streaming_response_create_overload_2(self, client: Anthropic) -> None:
with client.beta.messages.with_streaming_response.create(
max_tokens=1024,
messages=[
{
"content": "Hello, world",
"role": "user",
}
],
model="claude-sonnet-4-5-20250929",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
assert cast(Any, response.is_closed) is True
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_count_tokens(self, client: Anthropic) -> None:
message = client.beta.messages.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
)
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_method_count_tokens_with_all_params(self, client: Anthropic) -> None:
message = client.beta.messages.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
context_management={
"edits": [
{
"type": "clear_tool_uses_20250919",
"clear_at_least": {
"type": "input_tokens",
"value": 0,
},
"clear_tool_inputs": True,
"exclude_tools": ["string"],
"keep": {
"type": "tool_uses",
"value": 0,
},
"trigger": {
"type": "input_tokens",
"value": 1,
},
}
]
},
mcp_servers=[
{
"name": "name",
"type": "url",
"url": "url",
"authorization_token": "authorization_token",
"tool_configuration": {
"allowed_tools": ["string"],
"enabled": True,
},
}
],
output_config={"effort": "low"},
output_format={
"schema": {"foo": "bar"},
"type": "json_schema",
},
system=[
{
"text": "Today's date is 2024-06-01.",
"type": "text",
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"citations": [
{
"cited_text": "cited_text",
"document_index": 0,
"document_title": "x",
"end_char_index": 0,
"start_char_index": 0,
"type": "char_location",
}
],
}
],
thinking={
"budget_tokens": 1024,
"type": "enabled",
},
tool_choice={
"type": "auto",
"disable_parallel_tool_use": True,
},
tools=[
{
"input_schema": {
"type": "object",
"properties": {
"location": "bar",
"unit": "bar",
},
"required": ["location"],
},
"name": "name",
"allowed_callers": ["direct"],
"cache_control": {
"type": "ephemeral",
"ttl": "5m",
},
"defer_loading": True,
"description": "Get the current weather in a given location",
"input_examples": [{"foo": "bar"}],
"strict": True,
"type": "custom",
}
],
betas=["string"],
)
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_raw_response_count_tokens(self, client: Anthropic) -> None:
response = client.beta.messages.with_raw_response.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
@pytest.mark.skip(reason="prism validates based on the non-beta endpoint")
@parametrize
def test_streaming_response_count_tokens(self, client: Anthropic) -> None:
with client.beta.messages.with_streaming_response.count_tokens(
messages=[
{
"content": "string",
"role": "user",
}
],
model="claude-opus-4-5-20251101",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(BetaMessageTokensCount, message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_pydantic_error_in_create(self, client: Anthropic) -> None:
class MyModel(pydantic.BaseModel):
name: str
age: int
with pytest.raises(TypeError) as exc_info:
client.beta.messages.create(
max_tokens=1024,
messages=[{"role": "user", "content": "Test"}],
model="claude-sonnet-4-5-20250929",
output_format=MyModel, # type: ignore
)
error_message = str(exc_info.value)
assert "parse()" in error_message
| TestMessages |
python | jina-ai__jina | tests/integration/hot_reload/my_executor_3_new.py | {
"start": 169,
"end": 313
} | class ____(A):
@requests
def y(self, docs, **kwargs):
for doc in docs:
doc.text = 'EnhancedAfterReload'
| EnhancedExecutor |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-postgresml/llama_index/indices/managed/postgresml/query.py | {
"start": 1269,
"end": 2343
} | class ____(Generator, AsyncGenerator):
def __init__(self, rag_stream_results) -> None:
self.rag_stream_results = rag_stream_results
self.rag_stream = None
def asend(self):
raise Exception("asend is not implemented")
def send(self):
raise Exception("send is not implemented")
def athrow(self):
raise Exception("athrow is not implemented")
def throw(self):
raise Exception("throw is not implemented")
def __iter__(self) -> "AsyncJsonGenerator":
return self
def __aiter__(self) -> "AsyncJsonGenerator":
return self
def __next__(self) -> str:
try:
return run_async_tasks([self.__anext__()])[0]
except StopAsyncIteration:
raise StopIteration
async def __anext__(self) -> str:
if not self.rag_stream:
self.rag_stream = self.rag_stream_results.stream()
result = await self.rag_stream.__anext__()
if len(result) > 0:
return result[0]
else:
return ""
| AsyncJsonGenerator |
python | pytorch__pytorch | .github/scripts/generate_ci_workflows.py | {
"start": 2956,
"end": 10877
} | class ____:
LINUX = "linux"
WINDOWS = "windows"
WINDOWS_ARM64 = "windows-arm64"
MACOS = "macos"
MACOS_ARM64 = "macos-arm64"
LINUX_AARCH64 = "linux-aarch64"
LINUX_S390X = "linux-s390x"
LINUX_BINARY_BUILD_WORFKLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.RELEASE,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
WINDOWS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.WINDOWS
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
build_variant=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.RELEASE,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
build_variant=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.DEBUG,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS_ARM64,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.WINDOWS_ARM64,
arches=["cpu"],
python_versions=["3.11", "3.12", "3.13"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS_ARM64,
package_type="libtorch",
build_variant=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS_ARM64,
generate_binary_build_matrix.RELEASE,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS_ARM64,
package_type="libtorch",
build_variant=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS_ARM64,
generate_binary_build_matrix.DEBUG,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
MACOS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="libtorch",
build_variant=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.MACOS,
generate_binary_build_matrix.RELEASE,
libtorch_variants=["shared-with-deps"],
),
macos_runner="macos-14-xlarge",
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.MACOS_ARM64
),
macos_runner="macos-14-xlarge",
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
]
AARCH64_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX_AARCH64,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX_AARCH64
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
]
S390X_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX_S390X,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX_S390X
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
]
def main() -> None:
jinja_env = jinja2.Environment(
variable_start_string="!{{",
loader=jinja2.FileSystemLoader(str(GITHUB_DIR.joinpath("templates"))),
undefined=jinja2.StrictUndefined,
)
# not ported yet
template_and_workflows = [
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
LINUX_BINARY_BUILD_WORFKLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
AARCH64_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
S390X_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
WINDOWS_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("macos_binary_build_workflow.yml.j2"),
MACOS_BINARY_BUILD_WORKFLOWS,
),
]
# Delete the existing generated files first, this should align with .gitattributes file description.
existing_workflows = GITHUB_DIR.glob("workflows/generated-*")
for w in existing_workflows:
try:
os.remove(w)
except Exception as e:
print(f"Error occurred when deleting file {w}: {e}")
for template, workflows in template_and_workflows:
# added Iterable check to appease the mypy gods
if not isinstance(workflows, Iterable):
raise Exception( # noqa: TRY002
f"How is workflows not iterable? {workflows}"
) # noqa: TRY002
for workflow in workflows:
workflow.generate_workflow_file(workflow_template=template)
if __name__ == "__main__":
main()
| OperatingSystem |
python | getsentry__sentry | src/sentry/snuba/outcomes.py | {
"start": 3878,
"end": 5199
} | class ____(Dimension[DataCategory]):
def resolve_filter(self, raw_filter: Sequence[str]) -> list[DataCategory]:
resolved_categories = set()
for category in raw_filter:
# combine DEFAULT, ERROR, and SECURITY as errors.
# see relay: py/sentry_relay/consts.py and relay-cabi/include/relay.h
parsed_category = DataCategory.parse(category)
if parsed_category is None and parsed_category != "metrics":
raise InvalidField(f'Invalid category: "{category}"')
elif parsed_category == DataCategory.ERROR:
resolved_categories.update(DataCategory.error_categories())
else:
resolved_categories.add(parsed_category)
if DataCategory.ATTACHMENT in resolved_categories and len(resolved_categories) > 1:
raise InvalidQuery("if filtering by attachment no other category may be present")
return list(resolved_categories)
def map_row(self, row: MutableMapping[str, Any]) -> None:
if "category" in row:
category = (
DataCategory.ERROR
if row["category"] in DataCategory.error_categories()
else DataCategory(row["category"])
)
row["category"] = category.api_name()
| CategoryDimension |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 29657,
"end": 31266
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(self, client_id: str, client_secret: str, refresh_token: str):
self.auth_type = "oauth2.0"
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class APIToken:
@public
def __init__(self, api_token: str):
self.auth_type = "api_token"
self.api_token = check.str_param(api_token, "api_token")
@public
def __init__(
self,
name: str,
credentials: Union["OktaSource.OAuth20", "OktaSource.APIToken"],
domain: Optional[str] = None,
start_date: Optional[str] = None,
):
"""Airbyte Source for Okta.
Documentation can be found at https://docs.airbyte.com/integrations/sources/okta
Args:
name (str): The name of the destination.
domain (Optional[str]): The Okta domain. See the docs for instructions on how to find it.
start_date (Optional[str]): UTC date and time in the format YYYY-MM-DDTHH:MM:SSZ. Any data before this date will not be replicated.
"""
self.domain = check.opt_str_param(domain, "domain")
self.start_date = check.opt_str_param(start_date, "start_date")
self.credentials = check.inst_param(
credentials, "credentials", (OktaSource.OAuth20, OktaSource.APIToken)
)
super().__init__("Okta", name)
| OktaSource |
python | plotly__plotly.py | plotly/graph_objs/sankey/_hoverlabel.py | {
"start": 233,
"end": 11234
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey"
_path_str = "sankey.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
        label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.sankey.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Hoverlabel`
align
Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 13226,
"end": 15308
} | class ____(PostgresqlDatabase):
def __init__(self, *args, **kwargs):
self._register_hstore = kwargs.pop('register_hstore', False)
self._server_side_cursors = kwargs.pop('server_side_cursors', False)
super(PostgresqlExtDatabase, self).__init__(*args, **kwargs)
def _connect(self):
conn = super(PostgresqlExtDatabase, self)._connect()
if self._register_hstore:
register_hstore(conn, globally=True)
return conn
def cursor(self, commit=None, named_cursor=None):
if commit is not None:
__deprecated__('"commit" has been deprecated and is a no-op.')
if self.is_closed():
if self.autoconnect:
self.connect()
else:
raise InterfaceError('Error, database connection not opened.')
if named_cursor:
curs = self._state.conn.cursor(name=str(uuid.uuid1()),
withhold=True)
return curs
return self._state.conn.cursor()
def execute(self, query, commit=None, named_cursor=False, array_size=None,
**context_options):
if commit is not None:
__deprecated__('"commit" has been deprecated and is a no-op.')
ctx = self.get_sql_context(**context_options)
sql, params = ctx.sql(query).query()
named_cursor = named_cursor or (self._server_side_cursors and
sql[:6].lower() == 'select')
cursor = self.execute_sql(sql, params, named_cursor=named_cursor)
if named_cursor:
cursor = FetchManyCursor(cursor, array_size)
return cursor
def execute_sql(self, sql, params=None, commit=None, named_cursor=None):
if commit is not None:
__deprecated__('"commit" has been deprecated and is a no-op.')
logger.debug((sql, params))
with __exception_wrapper__:
cursor = self.cursor(named_cursor=named_cursor)
cursor.execute(sql, params or ())
return cursor
| PostgresqlExtDatabase |
python | django__django | tests/contenttypes_tests/test_models.py | {
"start": 12509,
"end": 12789
} | class ____:
def db_for_read(self, model, **hints):
return "other"
def db_for_write(self, model, **hints):
return "default"
def allow_relation(self, obj1, obj2, **hints):
return True
@override_settings(DATABASE_ROUTERS=[TestRouter()])
| TestRouter |
python | pytorch__pytorch | torch/_inductor/cudagraph_trees.py | {
"start": 27435,
"end": 27600
} | class ____(OutputAliasInfo):
"Singleton to mark that the graph output constructs a new alias or is None"
UnaliasedStorage = _UnaliasedStorage()
| _UnaliasedStorage |
python | davidhalter__jedi | jedi/inference/value/decorator.py | {
"start": 194,
"end": 1207
} | class ____(ValueWrapper):
def __init__(self, wrapped_value, original_value):
super().__init__(wrapped_value)
self._original_value = original_value
def py__doc__(self):
return self._original_value.py__doc__()
def py__get__(self, instance, class_value):
return ValueSet(
Decoratee(v, self._original_value)
for v in self._wrapped_value.py__get__(instance, class_value)
)
def get_signatures(self):
signatures = self._wrapped_value.get_signatures()
if signatures:
return signatures
# Fallback to signatures of the original function/class if the
# decorator has no signature or it is not inferrable.
#
# __get__ means that it's a descriptor. In that case we don't return
# signatures, because they are usually properties.
if not self._wrapped_value.py__getattribute__('__get__'):
return self._original_value.get_signatures()
return []
| Decoratee |
python | ray-project__ray | python/ray/serve/_private/cluster_node_info_cache.py | {
"start": 206,
"end": 3379
} | class ____(ABC):
"""Provide access to cached node information in the cluster."""
def __init__(self, gcs_client: GcsClient):
self._gcs_client = gcs_client
self._cached_alive_nodes = None
self._cached_node_labels = dict()
self._cached_total_resources_per_node = dict()
self._cached_available_resources_per_node = dict()
def update(self):
"""Update the cache by fetching latest node information from GCS.
This should be called once in each update cycle.
Within an update cycle, everyone will see the same
cached node info avoiding any potential issues
caused by inconsistent node info seen by different components.
"""
nodes = self._gcs_client.get_all_node_info(timeout=RAY_GCS_RPC_TIMEOUT_S)
alive_nodes = [
(node_id.hex(), node.node_name, node.instance_id)
for (node_id, node) in nodes.items()
if node.state == ray.core.generated.gcs_pb2.GcsNodeInfo.ALIVE
]
# Sort on NodeID to ensure the ordering is deterministic across the cluster.
        alive_nodes.sort()
self._cached_alive_nodes = alive_nodes
self._cached_node_labels = {
node_id.hex(): dict(node.labels) for (node_id, node) in nodes.items()
}
# Node resources
self._cached_total_resources_per_node = {
node_id.hex(): dict(node.resources_total)
for (node_id, node) in nodes.items()
}
self._cached_available_resources_per_node = (
ray._private.state.available_resources_per_node()
)
def get_alive_nodes(self) -> List[Tuple[str, str, str]]:
"""Get IDs, IPs, and Instance IDs for all live nodes in the cluster.
Returns a list of (node_id: str, node_ip: str, instance_id: str).
The node_id can be passed into the Ray SchedulingPolicy API.
"""
return self._cached_alive_nodes
def get_total_resources_per_node(self) -> Dict[str, Dict]:
"""Get total resources for alive nodes."""
return self._cached_total_resources_per_node
def get_alive_node_ids(self) -> Set[str]:
"""Get IDs of all live nodes in the cluster."""
return {node_id for node_id, _, _ in self.get_alive_nodes()}
@abstractmethod
def get_draining_nodes(self) -> Dict[str, int]:
"""Get draining nodes in the cluster and their deadlines."""
raise NotImplementedError
@abstractmethod
def get_node_az(self, node_id: str) -> Optional[str]:
"""Get availability zone of a node."""
raise NotImplementedError
def get_active_node_ids(self) -> Set[str]:
"""Get IDs of all active nodes in the cluster.
A node is active if it's schedulable for new tasks and actors.
"""
return self.get_alive_node_ids() - set(self.get_draining_nodes())
def get_available_resources_per_node(self) -> Dict[str, Union[float, Dict]]:
"""Get available resources per node.
Returns a map from (node_id -> Dict of resources).
"""
return self._cached_available_resources_per_node
| ClusterNodeInfoCache |
python | urllib3__urllib3 | test/test_exceptions.py | {
"start": 525,
"end": 2420
} | class ____:
@pytest.mark.parametrize(
"exception",
[
HTTPError(None),
MaxRetryError(DUMMY_POOL, "", None),
MaxRetryError(DUMMY_POOL, "", Exception("Error occured")),
LocationParseError(""),
ConnectTimeoutError(None),
HTTPError("foo"),
HTTPError("foo", IOError("foo")),
MaxRetryError(HTTPConnectionPool("localhost"), "/", None),
LocationParseError("fake location"),
ClosedPoolError(HTTPConnectionPool("localhost"), ""),
EmptyPoolError(HTTPConnectionPool("localhost"), ""),
HostChangedError(HTTPConnectionPool("localhost"), "/", 0),
ReadTimeoutError(HTTPConnectionPool("localhost"), "/", ""),
ReadTimeoutError(HTTPConnectionPool("localhost"), "/", "message"),
NewConnectionError(HTTPConnection("localhost"), ""),
NameResolutionError("", HTTPConnection("localhost"), socket.gaierror()),
NameResolutionError(
"host", HTTPConnection("localhost"), socket.gaierror("error")
),
],
)
def test_exceptions(self, exception: Exception) -> None:
result = pickle.loads(pickle.dumps(exception))
assert isinstance(result, type(exception))
if hasattr(exception, "_message"):
assert exception._message == result._message # type: ignore[attr-defined]
assert exception._message in str(result)
if hasattr(exception, "_host"):
# host is likely a string so directly comparable
assert exception._host == result._host # type: ignore[attr-defined]
if hasattr(exception, "_reason"):
# reason is likely an exception so do string comparison instead
assert str(exception._reason) == str(result._reason) # type: ignore[attr-defined]
| TestPickle |
python | mlflow__mlflow | mlflow/system_metrics/metrics/gpu_monitor.py | {
"start": 406,
"end": 2905
} | class ____(BaseMetricsMonitor):
"""Class for monitoring GPU stats."""
def __init__(self):
if "pynvml" not in sys.modules:
# Only instantiate if `pynvml` is installed.
raise ImportError(
"`nvidia-ml-py` is not installed, to log GPU metrics please run "
"`pip install nvidia-ml-py` to install it."
)
try:
# `nvmlInit()` will fail if no GPU is found.
pynvml.nvmlInit()
except pynvml.NVMLError as e:
raise RuntimeError(f"Failed to initialize NVML, skip logging GPU metrics: {e}")
super().__init__()
self.num_gpus = pynvml.nvmlDeviceGetCount()
self.gpu_handles = [pynvml.nvmlDeviceGetHandleByIndex(i) for i in range(self.num_gpus)]
def collect_metrics(self):
# Get GPU metrics.
for i, handle in enumerate(self.gpu_handles):
try:
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
self._metrics[f"gpu_{i}_memory_usage_percentage"].append(
round(memory.used / memory.total * 100, 1)
)
self._metrics[f"gpu_{i}_memory_usage_megabytes"].append(memory.used / 1e6)
except pynvml.NVMLError as e:
_logger.warning(f"Encountered error {e} when trying to collect GPU memory metrics.")
try:
device_utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
self._metrics[f"gpu_{i}_utilization_percentage"].append(device_utilization.gpu)
except pynvml.NVMLError as e:
_logger.warning(
f"Encountered error {e} when trying to collect GPU utilization metrics."
)
try:
power_milliwatts = pynvml.nvmlDeviceGetPowerUsage(handle)
power_capacity_milliwatts = pynvml.nvmlDeviceGetEnforcedPowerLimit(handle)
self._metrics[f"gpu_{i}_power_usage_watts"].append(power_milliwatts / 1000)
self._metrics[f"gpu_{i}_power_usage_percentage"].append(
(power_milliwatts / power_capacity_milliwatts) * 100
)
except pynvml.NVMLError as e:
_logger.warning(
f"Encountered error {e} when trying to collect GPU power usage metrics."
)
def aggregate_metrics(self):
return {k: round(sum(v) / len(v), 1) for k, v in self._metrics.items()}
| GPUMonitor |
python | pola-rs__polars | pyo3-polars/example/derive_expression/expression_lib/expression_lib/extension.py | {
"start": 688,
"end": 1165
} | class ____:
def __init__(self, expr: pl.Expr):
self._expr = expr
def __getattr__(self, attr: str) -> Callable[..., pl.Expr]:
if attr in ("pig_latinnify", "append_args"):
def func(*args: Any, **kwargs: Any) -> pl.Expr:
return getattr(language, attr)(self._expr, *args, **kwargs)
return func
raise AttributeError(f"{self.__class__} has no attribute {attr}")
@pl.api.register_expr_namespace("dist")
| Language |
python | google__pytype | pytype/directors/directors_test.py | {
"start": 13595,
"end": 15401
} | class ____(DirectorTestCase):
def test_type_comment_on_multiline_value(self):
self._create("""
v = [
("hello",
"world", # type: should_be_ignored
)
] # type: dict
""")
self.assertEqual({2: "dict"}, self._director.type_comments)
def test_type_comment_with_trailing_comma(self):
self._create("""
v = [
("hello",
"world"
),
] # type: dict
w = [
["hello",
"world"
], # some comment
] # type: dict
""")
self.assertEqual({2: "dict", 7: "dict"}, self._director.type_comments)
def test_decorators(self):
self._create("""
class A:
'''
@decorator in a docstring
'''
@real_decorator
def f(x):
x = foo @ bar @ baz
@decorator(
x, y
)
def bar():
pass
""")
self.assertEqual(
self._director.decorators, {7: ["real_decorator"], 14: ["decorator"]}
)
self.assertEqual(self._director.decorated_functions, {6: 7, 10: 14})
def test_stacked_decorators(self):
self._create("""
@decorator(
x, y
)
@foo
class A:
pass
""")
self.assertEqual(self._director.decorators, {8: ["decorator", "foo"]})
self.assertEqual(self._director.decorated_functions, {2: 8, 6: 8})
def test_overload(self):
self._create("""
from typing import overload
@overload
def f() -> int: ...
@overload
def f(x: str) -> str: ...
def f(x=None):
return 0 if x is None else x
""")
self.assertEqual(
self._director.decorators, {5: ["overload"], 8: ["overload"]}
)
self.assertEqual(self._director.decorated_functions, {4: 5, 7: 8})
| LineNumbersTest |
python | doocs__leetcode | solution/1600-1699/1631.Path With Minimum Effort/Solution3.py | {
"start": 0,
"end": 682
} | class ____:
def minimumEffortPath(self, heights: List[List[int]]) -> int:
m, n = len(heights), len(heights[0])
dist = [[inf] * n for _ in range(m)]
dist[0][0] = 0
dirs = (-1, 0, 1, 0, -1)
q = [(0, 0, 0)]
while q:
t, i, j = heappop(q)
for a, b in pairwise(dirs):
x, y = i + a, j + b
if (
0 <= x < m
and 0 <= y < n
and (d := max(t, abs(heights[i][j] - heights[x][y]))) < dist[x][y]
):
dist[x][y] = d
heappush(q, (d, x, y))
return int(dist[-1][-1])
| Solution |
python | getsentry__sentry | src/sentry/interfaces/exception.py | {
"start": 7195,
"end": 11706
} | class ____(Interface):
"""
A standard exception with a ``type`` and value argument, and an optional
``module`` argument describing the exception class type and
module namespace. Either ``type`` or ``value`` must be present.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``stacktrace``.
>>> {
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__",
>>> "mechanism": {},
>>> "stacktrace": {
>>> # see stacktrace
>>> }
>>> }
"""
grouping_variants = ["system", "app"]
@classmethod
def to_python(cls, data, **kwargs):
if get_path(data, "stacktrace", "frames", filter=True):
stacktrace = Stacktrace.to_python(data["stacktrace"], **kwargs)
else:
stacktrace = None
if get_path(data, "raw_stacktrace", "frames", filter=True):
raw_stacktrace = Stacktrace.to_python(data["raw_stacktrace"], **kwargs)
else:
raw_stacktrace = None
type = data.get("type")
value = data.get("value")
if data.get("mechanism"):
mechanism = Mechanism.to_python(data["mechanism"], **kwargs)
else:
mechanism = None
new_data = {
"type": type,
"value": value,
"module": data.get("module"),
"mechanism": mechanism,
"stacktrace": stacktrace,
"thread_id": data.get("thread_id"),
"raw_stacktrace": raw_stacktrace,
}
return super().to_python(new_data, **kwargs)
def to_json(self):
mechanism = (
isinstance(self.mechanism, Mechanism)
and self.mechanism.to_json()
or self.mechanism
or None
)
if self.stacktrace:
stacktrace = self.stacktrace.to_json()
else:
stacktrace = None
if self.raw_stacktrace:
raw_stacktrace = self.raw_stacktrace.to_json()
else:
raw_stacktrace = None
return prune_empty_keys(
{
"type": self.type,
"value": self.value,
"mechanism": mechanism,
"module": self.module,
"stacktrace": stacktrace,
"thread_id": self.thread_id,
"raw_stacktrace": raw_stacktrace,
}
)
def get_api_context(self, is_public=False, platform=None):
mechanism = (
isinstance(self.mechanism, Mechanism)
and self.mechanism.get_api_context(is_public=is_public, platform=platform)
or self.mechanism
or None
)
if self.stacktrace:
stacktrace = self.stacktrace.get_api_context(is_public=is_public, platform=platform)
else:
stacktrace = None
if self.raw_stacktrace:
raw_stacktrace = self.raw_stacktrace.get_api_context(
is_public=is_public, platform=platform
)
else:
raw_stacktrace = None
return {
"type": self.type,
"value": str(self.value) if self.value else None,
"mechanism": mechanism,
"threadId": self.thread_id,
"module": self.module,
"stacktrace": stacktrace,
"rawStacktrace": raw_stacktrace,
}
def get_api_meta(self, meta, is_public=False, platform=None):
mechanism_meta = (
self.mechanism.get_api_meta(meta["mechanism"], is_public=is_public, platform=platform)
if isinstance(self.mechanism, Mechanism) and meta.get("mechanism")
else None
)
stacktrace_meta = (
self.stacktrace.get_api_meta(meta["stacktrace"], is_public=is_public, platform=platform)
if self.stacktrace and meta.get("stacktrace")
else None
)
return {
"": meta.get(""),
"type": meta.get("type"),
"value": meta.get("value"),
"mechanism": mechanism_meta,
"threadId": meta.get("thread_id"),
"module": meta.get("module"),
"stacktrace": stacktrace_meta,
}
def __str__(self) -> str:
return f"{type(self).__name__}: {self.type}: {self.value}"
def __repr__(self) -> str:
return f"{type(self).__name__} -> {self.type}: {self.value}"
| SingleException |
python | pytorch__pytorch | test/export/test_export_opinfo.py | {
"start": 3400,
"end": 4067
} | class ____(TestCase):
@ops(op_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestExportOpInfo", "test_fake_export", export_failures | fake_export_failures
)
def test_fake_export(self, device, dtype, op):
_test_export_helper(self, dtype, op)
instantiate_device_type_tests(TestExportOpInfo, globals(), only_for="cpu")
selected_ops = {
"__getitem__",
"nn.functional.batch_norm",
"nn.functional.conv2d",
"nn.functional.instance_norm",
"nn.functional.multi_margin_loss",
"nn.functional.scaled_dot_product_attention",
"nonzero",
}
selected_op_db = [op for op in op_db if op.name in selected_ops]
| TestExportOpInfo |
python | pola-rs__polars | py-polars/src/polars/io/iceberg/_utils.py | {
"start": 19213,
"end": 19574
} | class ____(LoadFromBytesImpl):
def load_from_bytes(self, byte_values: list[bytes | None]) -> pl.Series:
import polars as pl
return (
pl.Series(byte_values, dtype=pl.Binary).bin.reinterpret(
dtype=pl.Int64, endianness="little"
)
* ICEBERG_TIME_TO_NS
).cast(pl.Time)
| LoadTimeFromBytes |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_type_check.py | {
"start": 6023,
"end": 6312
} | class ____(TestCase):
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
| TestIscomplex |
python | django__django | tests/forms_tests/widget_tests/test_multiwidget.py | {
"start": 2293,
"end": 10524
} | class ____(WidgetTest):
def test_subwidgets_name(self):
widget = MultiWidget(
widgets={
"": TextInput(),
"big": TextInput(attrs={"class": "big"}),
"small": TextInput(attrs={"class": "small"}),
},
)
self.check_html(
widget,
"name",
["John", "George", "Paul"],
html=(
'<input type="text" name="name" value="John">'
'<input type="text" name="name_big" value="George" class="big">'
'<input type="text" name="name_small" value="Paul" class="small">'
),
)
def test_text_inputs(self):
widget = MyMultiWidget(
widgets=(
TextInput(attrs={"class": "big"}),
TextInput(attrs={"class": "small"}),
)
)
self.check_html(
widget,
"name",
["john", "lennon"],
html=(
'<input type="text" class="big" value="john" name="name_0">'
'<input type="text" class="small" value="lennon" name="name_1">'
),
)
self.check_html(
widget,
"name",
("john", "lennon"),
html=(
'<input type="text" class="big" value="john" name="name_0">'
'<input type="text" class="small" value="lennon" name="name_1">'
),
)
self.check_html(
widget,
"name",
"john__lennon",
html=(
'<input type="text" class="big" value="john" name="name_0">'
'<input type="text" class="small" value="lennon" name="name_1">'
),
)
self.check_html(
widget,
"name",
"john__lennon",
attrs={"id": "foo"},
html=(
'<input id="foo_0" type="text" class="big" value="john" name="name_0">'
'<input id="foo_1" type="text" class="small" value="lennon" '
'name="name_1">'
),
)
def test_constructor_attrs(self):
widget = MyMultiWidget(
widgets=(
TextInput(attrs={"class": "big"}),
TextInput(attrs={"class": "small"}),
),
attrs={"id": "bar"},
)
self.check_html(
widget,
"name",
["john", "lennon"],
html=(
'<input id="bar_0" type="text" class="big" value="john" name="name_0">'
'<input id="bar_1" type="text" class="small" value="lennon" '
'name="name_1">'
),
)
def test_constructor_attrs_with_type(self):
attrs = {"type": "number"}
widget = MyMultiWidget(widgets=(TextInput, TextInput()), attrs=attrs)
self.check_html(
widget,
"code",
["1", "2"],
html=(
'<input type="number" value="1" name="code_0">'
'<input type="number" value="2" name="code_1">'
),
)
widget = MyMultiWidget(
widgets=(TextInput(attrs), TextInput(attrs)), attrs={"class": "bar"}
)
self.check_html(
widget,
"code",
["1", "2"],
html=(
'<input type="number" value="1" name="code_0" class="bar">'
'<input type="number" value="2" name="code_1" class="bar">'
),
)
def test_value_omitted_from_data(self):
widget = MyMultiWidget(widgets=(TextInput(), TextInput()))
self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
widget.value_omitted_from_data({"field_0": "x"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data({"field_1": "y"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data(
{"field_0": "x", "field_1": "y"}, {}, "field"
),
False,
)
def test_value_from_datadict_subwidgets_name(self):
widget = MultiWidget(widgets={"x": TextInput(), "": TextInput()})
tests = [
({}, [None, None]),
({"field": "x"}, [None, "x"]),
({"field_x": "y"}, ["y", None]),
({"field": "x", "field_x": "y"}, ["y", "x"]),
]
for data, expected in tests:
with self.subTest(data):
self.assertEqual(
widget.value_from_datadict(data, {}, "field"),
expected,
)
def test_value_omitted_from_data_subwidgets_name(self):
widget = MultiWidget(widgets={"x": TextInput(), "": TextInput()})
tests = [
({}, True),
({"field": "x"}, False),
({"field_x": "y"}, False),
({"field": "x", "field_x": "y"}, False),
]
for data, expected in tests:
with self.subTest(data):
self.assertIs(
widget.value_omitted_from_data(data, {}, "field"),
expected,
)
def test_needs_multipart_true(self):
"""
needs_multipart_form should be True if any widgets need it.
"""
widget = MyMultiWidget(widgets=(TextInput(), FileInput()))
self.assertTrue(widget.needs_multipart_form)
def test_needs_multipart_false(self):
"""
needs_multipart_form should be False if no widgets need it.
"""
widget = MyMultiWidget(widgets=(TextInput(), TextInput()))
self.assertFalse(widget.needs_multipart_form)
def test_nested_multiwidget(self):
"""
MultiWidgets can be composed of other MultiWidgets.
"""
widget = ComplexMultiWidget()
self.check_html(
widget,
"name",
"some text,JP,2007-04-25 06:24:00",
html=(
"""
<input type="text" name="name_0" value="some text">
<select multiple name="name_1">
<option value="J" selected>John</option>
<option value="P" selected>Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25">
<input type="text" name="name_2_1" value="06:24:00">
"""
),
)
def test_no_whitespace_between_widgets(self):
widget = MyMultiWidget(widgets=(TextInput, TextInput()))
self.check_html(
widget,
"code",
None,
html=('<input type="text" name="code_0"><input type="text" name="code_1">'),
strict=True,
)
def test_deepcopy(self):
"""
MultiWidget should define __deepcopy__() (#12048).
"""
w1 = DeepCopyWidget(choices=[1, 2, 3])
w2 = copy.deepcopy(w1)
w2.choices = [4, 5, 6]
# w2 ought to be independent of w1, since MultiWidget ought
# to make a copy of its sub-widgets when it is copied.
self.assertEqual(w1.choices, [1, 2, 3])
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = ComplexField(widget=ComplexMultiWidget)
form = TestForm()
self.assertIs(form["field"].field.widget.use_fieldset, True)
self.assertHTMLEqual(
"<div><fieldset><legend>Field:</legend>"
'<input type="text" name="field_0" required id="id_field_0">'
'<select name="field_1" required id="id_field_1" multiple>'
'<option value="J">John</option><option value="P">Paul</option>'
'<option value="G">George</option><option value="R">Ringo</option></select>'
'<input type="text" name="field_2_0" required id="id_field_2_0">'
'<input type="text" name="field_2_1" required id="id_field_2_1">'
"</fieldset></div>",
form.render(),
)
| MultiWidgetTest |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_random_test.py | {
"start": 1399,
"end": 2337
} | class ____(test.TestCase, parameterized.TestCase):
def _test(self, *args, **kw_args):
onp_dtype = kw_args.pop('onp_dtype', None)
allow_float64 = kw_args.pop('allow_float64', True)
old_allow_float64 = np_dtypes.is_allow_float64()
np_dtypes.set_allow_float64(allow_float64)
old_func = getattr(self, 'onp_func', None)
# TODO(agarwal): Note that onp can return a scalar type while np returns
# ndarrays. Currently np does not support scalar types.
self.onp_func = lambda *args, **kwargs: onp.asarray( # pylint: disable=g-long-lambda
old_func(*args, **kwargs))
np_out = self.np_func(*args, **kw_args)
onp_out = onp.asarray(self.onp_func(*args, **kw_args))
if onp_dtype is not None:
onp_out = onp_out.astype(onp_dtype)
self.assertEqual(np_out.shape, onp_out.shape)
self.assertEqual(np_out.dtype, onp_out.dtype)
np_dtypes.set_allow_float64(old_allow_float64)
| RandomTestBase |
python | doocs__leetcode | solution/1100-1199/1175.Prime Arrangements/Solution.py | {
"start": 0,
"end": 469
} | class ____:
def numPrimeArrangements(self, n: int) -> int:
def count(n):
cnt = 0
primes = [True] * (n + 1)
for i in range(2, n + 1):
if primes[i]:
cnt += 1
for j in range(i + i, n + 1, i):
primes[j] = False
return cnt
cnt = count(n)
ans = factorial(cnt) * factorial(n - cnt)
return ans % (10**9 + 7)
| Solution |
python | explosion__spaCy | spacy/lang/ca/__init__.py | {
"start": 700,
"end": 1344
} | class ____(Language):
lang = "ca"
Defaults = CatalanDefaults
@Catalan.factory(
"lemmatizer",
assigns=["token.lemma"],
default_config={
"model": None,
"mode": "rule",
"overwrite": False,
"scorer": {"@scorers": "spacy.lemmatizer_scorer.v1"},
},
default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
nlp: Language,
model: Optional[Model],
name: str,
mode: str,
overwrite: bool,
scorer: Optional[Callable],
):
return CatalanLemmatizer(
nlp.vocab, model, name, mode=mode, overwrite=overwrite, scorer=scorer
)
__all__ = ["Catalan"]
| Catalan |
python | getsentry__sentry | src/sentry/notifications/services/model.py | {
"start": 750,
"end": 873
} | class ____(RpcModel):
is_disabled: bool
is_active: bool
has_only_inactive_subscriptions: bool
| RpcSubscriptionStatus |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 16184,
"end": 26816
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_besseli_boundary(self):
self.assertAllClose(1., special_math_ops.bessel_i0(0.))
self.assertAllClose(1., special_math_ops.bessel_i0e(0.))
self.assertAllClose(0., special_math_ops.bessel_i1(0.))
self.assertAllClose(0., special_math_ops.bessel_i1e(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i0(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_i0e(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_i1(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_i1e(np.nan))))
@test_util.run_in_graph_and_eager_modes
def test_besselj_boundary(self):
self.assertAllClose(1., special_math_ops.bessel_j0(0.))
self.assertAllClose(0., special_math_ops.bessel_j1(0.))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j0(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_j1(np.nan))))
@test_util.run_in_graph_and_eager_modes
def test_besselk_boundary(self):
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k0e(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1(0.))))
self.assertTrue(np.isinf(self.evaluate(special_math_ops.bessel_k1e(0.))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k0(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_k0e(np.nan))))
self.assertTrue(np.isnan(self.evaluate(special_math_ops.bessel_k1(np.nan))))
self.assertTrue(
np.isnan(self.evaluate(special_math_ops.bessel_k1e(np.nan))))
@parameterized.parameters(np.float32, np.float64)
def test_i0j0_even(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i0(x)),
self.evaluate(special_math_ops.bessel_i0(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i0e(x)),
self.evaluate(special_math_ops.bessel_i0e(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_j0(x)),
self.evaluate(special_math_ops.bessel_j0(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_i1j1_odd(self, dtype):
x = np.random.uniform(-100., 100., size=int(1e4)).astype(dtype)
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i1(x)),
self.evaluate(-special_math_ops.bessel_i1(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_i1e(x)),
self.evaluate(-special_math_ops.bessel_i1e(-x)))
self.assertAllClose(
self.evaluate(special_math_ops.bessel_j1(x)),
self.evaluate(-special_math_ops.bessel_j1(-x)))
@parameterized.parameters(np.float32, np.float64)
def test_besseli_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.i0(x), self.evaluate(special_math_ops.bessel_i0(x)))
self.assertAllClose(
special.i1(x), self.evaluate(special_math_ops.bessel_i1(x)))
self.assertAllClose(
special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
self.assertAllClose(
special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselj_small(self, dtype):
x = np.random.uniform(-1., 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.j0(x), self.evaluate(special_math_ops.bessel_j0(x)))
self.assertAllClose(
special.j1(x), self.evaluate(special_math_ops.bessel_j1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselk_small(self, dtype):
x = np.random.uniform(np.finfo(dtype).eps, 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.k0(x), self.evaluate(special_math_ops.bessel_k0(x)))
self.assertAllClose(
special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x)))
self.assertAllClose(
special.k1(x), self.evaluate(special_math_ops.bessel_k1(x)))
self.assertAllClose(
special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_bessely_small(self, dtype):
x = np.random.uniform(np.finfo(dtype).eps, 1., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.y0(x), self.evaluate(special_math_ops.bessel_y0(x)))
self.assertAllClose(
special.y1(x), self.evaluate(special_math_ops.bessel_y1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besseli_larger(self, dtype):
x = np.random.uniform(1., 20., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
self.assertAllClose(
special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselj_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.j0(x), self.evaluate(special_math_ops.bessel_j0(x)))
self.assertAllClose(
special.j1(x), self.evaluate(special_math_ops.bessel_j1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_besselk_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.k0(x), self.evaluate(special_math_ops.bessel_k0(x)))
self.assertAllClose(
special.k0e(x), self.evaluate(special_math_ops.bessel_k0e(x)))
self.assertAllClose(
special.k1(x), self.evaluate(special_math_ops.bessel_k1(x)))
self.assertAllClose(
special.k1e(x), self.evaluate(special_math_ops.bessel_k1e(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
@parameterized.parameters(np.float32, np.float64)
def test_bessely_larger(self, dtype):
x = np.random.uniform(1., 30., size=int(1e4)).astype(dtype)
try:
from scipy import special # pylint: disable=g-import-not-at-top
self.assertAllClose(
special.y0(x), self.evaluate(special_math_ops.bessel_y0(x)))
self.assertAllClose(
special.y1(x), self.evaluate(special_math_ops.bessel_y1(x)))
except ImportError as e:
tf_logging.warn('Cannot test special functions: %s' % str(e))
def test_besseli_gradient(self):
inputs = [np.random.uniform(-10., 10., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i0e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-3)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_i1e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_besselj_gradient(self):
inputs = [np.random.uniform(-50., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_j0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_j1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_besselk_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k0e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_k1e, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
def test_bessely_gradient(self):
inputs = [np.random.uniform(1., 50., size=int(1e2))]
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_y0, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
analytical, numerical = gradient_checker_v2.compute_gradient(
special_math_ops.bessel_y1, inputs)
self.assertLess(gradient_checker_v2.max_error(analytical, numerical), 1e-4)
@test_util.run_all_in_graph_and_eager_modes
@test_util.run_all_without_tensor_float_32(
'Tests einsum, which sometimes does a matmul with cuBLAS')
| BesselTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zenloop/source_zenloop/streams.py | {
"start": 7159,
"end": 7761
} | class ____(ZenloopStream):
# API Doc: https://docs.zenloop.com/reference#get-list-of-survey-groups
primary_key = None
has_date_param = False
extra_params = {"page": "1"}
use_cache = True
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "survey_groups"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
yield from response_json.get("survey_groups", [])
| SurveyGroups |
python | getsentry__sentry | src/sentry/issues/endpoints/project_user_issue.py | {
"start": 5568,
"end": 5870
} | class ____(ProjectUserIssueRequestSerializer):
score = serializers.IntegerField(required=True, min_value=0, max_value=100)
vital = serializers.ChoiceField(required=True, choices=["lcp", "fcp", "cls", "inp", "ttfb"])
value = serializers.IntegerField(required=True)
| WebVitalsIssueDataSerializer |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/api_tests/agent_tests/test_business_logic.py | {
"start": 5765,
"end": 9612
} | class ____:
"""Test processing of agent data structures.
This class would test any pure functions in the GraphQL adapter
that process the raw GraphQL responses into our domain models.
Since the actual GraphQL processing is done inline in the adapter
functions, these tests will verify our data model creation.
"""
def test_agent_creation_with_all_statuses(self, snapshot):
"""Test creating agents with all possible status values."""
agents = [
DgApiAgent(
id=f"agent-{status.value.lower()}-uuid",
agent_label=f"Agent {status.value.title()}",
status=status,
last_heartbeat_time=1641046800.0 if status == DgApiAgentStatus.RUNNING else None,
metadata=[
DgApiAgentMetadataEntry(key="status_test", value=status.value),
],
)
for status in DgApiAgentStatus
]
agent_list = DgApiAgentList(items=agents, total=len(agents))
# Test JSON serialization works correctly for all statuses
result = agent_list.model_dump_json(indent=2)
import json
parsed = json.loads(result)
snapshot.assert_match(parsed)
def test_agent_metadata_handling(self):
"""Test agent metadata entry creation and access."""
agent = DgApiAgent(
id="metadata-test-agent",
agent_label="Metadata Test",
status=DgApiAgentStatus.RUNNING,
last_heartbeat_time=1641046800.0,
metadata=[
DgApiAgentMetadataEntry(key="version", value="1.0.0"),
DgApiAgentMetadataEntry(key="environment", value="production"),
DgApiAgentMetadataEntry(key="region", value="us-west-2"),
],
)
assert len(agent.metadata) == 3
assert agent.metadata[0].key == "version"
assert agent.metadata[0].value == "1.0.0"
assert agent.metadata[1].key == "environment"
assert agent.metadata[1].value == "production"
assert agent.metadata[2].key == "region"
assert agent.metadata[2].value == "us-west-2"
def test_agent_list_total_count(self):
"""Test that AgentList properly tracks total count."""
agents = [
DgApiAgent(
id=f"agent-{i}",
agent_label=f"Agent {i}",
status=DgApiAgentStatus.RUNNING,
last_heartbeat_time=None,
metadata=[],
)
for i in range(3)
]
agent_list = DgApiAgentList(
items=agents, total=10
) # Total could be different from items length (pagination)
assert len(agent_list.items) == 3
assert agent_list.total == 10
def test_agent_id_fallback_display(self):
"""Test agent display label fallback behavior."""
# Test with label
agent_with_label = DgApiAgent(
id="very-long-agent-uuid-12345678901234567890",
agent_label="Custom Label",
status=DgApiAgentStatus.RUNNING,
last_heartbeat_time=None,
metadata=[],
)
# Test without label
agent_without_label = DgApiAgent(
id="very-long-agent-uuid-12345678901234567890",
agent_label=None,
status=DgApiAgentStatus.RUNNING,
last_heartbeat_time=None,
metadata=[],
)
# Format both agents and check the label display
result_with_label = format_agent(agent_with_label, as_json=False)
result_without_label = format_agent(agent_without_label, as_json=False)
assert "Label: Custom Label" in result_with_label
assert "Label: Agent very-lon" in result_without_label # Should show first 8 chars
| TestAgentDataProcessing |
python | PyCQA__bandit | tests/unit/formatters/test_custom.py | {
"start": 230,
"end": 2211
} | class ____(testtools.TestCase):
def setUp(self):
super().setUp()
conf = config.BanditConfig()
self.manager = manager.BanditManager(conf, "custom")
(tmp_fd, self.tmp_fname) = tempfile.mkstemp()
self.context = {
"filename": self.tmp_fname,
"lineno": 4,
"linerange": [4],
"col_offset": 30,
"end_col_offset": 38,
}
self.check_name = "hardcoded_bind_all_interfaces"
self.issue = issue.Issue(
bandit.MEDIUM,
bandit.MEDIUM,
text="Possible binding to all interfaces.",
)
self.manager.out_file = self.tmp_fname
self.issue.fname = self.context["filename"]
self.issue.lineno = self.context["lineno"]
self.issue.linerange = self.context["linerange"]
self.issue.col_offset = self.context["col_offset"]
self.issue.end_col_offset = self.context["end_col_offset"]
self.issue.test = self.check_name
self.manager.results.append(self.issue)
def test_report(self):
with open(self.tmp_fname, "w") as tmp_file:
custom.report(
self.manager,
tmp_file,
self.issue.severity,
self.issue.confidence,
template="{line},{col},{end_col},{severity},{msg}",
)
with open(self.tmp_fname) as f:
reader = csv.DictReader(
f, ["line", "col", "end_col", "severity", "message"]
)
data = next(reader)
self.assertEqual(str(self.context["lineno"]), data["line"])
self.assertEqual(str(self.context["col_offset"]), data["col"])
self.assertEqual(
str(self.context["end_col_offset"]), data["end_col"]
)
self.assertEqual(self.issue.severity, data["severity"])
self.assertEqual(self.issue.text, data["message"])
| CustomFormatterTests |
python | celery__celery | t/unit/events/test_events.py | {
"start": 6757,
"end": 12395
} | class ____:
def test_process(self):
message = {'type': 'world-war'}
got_event = [False]
def my_handler(event):
got_event[0] = True
connection = Mock()
connection.transport_cls = 'memory'
r = self.app.events.Receiver(
connection,
handlers={'world-war': my_handler},
node_id='celery.tests',
)
r._receive(message, object())
assert got_event[0]
def test_accept_argument(self):
r = self.app.events.Receiver(Mock(), accept={'app/foo'})
assert r.accept == {'app/foo'}
def test_event_queue_prefix__default(self):
r = self.app.events.Receiver(Mock())
assert r.queue.name.startswith('celeryev.')
def test_event_queue_prefix__setting(self):
self.app.conf.event_queue_prefix = 'eventq'
r = self.app.events.Receiver(Mock())
assert r.queue.name.startswith('eventq.')
def test_event_queue_prefix__argument(self):
r = self.app.events.Receiver(Mock(), queue_prefix='fooq')
assert r.queue.name.startswith('fooq.')
def test_event_exchange__default(self):
r = self.app.events.Receiver(Mock())
assert r.exchange.name == 'celeryev'
def test_event_exchange__setting(self):
self.app.conf.event_exchange = 'exchange_ev'
r = self.app.events.Receiver(Mock())
assert r.exchange.name == 'exchange_ev'
def test_catch_all_event(self):
message = {'type': 'world-war'}
got_event = [False]
def my_handler(event):
got_event[0] = True
connection = Mock()
connection.transport_cls = 'memory'
r = self.app.events.Receiver(connection, node_id='celery.tests')
r.handlers['*'] = my_handler
r._receive(message, object())
assert got_event[0]
def test_itercapture(self):
connection = self.app.connection_for_write()
try:
r = self.app.events.Receiver(connection, node_id='celery.tests')
it = r.itercapture(timeout=0.0001, wakeup=False)
with pytest.raises(socket.timeout):
next(it)
with pytest.raises(socket.timeout):
r.capture(timeout=0.00001)
finally:
connection.close()
def test_event_from_message_localize_disabled(self):
r = self.app.events.Receiver(Mock(), node_id='celery.tests')
r.adjust_clock = Mock()
ts_adjust = Mock()
r.event_from_message(
{'type': 'worker-online', 'clock': 313},
localize=False,
adjust_timestamp=ts_adjust,
)
ts_adjust.assert_not_called()
r.adjust_clock.assert_called_with(313)
def test_event_from_message_clock_from_client(self):
r = self.app.events.Receiver(Mock(), node_id='celery.tests')
r.clock.value = 302
r.adjust_clock = Mock()
body = {'type': 'task-sent'}
r.event_from_message(
body, localize=False, adjust_timestamp=Mock(),
)
assert body['clock'] == r.clock.value + CLIENT_CLOCK_SKEW
def test_receive_multi(self):
r = self.app.events.Receiver(Mock(name='connection'))
r.process = Mock(name='process')
efm = r.event_from_message = Mock(name='event_from_message')
def on_efm(*args):
return args
efm.side_effect = on_efm
r._receive([1, 2, 3], Mock())
r.process.assert_has_calls([call(1), call(2), call(3)])
def test_itercapture_limit(self):
connection = self.app.connection_for_write()
channel = connection.channel()
try:
events_received = [0]
def handler(event):
events_received[0] += 1
producer = self.app.events.Dispatcher(
connection, enabled=True, channel=channel,
)
r = self.app.events.Receiver(
connection,
handlers={'*': handler},
node_id='celery.tests',
)
evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5']
for ev in evs:
producer.send(ev)
it = r.itercapture(limit=4, wakeup=True)
next(it) # skip consumer (see itercapture)
list(it)
assert events_received[0] == 4
finally:
channel.close()
connection.close()
def test_event_queue_exclusive(self):
self.app.conf.update(
event_queue_exclusive=True,
event_queue_durable=False
)
ev_recv = self.app.events.Receiver(Mock(name='connection'))
q = ev_recv.queue
assert q.exclusive is True
assert q.durable is False
assert q.auto_delete is True
def test_event_queue_durable_and_validation(self):
self.app.conf.update(
event_queue_exclusive=False,
event_queue_durable=True
)
ev_recv = self.app.events.Receiver(Mock(name='connection'))
q = ev_recv.queue
assert q.durable is True
assert q.exclusive is False
assert q.auto_delete is False
self.app.conf.update(
event_queue_exclusive=True,
event_queue_durable=True
)
with pytest.raises(ImproperlyConfigured):
self.app.events.Receiver(Mock(name='connection'))
def test_State(app):
state = app.events.State()
assert dict(state.workers) == {}
def test_default_dispatcher(app):
with app.events.default_dispatcher() as d:
assert d
assert d.connection
| test_EventReceiver |
python | sphinx-doc__sphinx | tests/test_ext_napoleon/test_ext_napoleon.py | {
"start": 3888,
"end": 8261
} | class ____:
def assert_skip(
self,
what: str,
member: str,
obj: object,
expect_default_skip: bool,
config_name: str,
) -> None:
skip = True
app = mock.Mock()
app.config = Config()
setattr(app.config, config_name, True)
if expect_default_skip:
assert None is _skip_member(app, what, member, obj, skip, mock.Mock())
else:
assert _skip_member(app, what, member, obj, skip, mock.Mock()) is False
setattr(app.config, config_name, False)
assert None is _skip_member(app, what, member, obj, skip, mock.Mock())
def test_namedtuple(self) -> None:
# Since Python 3.7, namedtuple._asdict() has not been documented
# because there is no way to check whether the method is a member of
# the namedtuple class. This test case only confirms that it does not
# raise an error when building the document.
# See: https://github.com/sphinx-doc/sphinx/issues/1455
self.assert_skip(
'class',
'_asdict',
SampleNamedTuple._asdict,
True,
'napoleon_include_private_with_doc',
)
def test_class_private_doc(self) -> None:
self.assert_skip(
'class',
'_private_doc',
SampleClass._private_doc,
False,
'napoleon_include_private_with_doc',
)
def test_class_private_undoc(self) -> None:
self.assert_skip(
'class',
'_private_undoc',
SampleClass._private_undoc,
True,
'napoleon_include_private_with_doc',
)
def test_class_special_doc(self) -> None:
self.assert_skip(
'class',
'__special_doc__',
SampleClass.__special_doc__,
False,
'napoleon_include_special_with_doc',
)
def test_class_special_undoc(self) -> None:
self.assert_skip(
'class',
'__special_undoc__',
SampleClass.__special_undoc__,
True,
'napoleon_include_special_with_doc',
)
def test_class_decorated_doc(self) -> None:
self.assert_skip(
'class',
'__decorated_func__',
SampleClass.__decorated_func__,
False,
'napoleon_include_special_with_doc',
)
def test_exception_private_doc(self) -> None:
self.assert_skip(
'exception',
'_private_doc',
SampleError._private_doc,
False,
'napoleon_include_private_with_doc',
)
def test_exception_private_undoc(self) -> None:
self.assert_skip(
'exception',
'_private_undoc',
SampleError._private_undoc,
True,
'napoleon_include_private_with_doc',
)
def test_exception_special_doc(self) -> None:
self.assert_skip(
'exception',
'__special_doc__',
SampleError.__special_doc__,
False,
'napoleon_include_special_with_doc',
)
def test_exception_special_undoc(self) -> None:
self.assert_skip(
'exception',
'__special_undoc__',
SampleError.__special_undoc__,
True,
'napoleon_include_special_with_doc',
)
def test_module_private_doc(self) -> None:
self.assert_skip(
'module',
'_private_doc',
_private_doc,
False,
'napoleon_include_private_with_doc',
)
def test_module_private_undoc(self) -> None:
self.assert_skip(
'module',
'_private_undoc',
_private_undoc,
True,
'napoleon_include_private_with_doc',
)
def test_module_special_doc(self) -> None:
self.assert_skip(
'module',
'__special_doc__',
__special_doc__,
False,
'napoleon_include_special_with_doc',
)
def test_module_special_undoc(self) -> None:
self.assert_skip(
'module',
'__special_undoc__',
__special_undoc__,
True,
'napoleon_include_special_with_doc',
)
| TestSkipMember |
python | huggingface__transformers | tests/models/encodec/test_feature_extraction_encodec.py | {
"start": 1459,
"end": 3143
} | class ____:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=1,
padding_value=0.0,
sampling_rate=24000,
return_attention_mask=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.return_attention_mask = return_attention_mask
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
audio_inputs = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
audio_inputs = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
audio_inputs = [np.asarray(x) for x in audio_inputs]
return audio_inputs
@require_torch
| EnCodecFeatureExtractionTester |
python | fastai__fastai | fastai/vision/core.py | {
"start": 4953,
"end": 5106
} | class ____(PILBase):
"A RGB Pillow `Image` that can show itself and converts to `TensorImage`"
pass
# %% ../../nbs/07_vision.core.ipynb 39
| PILImage |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-score-of-array.py | {
"start": 517,
"end": 855
} | class ____(object):
def maximumSumScore(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
total = sum(nums)
prefix = 0
result = float("-inf")
for x in nums:
prefix += x
result = max(result, prefix, total-prefix+x)
return result
| Solution2 |
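A minimal standalone sketch of the prefix/suffix idea used in the Solution2 record above (the input values are hypothetical, chosen only for illustration): the sum score at index i is the larger of the prefix ending at i and the suffix starting at i, and the suffix can be derived from the running prefix as total - prefix + x.
nums = [4, 3, -2, 5]
total = sum(nums)                       # 10
prefix, best = 0, float("-inf")
for x in nums:
    prefix += x                                    # prefix sum ending at x
    best = max(best, prefix, total - prefix + x)   # suffix starting at x
print(best)                             # 10 -- the full-array prefix/suffix wins here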
python | astropy__astropy | astropy/cosmology/_src/tests/funcs/test_comparison.py | {
"start": 536,
"end": 3003
} | class ____(ToFromTestMixinBase):
"""Tests for cosmology comparison functions.
This class inherits from
`astropy.cosmology._src.tests.io.base.ToFromTestMixinBase` because the cosmology
comparison functions all have a kwarg ``format`` that allows the arguments to
be converted to a |Cosmology| using the ``to_format`` architecture.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must be
inherited in a subclass.
"""
@pytest.fixture(scope="class")
def cosmo(self):
return Planck18
@pytest.fixture(scope="class")
def cosmo_eqvxflat(self, cosmo):
if isinstance(cosmo, FlatCosmologyMixin):
return cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(
scope="class",
params=sorted(
{k for k, _ in convert_registry._readers.keys()} - {"astropy.cosmology"}
),
)
def format(self, request):
return request.param
@pytest.fixture(scope="class")
def xfail_cant_autoidentify(self, format):
"""`pytest.fixture` form of method ``can_autoidentify`."""
if not self.can_autodentify(format):
pytest.xfail("cannot autoidentify")
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
return to_format(format)
@pytest.fixture(scope="class")
def pert_cosmo(self, cosmo):
# change one parameter
p, v = next(iter(cosmo.parameters.items()))
return cosmo.clone(
**{p: v * 1.0001 if v != 0 else 0.001 * getattr(v, "unit", 1)}
)
@pytest.fixture(scope="class")
def pert_cosmo_eqvxflat(self, pert_cosmo):
if isinstance(pert_cosmo, FlatCosmologyMixin):
return pert_cosmo.nonflat
pytest.skip(
"cosmology is not flat, so does not have an equivalent non-flat cosmology."
)
@pytest.fixture(scope="class")
def pert_converted(self, pert_cosmo, format):
if format == "astropy.model": # special case Model
return pert_cosmo.to_format(format, method="comoving_distance")
return pert_cosmo.to_format(format)
| ComparisonFunctionTestBase |
python | PrefectHQ__prefect | src/prefect/tasks.py | {
"start": 2625,
"end": 3277
} | class ____(Protocol):
@classmethod
def is_callback_with_parameters(cls, callable: Callable[..., str]) -> TypeIs[Self]:
sig = inspect.signature(callable)
return "parameters" in sig.parameters
def __call__(self, parameters: dict[str, Any]) -> str: ...
StateHookCallable: TypeAlias = Callable[
["Task[..., Any]", TaskRun, State], Union[Awaitable[None], None]
]
RetryConditionCallable: TypeAlias = Callable[
["Task[..., Any]", TaskRun, State], Union[Awaitable[bool], bool]
]
TaskRunNameValueOrCallable: TypeAlias = Union[
Callable[[], str], TaskRunNameCallbackWithParameters, str
]
| TaskRunNameCallbackWithParameters |
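A hedged, framework-free sketch of the signature probe used in the record above: inspect.signature is enough to tell whether a callback accepts a parameters argument. The helper and callback names below are hypothetical, not Prefect APIs.
import inspect

def accepts_parameters(fn) -> bool:
    # mirrors the '"parameters" in sig.parameters' check from the protocol above
    return "parameters" in inspect.signature(fn).parameters

def name_from_params(parameters: dict) -> str:
    return f"run-{parameters.get('batch', 'default')}"

print(accepts_parameters(name_from_params))   # True
print(accepts_parameters(lambda: "static"))   # False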
python | facebookresearch__faiss | contrib/torch/clustering.py | {
"start": 1438,
"end": 1676
} | class ____(DatasetAssign):
def __init__(self, res, x):
DatasetAssign.__init__(self, x)
self.res = res
def perform_search(self, centroids):
return faiss.knn_gpu(self.res, self.x, centroids, 1)
| DatasetAssignGPU |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/layout.py | {
"start": 445,
"end": 3835
} | class ____(LayoutOperatorBase):
"""Operator for tensor.view() operation."""
def __init__(self):
"""Initialize ViewOperator."""
super().__init__("view")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.Tensor.view"
def can_produce(self, output_spec: Spec) -> bool:
"""ViewOperator can produce tensor outputs but not scalars due to element count constraints."""
if not isinstance(output_spec, TensorSpec):
return False
# Don't produce scalars since we can't guarantee input has exactly 1 element
return len(output_spec.size) > 0
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input spec for view operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ViewOperator can only produce TensorSpec outputs")
# Calculate total number of elements in output
output_numel = 1
for dim in output_spec.size:
output_numel *= dim
# Generate a compatible input shape with exactly the same number of elements
input_size = fuzz_tensor_size()
# Always ensure exact element count match
if output_numel == 0:
# For zero-sized output, create zero-sized input
input_size = tuple(list(input_size)[:-1] + [0])
else:
# Calculate input shape that gives exactly output_numel elements
# Try to use the fuzzed shape structure but adjust to match element count
if len(input_size) > 1:
# Keep all dims except last, adjust last to make total = output_numel
prefix_numel = 1
for dim in input_size[:-1]:
prefix_numel *= dim
if prefix_numel > 0 and output_numel % prefix_numel == 0:
last_dim = output_numel // prefix_numel
input_size = tuple(list(input_size)[:-1] + [last_dim])
else:
# Fallback: create a simple shape with exact element count
input_size = (output_numel,)
else:
# For single-dim input, just use the exact element count
input_size = (output_numel,)
# Create input tensor spec with contiguous stride for view compatibility
# .view() requires compatible memory layout, so use contiguous stride
input_stride = tuple()
if input_size:
# Calculate contiguous stride
stride = [1]
for i in range(len(input_size) - 1, 0, -1):
stride.insert(0, stride[0] * input_size[i])
input_stride = tuple(stride)
return [
TensorSpec(size=input_size, stride=input_stride, dtype=output_spec.dtype)
]
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for view operation."""
if not isinstance(output_spec, TensorSpec):
raise ValueError("ViewOperator can only produce TensorSpec outputs")
shape_str = str(list(output_spec.size))
# Ensure tensor is contiguous before view to avoid stride compatibility issues
return f"{output_name} = {input_names[0]}.contiguous().view({shape_str})"
| ViewOperator |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/components/workspace_component/component.py | {
"start": 1869,
"end": 2657
} | class ____(pydantic.BaseModel):
by_id: Sequence[str] = pydantic.Field(
...,
description="A list of connector IDs to include in the collection.",
)
def resolve_connector_selector(
context: dg.ResolutionContext, model
) -> Optional[Callable[[FivetranConnector], bool]]:
if isinstance(model, str):
model = context.resolve_value(model)
if isinstance(model, FivetranConnectorSelectorByName):
return lambda connector: connector.name in model.by_name
elif isinstance(model, FivetranConnectorSelectorById):
return lambda connector: connector.id in model.by_id
else:
check.failed(f"Unknown connector target type: {type(model)}")
@public
@dg.scaffold_with(FivetranAccountComponentScaffolder)
| FivetranConnectorSelectorById |
python | tensorflow__tensorflow | tensorflow/python/framework/composite_tensor_test.py | {
"start": 3143,
"end": 3232
} | class ____(CT):
_type_spec_class = CTSpec
@test_util.run_all_in_graph_and_eager_modes
| CT3 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/aiomysql.py | {
"start": 3549,
"end": 5614
} | class ____(AsyncAdapt_dbapi_module):
def __init__(self, aiomysql: ModuleType, pymysql: ModuleType):
super().__init__(aiomysql, dbapi_module=pymysql)
self.aiomysql = aiomysql
self.pymysql = pymysql
self.paramstyle = "format"
self._init_dbapi_attributes()
self.Cursor, self.SSCursor = self._init_cursors_subclasses()
def _init_dbapi_attributes(self) -> None:
for name in (
"Warning",
"Error",
"InterfaceError",
"DataError",
"DatabaseError",
"OperationalError",
"InterfaceError",
"IntegrityError",
"ProgrammingError",
"InternalError",
"NotSupportedError",
):
setattr(self, name, getattr(self.aiomysql, name))
for name in (
"NUMBER",
"STRING",
"DATETIME",
"BINARY",
"TIMESTAMP",
"Binary",
):
setattr(self, name, getattr(self.pymysql, name))
def connect(self, *arg: Any, **kw: Any) -> AsyncAdapt_aiomysql_connection:
creator_fn = kw.pop("async_creator_fn", self.aiomysql.connect)
return await_(
AsyncAdapt_aiomysql_connection.create(
self,
creator_fn(*arg, **kw),
)
)
def _init_cursors_subclasses(
self,
) -> tuple[AsyncIODBAPICursor, AsyncIODBAPICursor]:
# suppress unconditional warning emitted by aiomysql
class Cursor(self.aiomysql.Cursor): # type: ignore[misc, name-defined]
async def _show_warnings(
self, conn: AsyncIODBAPIConnection
) -> None:
pass
class SSCursor(self.aiomysql.SSCursor): # type: ignore[misc, name-defined] # noqa: E501
async def _show_warnings(
self, conn: AsyncIODBAPIConnection
) -> None:
pass
return Cursor, SSCursor # type: ignore[return-value]
| AsyncAdapt_aiomysql_dbapi |
python | mlflow__mlflow | mlflow/tracing/display/display_handler.py | {
"start": 2968,
"end": 6275
} | class ____:
_instance = None
disabled = False
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = IPythonTraceDisplayHandler()
return cls._instance
@classmethod
def disable(cls):
cls.disabled = True
@classmethod
def enable(cls):
cls.disabled = False
if cls._instance is None:
cls._instance = IPythonTraceDisplayHandler()
def __init__(self):
self.traces_to_display = {}
if not _is_jupyter():
return
try:
from IPython import get_ipython
# Register a post-run cell display hook to display traces
# after the cell has executed. We don't validate that the
# user is using a tracking server at this step, because
# the user might set it later using mlflow.set_tracking_uri()
get_ipython().events.register("post_run_cell", self._display_traces_post_run)
except Exception:
# swallow exceptions. this function is called as
# a side-effect in a few other functions (e.g. log_trace,
# get_traces, search_traces), and we don't want to block
# the core functionality if the display fails.
_logger.debug("Failed to register post-run cell display hook", exc_info=True)
def _display_traces_post_run(self, result):
if self.disabled or not is_trace_ui_available():
self.traces_to_display = {}
return
try:
from IPython.display import display
MAX_TRACES_TO_DISPLAY = MLFLOW_MAX_TRACES_TO_DISPLAY_IN_NOTEBOOK.get()
traces_to_display = list(self.traces_to_display.values())[:MAX_TRACES_TO_DISPLAY]
if len(traces_to_display) == 0:
self.traces_to_display = {}
return
display(self.get_mimebundle(traces_to_display), raw=True)
# reset state
self.traces_to_display = {}
except Exception:
# swallow exceptions. this function is called as
# a side-effect in a few other functions (e.g. log_trace,
# get_traces, search_traces), and we don't want to block
# the core functionality if the display fails.
_logger.debug("Failed to display traces", exc_info=True)
self.traces_to_display = {}
def get_mimebundle(self, traces: list["Trace"]):
if len(traces) == 1:
return traces[0]._repr_mimebundle_()
else:
bundle = {"text/plain": repr(traces)}
if is_in_databricks_runtime():
bundle["application/databricks.mlflow.trace"] = _serialize_trace_list(traces)
else:
bundle["text/html"] = get_notebook_iframe_html(traces)
return bundle
def display_traces(self, traces: list["Trace"]):
if self.disabled or not is_trace_ui_available():
return
try:
if len(traces) == 0:
return
traces_dict = {trace.info.request_id: trace for trace in traces}
self.traces_to_display.update(traces_dict)
except Exception:
_logger.debug("Failed to update traces", exc_info=True)
| IPythonTraceDisplayHandler |
python | huggingface__transformers | examples/pytorch/instance-segmentation/run_instance_segmentation.py | {
"start": 1941,
"end": 5872
} | class ____:
"""
Arguments pertaining to what data we are going to input to our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
"""
model_name_or_path: str = field(
default="facebook/mask2former-swin-tiny-coco-instance",
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
)
dataset_name: str = field(
default="qubvel-hf/ade20k-mini",
metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
image_height: Optional[int] = field(default=512, metadata={"help": "Image height after resizing."})
image_width: Optional[int] = field(default=512, metadata={"help": "Image width after resizing."})
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
do_reduce_labels: bool = field(
default=False,
metadata={
"help": (
"If background class is labeled as 0 and you want to remove it from the labels, set this flag to True."
)
},
)
def augment_and_transform_batch(
examples: Mapping[str, Any], transform: A.Compose, image_processor: AutoImageProcessor
) -> BatchFeature:
batch = {
"pixel_values": [],
"mask_labels": [],
"class_labels": [],
}
for pil_image, pil_annotation in zip(examples["image"], examples["annotation"]):
image = np.array(pil_image)
semantic_and_instance_masks = np.array(pil_annotation)[..., :2]
# Apply augmentations
output = transform(image=image, mask=semantic_and_instance_masks)
aug_image = output["image"]
aug_semantic_and_instance_masks = output["mask"]
aug_instance_mask = aug_semantic_and_instance_masks[..., 1]
# Create mapping from instance id to semantic id
unique_semantic_id_instance_id_pairs = np.unique(aug_semantic_and_instance_masks.reshape(-1, 2), axis=0)
instance_id_to_semantic_id = {
instance_id: semantic_id for semantic_id, instance_id in unique_semantic_id_instance_id_pairs
}
# Apply the image processor transformations: resizing, rescaling, normalization
model_inputs = image_processor(
images=[aug_image],
segmentation_maps=[aug_instance_mask],
instance_id_to_semantic_id=instance_id_to_semantic_id,
return_tensors="pt",
)
batch["pixel_values"].append(model_inputs.pixel_values[0])
batch["mask_labels"].append(model_inputs.mask_labels[0])
batch["class_labels"].append(model_inputs.class_labels[0])
return batch
def collate_fn(examples):
batch = {}
batch["pixel_values"] = torch.stack([example["pixel_values"] for example in examples])
batch["class_labels"] = [example["class_labels"] for example in examples]
batch["mask_labels"] = [example["mask_labels"] for example in examples]
if "pixel_mask" in examples[0]:
batch["pixel_mask"] = torch.stack([example["pixel_mask"] for example in examples])
return batch
@dataclass
| Arguments |
python | ray-project__ray | python/ray/data/tests/test_auto_parallelism.py | {
"start": 301,
"end": 5880
} | class ____:
avail_cpus: int
target_max_block_size: int
data_size: int
expected_parallelism: int
MiB = 1024 * 1024
GiB = 1024 * MiB
TEST_CASES = [
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=1024,
expected_parallelism=8, # avail_cpus has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=10 * MiB,
expected_parallelism=10, # MIN_BLOCK_SIZE has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=20 * MiB,
expected_parallelism=20, # MIN_BLOCK_SIZE has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=100 * MiB,
expected_parallelism=100, # MIN_BLOCK_SIZE has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=1 * GiB,
expected_parallelism=200, # MIN_PARALLELISM has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=10 * GiB,
expected_parallelism=200, # MIN_PARALLELISM has precedence
),
TestCase(
avail_cpus=150,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=10 * GiB,
expected_parallelism=300, # avail_cpus has precedence
),
TestCase(
avail_cpus=400,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=10 * GiB,
expected_parallelism=800, # avail_cpus has precedence
),
TestCase(
avail_cpus=400,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=1 * MiB,
expected_parallelism=800, # avail_cpus has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=1000 * GiB,
expected_parallelism=8000, # MAX_BLOCK_SIZE has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=DataContext.get_current().target_max_block_size,
data_size=10000 * GiB,
expected_parallelism=80000, # MAX_BLOCK_SIZE has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=512 * MiB,
data_size=1000 * GiB,
expected_parallelism=2000, # passed max_block_size has precedence
),
TestCase(
avail_cpus=4,
target_max_block_size=512 * MiB,
data_size=10000 * GiB,
expected_parallelism=20000, # passed max_block_size has precedence
),
]
@pytest.mark.parametrize(
"avail_cpus,target_max_block_size,data_size,expected",
[astuple(test) for test in TEST_CASES],
)
def test_autodetect_parallelism(
shutdown_only, avail_cpus, target_max_block_size, data_size, expected
):
class MockReader:
def estimate_inmemory_data_size(self):
return data_size
result, _, _ = _autodetect_parallelism(
parallelism=-1,
target_max_block_size=target_max_block_size,
ctx=DataContext.get_current(),
datasource_or_legacy_reader=MockReader(),
avail_cpus=avail_cpus,
)
assert result == expected, (result, expected)
def test_auto_parallelism_basic(shutdown_only):
ray.init(num_cpus=8)
context = DataContext.get_current()
context.read_op_min_num_blocks = 1
# Datasource bound.
ds = ray.data.range_tensor(5, shape=(100,), override_num_blocks=-1)
assert ds._plan.initial_num_blocks() == 5, ds
# CPU bound. TODO(ekl) we should fix range datasource to respect parallelism more
# properly, currently it can go a little over.
ds = ray.data.range_tensor(10000, shape=(100,), override_num_blocks=-1)
assert ds._plan.initial_num_blocks() == 16, ds
# Block size bound.
ds = ray.data.range_tensor(100000000, shape=(100,), override_num_blocks=-1)
assert ds._plan.initial_num_blocks() >= 590, ds
assert ds._plan.initial_num_blocks() <= 600, ds
def test_auto_parallelism_placement_group(shutdown_only):
ray.init(num_cpus=16, num_gpus=8)
@ray.remote
def run():
context = DataContext.get_current()
context.min_parallelism = 1
ds = ray.data.range_tensor(2000, shape=(100,), override_num_blocks=-1)
return ds._plan.initial_num_blocks()
# 1/16 * 4 * 16 = 4
pg = ray.util.placement_group([{"CPU": 1}])
num_blocks = ray.get(
run.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
assert num_blocks == 4, num_blocks
# 2/16 * 4 * 16 = 8
pg = ray.util.placement_group([{"CPU": 2}])
num_blocks = ray.get(
run.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
assert num_blocks == 8, num_blocks
# 1/8 * 4 * 16 = 8
pg = ray.util.placement_group([{"CPU": 1, "GPU": 1}])
num_blocks = ray.get(
run.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
)
assert num_blocks == 8, num_blocks
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TestCase |
python | huggingface__transformers | src/transformers/models/wavlm/modeling_wavlm.py | {
"start": 12200,
"end": 13866
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):
super().__init__()
self.attention = WavLMAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
num_buckets=config.num_buckets,
max_distance=config.max_bucket_distance,
has_relative_position_bias=has_relative_position_bias,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = WavLMFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0):
attn_residual = hidden_states
hidden_states, attn_weights, position_bias = self.attention(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
index=index,
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states, position_bias)
if output_attentions:
outputs += (attn_weights,)
return outputs
| WavLMEncoderLayer |
python | uqfoundation__dill | dill/session.py | {
"start": 10711,
"end": 23541
} | class ____:
"""lightweight stream wrapper that implements peek()"""
def __init__(self, stream):
self.stream = stream
def read(self, n):
return self.stream.read(n)
def readline(self):
return self.stream.readline()
def tell(self):
return self.stream.tell()
def close(self):
return self.stream.close()
def peek(self, n):
stream = self.stream
try:
if hasattr(stream, 'flush'): stream.flush()
position = stream.tell()
stream.seek(position) # assert seek() works before reading
chunk = stream.read(n)
stream.seek(position)
return chunk
except (AttributeError, OSError):
raise NotImplementedError("stream is not peekable: %r", stream) from None
def _make_peekable(stream):
"""return stream as an object with a peek() method"""
import io
if hasattr(stream, 'peek'):
return stream
if not (hasattr(stream, 'tell') and hasattr(stream, 'seek')):
try:
return io.BufferedReader(stream)
except Exception:
pass
return _PeekableReader(stream)
def _identify_module(file, main=None):
"""identify the name of the module stored in the given file-type object"""
from pickletools import genops
UNICODE = {'UNICODE', 'BINUNICODE', 'SHORT_BINUNICODE'}
found_import = False
try:
for opcode, arg, pos in genops(file.peek(256)):
if not found_import:
if opcode.name in ('GLOBAL', 'SHORT_BINUNICODE') and \
arg.endswith('_import_module'):
found_import = True
else:
if opcode.name in UNICODE:
return arg
else:
raise UnpicklingError("reached STOP without finding main module")
except (NotImplementedError, ValueError) as error:
# ValueError occurs when the end of the chunk is reached (without a STOP).
if isinstance(error, NotImplementedError) and main is not None:
# file is not peekable, but we have main.
return None
raise UnpicklingError("unable to identify main module") from error
def load_module(
filename: Union[str, os.PathLike] = None,
module: Optional[Union[ModuleType, str]] = None,
**kwds
) -> Optional[ModuleType]:
"""Update the selected module (default is :py:mod:`__main__`) with
the state saved at ``filename``.
Restore a module to the state saved with :py:func:`dump_module`. The
saved module can be :py:mod:`__main__` (e.g. an interpreter session),
an imported module, or a module-type object (e.g. created with
:py:class:`~types.ModuleType`).
When restoring the state of a non-importable module-type object, the
current instance of this module may be passed as the argument ``main``.
Otherwise, a new instance is created with :py:class:`~types.ModuleType`
and returned.
Args:
filename: a path-like object or a readable stream. If `None`
(the default), read from a named file in a temporary directory.
module: a module object or the name of an importable module;
the module name and kind (i.e. imported or non-imported) must
match the name and kind of the module stored at ``filename``.
**kwds: extra keyword arguments passed to :py:class:`Unpickler()`.
Raises:
:py:exc:`UnpicklingError`: if unpickling fails.
:py:exc:`ValueError`: if the argument ``main`` and module saved
at ``filename`` are incompatible.
Returns:
A module object, if the saved module is not :py:mod:`__main__` or
a module instance wasn't provided with the argument ``main``.
Examples:
- Save the state of some modules:
>>> import dill
>>> squared = lambda x: x*x
>>> dill.dump_module() # save state of __main__ to /tmp/session.pkl
>>>
>>> import pox # an imported module
>>> pox.plus_one = lambda x: x+1
>>> dill.dump_module('pox_session.pkl', module=pox)
>>>
>>> from types import ModuleType
>>> foo = ModuleType('foo') # a module-type object
>>> foo.values = [1,2,3]
>>> import math
>>> foo.sin = math.sin
>>> dill.dump_module('foo_session.pkl', module=foo, refimported=True)
- Restore the state of the interpreter:
>>> import dill
>>> dill.load_module() # updates __main__ from /tmp/session.pkl
>>> squared(2)
4
- Load the saved state of an importable module:
>>> import dill
>>> pox = dill.load_module('pox_session.pkl')
>>> pox.plus_one(1)
2
>>> import sys
>>> pox in sys.modules.values()
True
- Load the saved state of a non-importable module-type object:
>>> import dill
>>> foo = dill.load_module('foo_session.pkl')
>>> [foo.sin(x) for x in foo.values]
[0.8414709848078965, 0.9092974268256817, 0.1411200080598672]
>>> import math
>>> foo.sin is math.sin # foo.sin was saved by reference
True
>>> import sys
>>> foo in sys.modules.values()
False
- Update the state of a non-importable module-type object:
>>> import dill
>>> from types import ModuleType
>>> foo = ModuleType('foo')
>>> foo.values = ['a','b']
>>> foo.sin = lambda x: x*x
>>> dill.load_module('foo_session.pkl', module=foo)
>>> [foo.sin(x) for x in foo.values]
[0.8414709848078965, 0.9092974268256817, 0.1411200080598672]
*Changed in version 0.3.6:* Function ``load_session()`` was renamed to
``load_module()``. Parameter ``main`` was renamed to ``module``.
See also:
:py:func:`load_module_asdict` to load the contents of module saved
with :py:func:`dump_module` into a dictionary.
"""
if 'main' in kwds:
warnings.warn(
"The argument 'main' has been renamed 'module'.",
PendingDeprecationWarning
)
if module is not None:
raise TypeError("both 'module' and 'main' arguments were used")
module = kwds.pop('main')
main = module
if hasattr(filename, 'read'):
file = filename
else:
if filename is None:
filename = str(TEMPDIR/'session.pkl')
file = open(filename, 'rb')
try:
file = _make_peekable(file)
#FIXME: dill.settings are disabled
unpickler = Unpickler(file, **kwds)
unpickler._session = True
# Resolve unpickler._main
pickle_main = _identify_module(file, main)
if main is None and pickle_main is not None:
main = pickle_main
if isinstance(main, str):
if main.startswith('__runtime__.'):
# Create runtime module to load the session into.
main = ModuleType(main.partition('.')[-1])
else:
main = _import_module(main)
if main is not None:
if not isinstance(main, ModuleType):
raise TypeError("%r is not a module" % main)
unpickler._main = main
else:
main = unpickler._main
# Check against the pickle's main.
is_main_imported = _is_imported_module(main)
if pickle_main is not None:
is_runtime_mod = pickle_main.startswith('__runtime__.')
if is_runtime_mod:
pickle_main = pickle_main.partition('.')[-1]
error_msg = "can't update{} module{} %r with the saved state of{} module{} %r"
if is_runtime_mod and is_main_imported:
raise ValueError(
error_msg.format(" imported", "", "", "-type object")
% (main.__name__, pickle_main)
)
if not is_runtime_mod and not is_main_imported:
raise ValueError(
error_msg.format("", "-type object", " imported", "")
% (pickle_main, main.__name__)
)
if main.__name__ != pickle_main:
raise ValueError(error_msg.format("", "", "", "") % (main.__name__, pickle_main))
# This is for find_class() to be able to locate it.
if not is_main_imported:
runtime_main = '__runtime__.%s' % main.__name__
sys.modules[runtime_main] = main
loaded = unpickler.load()
finally:
if not hasattr(filename, 'read'): # if newly opened file
file.close()
try:
del sys.modules[runtime_main]
except (KeyError, NameError):
pass
assert loaded is main
_restore_modules(unpickler, main)
if main is _main_module or main is module:
return None
else:
return main
# Backward compatibility.
def load_session(filename=None, main=None, **kwds):
warnings.warn("load_session() has been renamed load_module().", PendingDeprecationWarning)
load_module(filename, module=main, **kwds)
load_session.__doc__ = load_module.__doc__
def load_module_asdict(
filename: Union[str, os.PathLike] = None,
update: bool = False,
**kwds
) -> dict:
"""
Load the contents of a saved module into a dictionary.
``load_module_asdict()`` is the near-equivalent of::
lambda filename: vars(dill.load_module(filename)).copy()
however, does not alter the original module. Also, the path of
the loaded module is stored in the ``__session__`` attribute.
Args:
filename: a path-like object or a readable stream. If `None`
(the default), read from a named file in a temporary directory.
update: if `True`, initialize the dictionary with the current state
of the module prior to loading the state stored at filename.
**kwds: extra keyword arguments passed to :py:class:`Unpickler()`
Raises:
:py:exc:`UnpicklingError`: if unpickling fails
Returns:
A copy of the restored module's dictionary.
Note:
If ``update`` is True, the corresponding module may first be imported
into the current namespace before the saved state is loaded from
filename to the dictionary. Note that any module that is imported into
the current namespace as a side-effect of using ``update`` will not be
modified by loading the saved module in filename to a dictionary.
Example:
>>> import dill
>>> alist = [1, 2, 3]
>>> anum = 42
>>> dill.dump_module()
>>> anum = 0
>>> new_var = 'spam'
>>> main = dill.load_module_asdict()
>>> main['__name__'], main['__session__']
('__main__', '/tmp/session.pkl')
>>> main is globals() # loaded objects don't reference globals
False
>>> main['alist'] == alist
True
>>> main['alist'] is alist # was saved by value
False
>>> main['anum'] == anum # changed after the session was saved
False
>>> new_var in main # would be True if the option 'update' was set
False
"""
if 'module' in kwds:
raise TypeError("'module' is an invalid keyword argument for load_module_asdict()")
if hasattr(filename, 'read'):
file = filename
else:
if filename is None:
filename = str(TEMPDIR/'session.pkl')
file = open(filename, 'rb')
try:
file = _make_peekable(file)
main_name = _identify_module(file)
old_main = sys.modules.get(main_name)
main = ModuleType(main_name)
if update:
if old_main is None:
old_main = _import_module(main_name)
main.__dict__.update(old_main.__dict__)
else:
main.__builtins__ = __builtin__
sys.modules[main_name] = main
load_module(file, **kwds)
finally:
if not hasattr(filename, 'read'): # if newly opened file
file.close()
try:
if old_main is None:
del sys.modules[main_name]
else:
sys.modules[main_name] = old_main
except NameError: # failed before setting old_main
pass
main.__session__ = str(filename)
return main.__dict__
# Internal exports for backward compatibility with dill v0.3.5.1
# Can't be placed in dill._dill because of circular import problems.
for name in (
'_lookup_module', '_module_map', '_restore_modules', '_stash_modules',
'dump_session', 'load_session' # backward compatibility functions
):
setattr(_dill, name, globals()[name])
del name
| _PeekableReader |
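A tiny stdlib-only sketch of why peek() matters in the record above: the main module name has to be read from the pickle stream without consuming it, so the same bytes can still be unpickled afterwards. This uses io.BufferedReader directly, the same fallback _make_peekable tries first.
import io
import pickle

buf = io.BufferedReader(io.BytesIO(pickle.dumps({"a": 1})))
head = buf.peek(2)        # look at the leading opcodes without advancing
assert buf.tell() == 0    # the cursor has not moved
print(pickle.load(buf))   # {'a': 1} -- the full stream is still readable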
python | getsentry__sentry | src/sentry/core/endpoints/organization_request_project_creation.py | {
"start": 552,
"end": 712
} | class ____(CamelSnakeSerializer):
target_user_email = serializers.EmailField(required=True)
@region_silo_endpoint
| OrganizationRequestProjectCreationSerializer |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_wrapper_gpu.py | {
"start": 36612,
"end": 36719
} | class ____:
"""Marker that we need to call .item() on the tensor"""
dtype: torch_dtype
| UnwrapUnspecArg |
python | walkccc__LeetCode | solutions/1708. Largest Subarray Length K/1708.py | {
"start": 0,
"end": 170
} | class ____:
def largestSubarray(self, nums: list[int], k: int) -> list[int]:
mx = max(nums[:len(nums) - k + 1])
i = nums.index(mx)
return nums[i:i + k]
| Solution |
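A quick worked example for the record above (hypothetical input, for illustration only): with k = 3 the candidate starting positions are 0..len(nums)-k, the maximum of nums over that range picks the start, and the slice of length k from there is returned.
nums, k = [1, 4, 5, 2, 3], 3
start = nums.index(max(nums[:len(nums) - k + 1]))   # max of [1, 4, 5] is 5, at index 2
print(nums[start:start + k])                        # [5, 2, 3]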
python | pandas-dev__pandas | pandas/tests/groupby/test_numeric_only.py | {
"start": 297,
"end": 15298
} | class ____:
# make sure that we are passing thru kwargs to our agg functions
@pytest.fixture
def df(self):
# GH3668
# GH5724
df = DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": Series(["a", "b", "c"], dtype="str"),
"object": Series(["a", "b", "c"], dtype=object),
"category_string": Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": date_range("20130101", periods=3),
"datetimetz": date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"object",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
return df
@pytest.mark.parametrize("method", ["mean", "median"])
def test_averages(self, df, method):
# mean / median
expected_columns_numeric = Index(["int", "float", "category_int"])
gb = df.groupby("group")
expected = DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
Timestamp("2013-01-01 12:00:00"),
Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=[
"int",
"float",
"category_int",
],
)
result = getattr(gb, method)(numeric_only=True)
tm.assert_frame_equal(result.reindex_like(expected), expected)
expected_columns = expected.columns
self._check(df, method, expected_columns, expected_columns_numeric)
@pytest.mark.parametrize("method", ["min", "max"])
def test_extrema(self, df, method):
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_last(self, df, method):
expected_columns = Index(
[
"int",
"float",
"string",
"object",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
@pytest.mark.parametrize("method", ["sum", "cumsum"])
def test_sum_cumsum(self, df, method):
expected_columns_numeric = Index(["int", "float", "category_int"])
expected_columns = Index(
["int", "float", "string", "category_int", "timedelta"]
)
if method == "cumsum":
# cumsum loses string
expected_columns = Index(["int", "float", "category_int", "timedelta"])
self._check(df, method, expected_columns, expected_columns_numeric)
@pytest.mark.parametrize("method", ["prod", "cumprod"])
def test_prod_cumprod(self, df, method):
expected_columns = Index(["int", "float", "category_int"])
expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
@pytest.mark.parametrize("method", ["cummin", "cummax"])
def test_cummin_cummax(self, df, method):
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
# GH#15561: numeric_only=False set by default like min/max
expected_columns_numeric = expected_columns
self._check(df, method, expected_columns, expected_columns_numeric)
def _check(self, df, method, expected_columns, expected_columns_numeric):
gb = df.groupby("group")
# object dtypes for transformations are not implemented in Cython and
# have no Python fallback
exception = (
(NotImplementedError, TypeError) if method.startswith("cum") else TypeError
)
if method in ("min", "max", "cummin", "cummax", "cumsum", "cumprod"):
# The methods default to numeric_only=False and raise TypeError
msg = "|".join(
[
"Categorical is not ordered",
f"Cannot perform {method} with non-ordered Categorical",
re.escape(f"agg function failed [how->{method},dtype->object]"),
# cumsum/cummin/cummax/cumprod
"function is not implemented for this dtype",
f"dtype 'str' does not support operation '{method}'",
]
)
with pytest.raises(exception, match=msg):
getattr(gb, method)()
elif method in ("sum", "mean", "median", "prod"):
msg = "|".join(
[
"category type does not support sum operations",
re.escape(f"agg function failed [how->{method},dtype->object]"),
re.escape(f"agg function failed [how->{method},dtype->string]"),
f"dtype 'str' does not support operation '{method}'",
]
)
with pytest.raises(exception, match=msg):
getattr(gb, method)()
else:
result = getattr(gb, method)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
if method not in ("first", "last"):
msg = "|".join(
[
"Categorical is not ordered",
"category type does not support",
"function is not implemented for this dtype",
f"Cannot perform {method} with non-ordered Categorical",
re.escape(f"agg function failed [how->{method},dtype->object]"),
re.escape(f"agg function failed [how->{method},dtype->string]"),
f"dtype 'str' does not support operation '{method}'",
]
)
with pytest.raises(exception, match=msg):
getattr(gb, method)(numeric_only=False)
else:
result = getattr(gb, method)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
@pytest.mark.parametrize(
"kernel, has_arg",
[
("all", False),
("any", False),
("bfill", False),
("corr", True),
("corrwith", True),
("cov", True),
("cummax", True),
("cummin", True),
("cumprod", True),
("cumsum", True),
("diff", False),
("ffill", False),
("first", True),
("idxmax", True),
("idxmin", True),
("last", True),
("max", True),
("mean", True),
("median", True),
("min", True),
("nth", False),
("nunique", False),
("pct_change", False),
("prod", True),
("quantile", True),
("sem", True),
("skew", True),
("kurt", True),
("std", True),
("sum", True),
("var", True),
],
)
@pytest.mark.parametrize("numeric_only", [True, False, lib.no_default])
@pytest.mark.parametrize("keys", [["a1"], ["a1", "a2"]])
def test_numeric_only(kernel, has_arg, numeric_only, keys):
# GH#46072
# drops_nuisance: Whether the op drops nuisance columns even when numeric_only=False
# has_arg: Whether the op has a numeric_only arg
df = DataFrame({"a1": [1, 1], "a2": [2, 2], "a3": [5, 6], "b": 2 * [object]})
args = get_groupby_method_args(kernel, df)
kwargs = {} if numeric_only is lib.no_default else {"numeric_only": numeric_only}
gb = df.groupby(keys)
method = getattr(gb, kernel)
if has_arg and numeric_only is True:
# Cases where b does not appear in the result
if kernel == "corrwith":
warn = Pandas4Warning
msg = "DataFrameGroupBy.corrwith is deprecated"
else:
warn = None
msg = ""
with tm.assert_produces_warning(warn, match=msg):
result = method(*args, **kwargs)
assert "b" not in result.columns
elif (
# kernels that work on any dtype and have numeric_only arg
kernel in ("first", "last")
or (
# kernels that work on any dtype and don't have numeric_only arg
kernel in ("any", "all", "bfill", "ffill", "nth", "nunique")
and numeric_only is lib.no_default
)
):
result = method(*args, **kwargs)
assert "b" in result.columns
elif has_arg:
assert numeric_only is not True
# kernels that are successful on any dtype were above; this will fail
# object dtypes for transformations are not implemented in Cython and
# have no Python fallback
exception = NotImplementedError if kernel.startswith("cum") else TypeError
msg = "|".join(
[
"not allowed for this dtype",
"cannot be performed against 'object' dtypes",
"must be a string or a real number",
"unsupported operand type",
"function is not implemented for this dtype",
re.escape(f"agg function failed [how->{kernel},dtype->object]"),
]
)
if kernel == "quantile":
msg = "dtype 'object' does not support operation 'quantile'"
elif kernel == "idxmin":
msg = "'<' not supported between instances of 'type' and 'type'"
elif kernel == "idxmax":
msg = "'>' not supported between instances of 'type' and 'type'"
with pytest.raises(exception, match=msg):
if kernel == "corrwith":
warn = Pandas4Warning
msg = "DataFrameGroupBy.corrwith is deprecated"
else:
warn = None
msg = ""
with tm.assert_produces_warning(warn, match=msg):
method(*args, **kwargs)
elif not has_arg and numeric_only is not lib.no_default:
with pytest.raises(
TypeError, match="got an unexpected keyword argument 'numeric_only'"
):
method(*args, **kwargs)
else:
assert kernel in ("diff", "pct_change")
assert numeric_only is lib.no_default
# Doesn't have numeric_only argument and fails on nuisance columns
with pytest.raises(TypeError, match=r"unsupported operand type"):
method(*args, **kwargs)
@pytest.mark.parametrize("dtype", [bool, int, float, object])
def test_deprecate_numeric_only_series(dtype, groupby_func, request):
# GH#46560
grouper = [0, 0, 1]
ser = Series([1, 0, 0], dtype=dtype)
gb = ser.groupby(grouper)
if groupby_func == "corrwith":
# corrwith is not implemented on SeriesGroupBy
assert not hasattr(gb, groupby_func)
return
method = getattr(gb, groupby_func)
expected_ser = Series([1, 0, 0])
expected_gb = expected_ser.groupby(grouper)
expected_method = getattr(expected_gb, groupby_func)
args = get_groupby_method_args(groupby_func, ser)
fails_on_numeric_object = (
"corr",
"cov",
"cummax",
"cummin",
"cumprod",
"cumsum",
"quantile",
)
# ops that give an object result on object input
obj_result = (
"first",
"last",
"nth",
"bfill",
"ffill",
"shift",
"sum",
"diff",
"pct_change",
"var",
"mean",
"median",
"min",
"max",
"prod",
"skew",
"kurt",
)
# Test default behavior; kernels that fail may be enabled in the future but kernels
# that succeed should not be allowed to fail (without deprecation, at least)
if groupby_func in fails_on_numeric_object and dtype is object:
if groupby_func == "quantile":
msg = "dtype 'object' does not support operation 'quantile'"
else:
msg = "is not supported for object dtype"
with pytest.raises(TypeError, match=msg):
method(*args)
elif dtype is object:
result = method(*args)
expected = expected_method(*args)
if groupby_func in obj_result:
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
has_numeric_only = (
"first",
"last",
"max",
"mean",
"median",
"min",
"prod",
"quantile",
"sem",
"skew",
"kurt",
"std",
"sum",
"var",
"cummax",
"cummin",
"cumprod",
"cumsum",
)
if groupby_func not in has_numeric_only:
msg = "got an unexpected keyword argument 'numeric_only'"
with pytest.raises(TypeError, match=msg):
method(*args, numeric_only=True)
elif dtype is object:
msg = "|".join(
[
"SeriesGroupBy.sem called with numeric_only=True and dtype object",
"Series.skew does not allow numeric_only=True with non-numeric",
"cum(sum|prod|min|max) is not supported for object dtype",
r"Cannot use numeric_only=True with SeriesGroupBy\..* and non-numeric",
]
)
with pytest.raises(TypeError, match=msg):
method(*args, numeric_only=True)
elif dtype == bool and groupby_func == "quantile":
msg = "Cannot use quantile with bool dtype"
with pytest.raises(TypeError, match=msg):
# GH#51424
method(*args, numeric_only=False)
else:
result = method(*args, numeric_only=True)
expected = method(*args, numeric_only=False)
tm.assert_series_equal(result, expected)
| TestNumericOnly |
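A minimal illustration of the behavior the TestNumericOnly record above exercises, assuming a recent pandas: with numeric_only=True the non-numeric columns are dropped before aggregating.
import pandas as pd

df = pd.DataFrame({"group": [1, 1, 2], "int": [1, 2, 3], "string": ["a", "b", "c"]})
result = df.groupby("group").sum(numeric_only=True)
print(result.columns.tolist())  # ['int'] -- the "string" column was dropped
print(result["int"].tolist())   # [3, 3]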
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_connections.py | {
"start": 1660,
"end": 4205
} | class ____:
def test_connection_get_from_db(self, client, session):
connection = Connection(
conn_id="test_conn",
conn_type="http",
description="description",
host="localhost",
login="root",
password="admin",
schema="http",
port=8080,
extra='{"x_secret": "testsecret", "y_secret": "test"}',
)
session.add(connection)
session.commit()
response = client.get("/execution/connections/test_conn")
assert response.status_code == 200
assert response.json() == {
"conn_id": "test_conn",
"conn_type": "http",
"host": "localhost",
"login": "root",
"password": "admin",
"schema": "http",
"port": 8080,
"extra": '{"x_secret": "testsecret", "y_secret": "test"}',
}
# Remove connection
session.delete(connection)
session.commit()
@mock.patch.dict(
"os.environ",
{"AIRFLOW_CONN_TEST_CONN2": '{"uri": "http://root:admin@localhost:8080/https?headers=header"}'},
)
def test_connection_get_from_env_var(self, client, session):
response = client.get("/execution/connections/test_conn2")
assert response.status_code == 200
assert response.json() == {
"conn_id": "test_conn2",
"conn_type": "http",
"host": "localhost",
"login": "root",
"password": "admin",
"schema": "https",
"port": 8080,
"extra": '{"headers": "header"}',
}
def test_connection_get_not_found(self, client):
response = client.get("/execution/connections/non_existent_test_conn")
assert response.status_code == 404
assert response.json() == {
"detail": {
"message": "Connection with ID non_existent_test_conn not found",
"reason": "not_found",
}
}
@pytest.mark.usefixtures("access_denied")
def test_connection_get_access_denied(self, client):
response = client.get("/execution/connections/test_conn")
# Assert response status code and detail for access denied
assert response.status_code == 403
assert response.json() == {
"detail": {
"reason": "access_denied",
"message": "Task does not have access to connection test_conn",
}
}
| TestGetConnection |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_alloy_db.py | {
"start": 51474,
"end": 56208
} | class ____:
def setup_method(self):
self.operator = AlloyDBDeleteInstanceOperator(
task_id=TEST_TASK_ID,
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
etag=TEST_ETAG,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
gcp_conn_id=TEST_GCP_CONN_ID,
request_id=TEST_REQUEST_ID,
validate_request=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_init(self):
assert self.operator.cluster_id == TEST_CLUSTER_ID
assert self.operator.instance_id == TEST_INSTANCE_ID
assert self.operator.etag == TEST_ETAG
def test_template_fields(self):
expected_template_fields = {"cluster_id", "instance_id", "etag"} | set(
AlloyDBWriteBaseOperator.template_fields
)
assert set(AlloyDBDeleteInstanceOperator.template_fields) == expected_template_fields
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_instance = mock_hook.return_value.delete_instance
mock_operation = mock_delete_instance.return_value
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_delete_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
mock_log.info.assert_has_calls(
[
call("Deleting an AlloyDB instance."),
call("AlloyDB instance %s was successfully removed.", TEST_INSTANCE_ID),
]
)
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_instance = mock_hook.return_value.delete_instance
mock_operation = mock_delete_instance.return_value
mock_context = mock.MagicMock()
self.operator.validate_request = True
result = self.operator.execute(context=mock_context)
mock_delete_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
mock_log.info.assert_called_once_with("Validating a Delete AlloyDB instance request.")
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("get_operation_result"))
@mock.patch(DELETE_INSTANCE_OPERATOR_PATH.format("log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_exception(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_instance = mock_hook.return_value.delete_instance
mock_delete_instance.side_effect = Exception
mock_context = mock.MagicMock()
with pytest.raises(AirflowException):
_ = self.operator.execute(context=mock_context)
mock_delete_instance.assert_called_once_with(
instance_id=TEST_INSTANCE_ID,
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_get_operation_result.called
mock_log.info.assert_called_once_with("Deleting an AlloyDB instance.")
| TestAlloyDBDeleteInstanceOperator |
python | apache__airflow | providers/google/tests/unit/google/ads/hooks/test_ads.py | {
"start": 2738,
"end": 5687
} | class ____:
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_customer_service(self, mock_client, mock_hook):
mock_hook._get_customer_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("CustomerService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_get_service(self, mock_client, mock_hook):
mock_hook._get_service()
client = mock_client.load_from_dict
client.assert_called_once_with(mock_hook.google_ads_config)
client.return_value.get_service.assert_called_once_with("GoogleAdsService", version=API_VERSION)
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_search(self, mock_client, mock_hook):
service = mock_client.load_from_dict.return_value.get_service.return_value
mock_client.load_from_dict.return_value.get_type.side_effect = [PropertyMock(), PropertyMock()]
client_ids = ["1", "2"]
rows = ["row1", "row2"]
service.search.side_effect = rows
# Here we mock _extract_rows to assert calls and
# avoid additional __iter__ calls
mock_hook._extract_rows = list
query = "QUERY"
mock_hook.search(client_ids=client_ids, query=query)
for i, client_id in enumerate(client_ids):
name, args, kwargs = service.search.mock_calls[i]
assert kwargs["request"]["customer_id"] == client_id
assert kwargs["request"]["query"] == query
assert "page_size" not in kwargs["request"]
def test_extract_rows(self, mock_hook):
iterators = [[1, 2, 3], [4, 5, 6]]
assert mock_hook._extract_rows(iterators) == sum(iterators, [])
@mock.patch("airflow.providers.google.ads.hooks.ads.GoogleAdsClient")
def test_list_accessible_customers(self, mock_client, mock_hook):
accounts = ["a", "b", "c"]
service = mock_client.load_from_dict.return_value.get_service.return_value
service.list_accessible_customers.return_value = mock.MagicMock(resource_names=accounts)
result = mock_hook.list_accessible_customers()
service.list_accessible_customers.assert_called_once_with()
assert accounts == result
def test_determine_authentication_method(self, mock_hook_for_authentication_method):
mock_hook, expected_method = mock_hook_for_authentication_method
mock_hook._get_config()
if isinstance(expected_method, type) and issubclass(expected_method, Exception):
with pytest.raises(expected_method):
mock_hook._determine_authentication_method()
else:
mock_hook._determine_authentication_method()
assert mock_hook.authentication_method == expected_method
| TestGoogleAdsHook |
python | pytorch__pytorch | torch/_inductor/runtime/autotune_cache.py | {
"start": 2855,
"end": 3496
} | class ____(CacheArtifact):
@override
def populate_cache(self) -> None:
autotune_cache = _LocalAutotuneCacheBackend()
key = os.path.join(cache_dir(), self.key)
autotune_cache._put(key, self.content)
@override
@staticmethod
def type() -> str:
return "autotune"
@override
@staticmethod
def encode(content: JsonDataTy) -> bytes:
assert not isinstance(content, bytes)
serde = RemoteCacheJsonSerde()
content_bytes = serde.encode(content)
assert isinstance(content_bytes, bytes)
return content_bytes
@dataclasses.dataclass
| AutotuneCacheArtifact |
python | ansible__ansible | test/lib/ansible_test/_internal/ci/__init__.py | {
"start": 580,
"end": 1857
} | class ____:
"""Authentication helper."""
NAMESPACE: t.ClassVar = 'ci@core.ansible.com'
def __init__(self, key_file: pathlib.Path) -> None:
self.private_key_file = pathlib.Path(str(key_file).removesuffix('.pub'))
self.public_key_file = pathlib.Path(f'{self.private_key_file}.pub')
def sign_request(self, request: dict[str, object], context: AuthContext) -> None:
"""Sign the given auth request using the provided context."""
request.update(
stage=context.stage,
provider=context.provider,
request_id=context.request_id,
timestamp=datetime.datetime.now(tz=datetime.timezone.utc).replace(microsecond=0).isoformat(),
)
with tempfile.TemporaryDirectory() as temp_dir:
payload_path = pathlib.Path(temp_dir) / 'auth.json'
payload_path.write_text(json.dumps(request, sort_keys=True))
cmd = ['ssh-keygen', '-q', '-Y', 'sign', '-f', str(self.private_key_file), '-n', self.NAMESPACE, str(payload_path)]
raw_command(cmd, capture=False, interactive=True)
signature_path = pathlib.Path(f'{payload_path}.sig')
signature = signature_path.read_text()
request.update(signature=signature)
| AuthHelper |
python | joke2k__faker | faker/providers/credit_card/pt_PT/__init__.py | {
"start": 122,
"end": 5682
} | class ____(CreditCardProvider):
"""Implementation of ``pt_PT`` locale credit card
For all methods that take ``card_type`` as an argument a random card type
will be used if the supplied value is ``None``. The list of valid card types
includes ``'visa'``, ``'mastercard'`` and ``'maestro'``.
Source: https://bincheck.org/portugal
"""
prefix_visa = [
"400131",
"400190",
"400817",
"402192",
"402947",
"402956",
"403005",
"403006",
"403007",
"403008",
"403271",
"404520",
"404530",
"405758",
"406170",
"406475",
"407548",
"407549",
"407575",
"408237",
"408239",
"409842",
"409843",
"410000",
"410344",
"410345",
"410553",
"410557",
"411635",
"411700",
"411701",
"411869",
"412487",
"412488",
"412489",
"412657",
"412782",
"412990",
"413014",
"413793",
"413871",
"415158",
"415159",
"415170",
"415171",
"415174",
"415175",
"415194",
"415195",
"415238",
"415272",
"415273",
"415403",
"415404",
"415405",
"415440",
"415441",
"415569",
"415920",
"415961",
"416952",
"416963",
"416970",
"417005",
"417091",
"417092",
"417337",
"418847",
"419022",
"419682",
"419683",
"419684",
"421149",
"421510",
"422080",
"422240",
"422241",
"422414",
"422417",
"422597",
"422869",
"423392",
"423393",
"424118",
"424184",
"424208",
"424661",
"425509",
"425510",
"425906",
"426150",
"426360",
"426370",
"427256",
"427304",
"427729",
"427770",
"427867",
"428139",
"428184",
"428185",
"428186",
"428187",
"429711",
"430240",
"430241",
"431926",
"433390",
"433391",
"433511",
"433512",
"433513",
"433599",
"433618",
"433622",
"433966",
"437886",
"438257",
"439070",
"440637",
"440644",
"440645",
"442664",
"443977",
"443978",
"444224",
"444227",
"445961",
"445962",
"446140",
"446144",
"449389",
"450915",
"451156",
"451166",
"454755",
"455250",
"455290",
"455292",
"455658",
"456811",
"456812",
"457031",
"458058",
"458059",
"459432",
"459433",
"459449",
"460340",
"460341",
"460342",
"461247",
"461248",
"461249",
"462731",
"462732",
"464406",
"465964",
"476066",
"476067",
"476068",
"476069",
"476070",
"476071",
"476329",
"477920",
"477921",
"477922",
"477947",
"477989",
"478062",
"478063",
"479702",
"479736",
"483088",
"485672",
"486449",
"486457",
"489434",
"489485",
"490772",
"490830",
"490831",
"490832",
"490841",
"490863",
"491213",
"491546",
"491547",
"491613",
"492194",
"493402",
"493480",
"493800",
"493801",
"493830",
"498800",
"499968",
"499969",
"499986",
"422239",
"422041",
"464409",
"464408",
]
prefix_mastercard = [
"510122",
"510123",
"512556",
"518772",
"519744",
"519774",
"520342",
"524552",
"524878",
"525625",
"525808",
"526819",
"527014",
"528024",
"529119",
"530267",
"530770",
"532355",
"536468",
"541171",
"541557",
"542081",
"542098",
"542858",
"543099",
"543116",
"543123",
"544051",
"544052",
"544233",
"547260",
"547459",
"548168",
"548169",
"552727",
"552755",
"553057",
"554506",
"554517",
"554518",
"556660",
"557836",
"557882",
"557883",
"557888",
]
prefix_maestro = [
"501654",
"501659",
"670530",
"670811",
"670812",
"676938",
"676938",
"677393",
"677707",
"670835",
"670817",
]
credit_card_types = OrderedDict(
(
(
"maestro",
CreditCard("Maestro", prefix_maestro, 16, security_code="CVV2"),
),
(
"mastercard",
CreditCard("Mastercard", prefix_mastercard, 16, security_code="CVV2"),
),
("visa", CreditCard("Visa", prefix_visa, 16, security_code="CVV2")),
)
)
| Provider |
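A minimal usage sketch for the pt_PT credit card Provider above, assuming a standard Faker install; the method names come from the shared CreditCardProvider base class rather than from this record:

from faker import Faker

fake = Faker("pt_PT")
# card_type may be "visa", "mastercard" or "maestro"; None picks a random type.
print(fake.credit_card_number(card_type="visa"))
print(fake.credit_card_full())  # multi-line summary including the CVV2 security code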
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 15205,
"end": 16609
} | class ____(TestCase):
def test_init(self):
try:
foo = MultiValueField(model_attr="foo")
except:
self.fail()
self.assertRaises(SearchFieldError, MultiValueField, use_template=True)
def test_prepare(self):
mock = MockModel()
mock.sites = ["3", "4", "5"]
sites = MultiValueField(model_attr="sites")
self.assertEqual(sites.prepare(mock), ["3", "4", "5"])
# Simulate default=[1].
mock = MockModel()
default = MultiValueField(default=[1])
self.assertEqual(default.prepare(mock), [1])
# Simulate null=True.
mock = MockModel()
multy_none = MultiValueField(null=True)
self.assertEqual(multy_none.prepare(mock), None)
def test_convert_with_single_string(self):
field = MultiValueField()
self.assertEqual(["String"], field.convert("String"))
def test_convert_with_single_int(self):
field = MultiValueField()
self.assertEqual([1], field.convert(1))
def test_convert_with_list_of_strings(self):
field = MultiValueField()
self.assertEqual(
["String 1", "String 2"], field.convert(["String 1", "String 2"])
)
def test_convert_with_list_of_ints(self):
field = MultiValueField()
self.assertEqual([1, 2, 3], field.convert([1, 2, 3]))
| MultiValueFieldTestCase |
python | pennersr__django-allauth | allauth/headless/base/views.py | {
"start": 418,
"end": 1005
} | class ____(RESTView):
client = None
@classonlymethod
def as_api_view(cls, **initkwargs):
view_func = cls.as_view(**initkwargs)
if initkwargs["client"] == Client.APP:
view_func = decorators.app_view(view_func)
else:
view_func = decorators.browser_view(view_func)
return view_func
def dispatch(self, request, *args, **kwargs):
try:
return super().dispatch(request, *args, **kwargs)
except ReauthenticationRequired:
return response.ReauthenticationResponse(self.request)
| APIView |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_shrinker.py | {
"start": 18477,
"end": 25949
} | class ____(ShrinkerPass):
"""
A shrinker that really doesn't do anything at all. This is mostly a covering
test for the shrinker interface methods.
"""
def run_step(self):
return
def test_silly_shrinker_subclass():
assert BadShrinker.shrink(10, lambda _: True) == 10
numeric_nodes = nodes(choice_types=["integer", "float"])
@given(numeric_nodes, numeric_nodes, st.integers() | st.floats(allow_nan=False))
@example(
ChoiceNode(
type="float",
value=float(MAX_PRECISE_INTEGER - 1),
constraints=float_constr(),
was_forced=False,
),
ChoiceNode(
type="float",
value=float(MAX_PRECISE_INTEGER - 1),
constraints=float_constr(),
was_forced=False,
),
0,
)
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def test_redistribute_numeric_pairs(node1, node2, stop):
assume(node1.value + node2.value > stop)
# don't test extreme shrink_towards values, which lead to this test flaking
# from floating point errors
assume(abs(node1.constraints.get("shrink_towards", 0)) <= 1e10)
assume(abs(node2.constraints.get("shrink_towards", 0)) <= 1e10)
# avoid exhausting the tree while generating, which causes @shrinking_from's
# runner to raise
assume(
compute_max_children(node1.type, node1.constraints)
+ compute_max_children(node2.type, node2.constraints)
> 2
)
@shrinking_from([node1.value, node2.value])
def shrinker(data: ConjectureData):
v1 = getattr(data, f"draw_{node1.type}")(**node1.constraints)
v2 = getattr(data, f"draw_{node2.type}")(**node2.constraints)
if v1 + v2 > stop:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.redistribute_numeric_pairs)])
assert len(shrinker.choices) == 2
shrink_towards = (
node1.constraints["shrink_towards"] if node1.type == "integer" else 0
)
# we should always have brought the first choice closer to shrink_towards,
# or left the choices the same. And the values should sum to where they started.
assert abs(shrinker.choices[0] - shrink_towards) <= abs(
node1.value - shrink_towards
)
assert (
# pytest.approx for differences in floating-point summations
pytest.approx(shrinker.choices[0] + shrinker.choices[1], rel=0.001)
== node1.value + node2.value
)
@pytest.mark.parametrize(
"start, expected",
[
(("1" * 5, "1" * 5), ("0" * 5, "0" * 5)),
(("1222344", "1222344"), ("0" * 7, "0" * 7)),
],
)
@pytest.mark.parametrize("gap", [0, 1, 2, 3])
def test_lower_duplicated_characters_across_choices(start, expected, gap):
# the draws from `gap` are irrelevant and only test that we can still shrink
# duplicated characters from nearby choices even when the choices are not
# consecutive.
@shrinking_from([start[0], *([0] * gap), start[1]])
def shrinker(data: ConjectureData):
s1 = data.draw(st.text())
for _ in range(gap):
data.draw(st.integers())
s2 = data.draw(st.text())
if s1 == s2:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.lower_duplicated_characters)])
assert shrinker.choices == (expected[0],) + (0,) * gap + (expected[1],)
def test_shrinking_one_of_with_same_shape():
# This is a covering test for our one_of shrinking logic for the case when
# the choice sequence *doesn't* change shape in the newly chosen one_of branch.
@shrinking_from([1, 0])
def shrinker(data: ConjectureData):
n = data.draw_integer(0, 1)
data.draw_integer()
if n == 1:
data.mark_interesting(interesting_origin())
shrinker.initial_coarse_reduction()
assert shrinker.choices == (1, 0)
@pytest.mark.parametrize("invert", [False, True]) # cover the negative case
@pytest.mark.parametrize(
"min_value, max_value", [(None, None), (None, 15), (-15, None), (-15, 15)]
)
@pytest.mark.parametrize(
"shrink_towards, start",
[
# straddles shrink_towards
(5, (2, 10)),
# both below shrink_towards
(5, (2, 4)),
# both above shrink_towards
(5, (8, 10)),
# exactly shrink_towards
(5, (5, 5)),
],
)
def test_redistribute_numeric_pairs_shrink_towards_explicit_integer(
min_value, max_value, shrink_towards, start, invert
):
if invert:
shrink_towards = -shrink_towards
start = (-start[1], -start[0])
# redistributing should redistribute towards shrink_towards, not 0
target = start[0] + start[1]
@shrinking_from(start)
def shrinker(data: ConjectureData):
v1 = data.draw_integer(
shrink_towards=shrink_towards, min_value=min_value, max_value=max_value
)
v2 = data.draw_integer(
shrink_towards=shrink_towards, min_value=min_value, max_value=max_value
)
if v1 + v2 == target:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.redistribute_numeric_pairs)])
assert shrinker.choices == (shrink_towards, target - shrink_towards)
@pytest.mark.parametrize("invert", [False, True])
@pytest.mark.parametrize(
"start",
[
(2.0, 10.0),
(2.0, 4.0),
(8.0, 10.0),
(5.0, 5.0),
],
)
def test_redistribute_numeric_pairs_shrink_towards_explicit_float(start, invert):
if invert:
start = (-start[1], -start[0])
target = start[0] + start[1]
@shrinking_from(start)
def shrinker(data: ConjectureData):
v1 = data.draw_float()
v2 = data.draw_float()
if v1 + v2 == target:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.redistribute_numeric_pairs)])
assert shrinker.choices == (0, target)
@pytest.mark.parametrize(
"shrink_towards, start",
[
(5, (2, 10.0)),
(5, (2, 4.0)),
(5, (8, 10.0)),
(5, (5, 5.0)),
],
)
def test_redistribute_numeric_pairs_shrink_towards_explicit_combined(
shrink_towards, start
):
# test case for one integer and one float draw. No `invert` parametrization
# because it moderately complicates things
target = start[0] + start[1]
@shrinking_from(start)
def shrinker(data: ConjectureData):
v1 = data.draw_integer(shrink_towards=shrink_towards)
v2 = data.draw_float()
if v1 + v2 == target:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.redistribute_numeric_pairs)])
assert shrinker.choices == (shrink_towards, target - shrink_towards)
@given(st.data(), st.integers(), st.integers())
def test_redistribute_numeric_pairs_shrink_towards_integer(
data, target, shrink_towards
):
start = data.draw(st.integers(max_value=target))
end = target - start
@shrinking_from([start, end])
def shrinker(data: ConjectureData):
v1 = data.draw_integer(shrink_towards=shrink_towards)
v2 = data.draw_integer(shrink_towards=shrink_towards)
if v1 + v2 == target:
data.mark_interesting(interesting_origin())
shrinker.fixate_shrink_passes([ShrinkPass(shrinker.redistribute_numeric_pairs)])
assert shrinker.choices == (shrink_towards, target - shrink_towards)
| BadShrinker |
python | ray-project__ray | python/ray/data/tests/test_namespace_expressions.py | {
"start": 12185,
"end": 20181
} | class ____:
"""Tests for struct namespace operations."""
def test_struct_field(self, dataset_format):
"""Test struct.field() extracts field."""
# Arrow table with explicit struct types
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "age": 30},
{"name": "Bob", "age": 25},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field("age", pa.int32()),
]
),
)
}
)
# Items representation
items_data = [
{"user": {"name": "Alice", "age": 30}},
{"user": {"name": "Bob", "age": 25}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column("age", col("user").struct.field("age")).to_pandas()
expected = pd.DataFrame(
{
"user": [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}],
"age": [30, 25],
}
)
assert rows_same(result, expected)
def test_struct_bracket(self, dataset_format):
"""Test struct['field'] bracket notation."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "age": 30},
{"name": "Bob", "age": 25},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field("age", pa.int32()),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "age": 30}},
{"user": {"name": "Bob", "age": 25}},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column("name", col("user").struct["name"]).to_pandas()
expected = pd.DataFrame(
{
"user": [{"name": "Alice", "age": 30}, {"name": "Bob", "age": 25}],
"name": ["Alice", "Bob"],
}
)
assert rows_same(result, expected)
def test_struct_nested_field(self, dataset_format):
"""Test nested struct field access with .field()."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field(
"address",
pa.struct(
[
pa.field("city", pa.string()),
pa.field("zip", pa.string()),
]
),
),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "address": {"city": "NYC", "zip": "10001"}}},
{
"user": {"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column(
"city", col("user").struct.field("address").struct.field("city")
).to_pandas()
expected = pd.DataFrame(
{
"user": [
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
"city": ["NYC", "LA"],
}
)
assert rows_same(result, expected)
def test_struct_nested_bracket(self, dataset_format):
"""Test nested struct field access with brackets."""
arrow_table = pa.table(
{
"user": pa.array(
[
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
type=pa.struct(
[
pa.field("name", pa.string()),
pa.field(
"address",
pa.struct(
[
pa.field("city", pa.string()),
pa.field("zip", pa.string()),
]
),
),
]
),
)
}
)
items_data = [
{"user": {"name": "Alice", "address": {"city": "NYC", "zip": "10001"}}},
{
"user": {"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
},
]
ds = _create_dataset(items_data, dataset_format, arrow_table)
result = ds.with_column(
"zip", col("user").struct["address"].struct["zip"]
).to_pandas()
expected = pd.DataFrame(
{
"user": [
{"name": "Alice", "address": {"city": "NYC", "zip": "10001"}},
{"name": "Bob", "address": {"city": "LA", "zip": "90001"}},
],
"zip": ["10001", "90001"],
}
)
assert rows_same(result, expected)
# ──────────────────────────────────────
# Datetime Namespace Tests
# ──────────────────────────────────────
def test_datetime_namespace_all_operations(ray_start_regular):
"""Test all datetime namespace operations on a datetime column."""
ts = datetime.datetime(2024, 1, 2, 10, 30, 0)
ds = ray.data.from_items([{"ts": ts}])
result_ds = ds.select(
[
col("ts").dt.year().alias("year"),
col("ts").dt.month().alias("month"),
col("ts").dt.day().alias("day"),
col("ts").dt.hour().alias("hour"),
col("ts").dt.minute().alias("minute"),
col("ts").dt.second().alias("second"),
col("ts").dt.strftime("%Y-%m-%d").alias("date_str"),
col("ts").dt.floor("day").alias("ts_floor"),
col("ts").dt.ceil("day").alias("ts_ceil"),
col("ts").dt.round("day").alias("ts_round"),
]
)
actual = result_ds.to_pandas()
expected = pd.DataFrame(
[
{
"year": 2024,
"month": 1,
"day": 2,
"hour": 10,
"minute": 30,
"second": 0,
"date_str": "2024-01-02",
"ts_floor": datetime.datetime(2024, 1, 2, 0, 0, 0),
"ts_ceil": datetime.datetime(2024, 1, 3, 0, 0, 0),
"ts_round": datetime.datetime(2024, 1, 3, 0, 0, 0),
}
]
)
assert rows_same(actual, expected)
def test_dt_namespace_invalid_dtype_raises(ray_start_regular):
"""Test that dt namespace on non-datetime column raises an error."""
ds = ray.data.from_items([{"value": 1}])
with pytest.raises(Exception):
ds.select(col("value").dt.year()).to_pandas()
# ──────────────────────────────────────
# Integration Tests
# ──────────────────────────────────────
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
| TestStructNamespace |
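The struct-namespace tests above double as usage documentation. A condensed sketch, assuming the col helper is importable from ray.data.expressions as the tests imply:

import ray
from ray.data.expressions import col

ds = ray.data.from_items([{"user": {"name": "Alice", "age": 30}}])
# .struct.field("age") and .struct["age"] are equivalent ways to project a struct field.
ds = ds.with_column("age", col("user").struct.field("age"))
ds = ds.with_column("name", col("user").struct["name"])
print(ds.to_pandas())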
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 37000,
"end": 44910
} | class ____(_SQLAsset):
"""A class representing a table from a SQL database
Args:
table_name: The name of the database table to be added
schema_name: The name of the schema containing the database table to be added.
"""
# Instance fields
type: Literal["table"] = "table"
# TODO: quoted_name or str
table_name: str = pydantic.Field(
"",
description="Name of the SQL table. Will default to the value of `name` if not provided.",
)
schema_name: Optional[str] = None
_quote_character: Optional[str] = None
@property
def qualified_name(self) -> str:
return f"{self.schema_name}.{self.table_name}" if self.schema_name else self.table_name
@pydantic.validator("table_name", pre=True, always=True)
def _default_table_name(cls, table_name: str, values: dict, **kwargs) -> str:
if not (validated_table_name := table_name or values.get("name")):
raise ValueError( # noqa: TRY003 # FIXME CoP
"table_name cannot be empty and should default to name if not provided"
)
return validated_table_name
@pydantic.validator("table_name")
def _resolve_quoted_name(cls, table_name: str, values: Dict[str, Any]) -> str:
# We reimport sqlalchemy from our compatibility layer because we make
# quoted_name a top level import there.
from great_expectations.compatibility import sqlalchemy
if sqlalchemy.quoted_name: # type: ignore[truthy-function] # FIXME CoP
if isinstance(table_name, sqlalchemy.quoted_name):
return table_name
quote: bool = cls._is_bracketed_by_quotes(table_name)
if quote:
# https://docs.sqlalchemy.org/en/20/core/sqlelement.html#sqlalchemy.sql.expression.quoted_name.quote
# Remove the quotes and add them back using the sqlalchemy.quoted_name function
# TODO: We need to handle nested quotes
values["_quote_character"] = table_name[0]
quote = True
table_name = table_name.lstrip("".join(DEFAULT_INITIAL_QUOTE_CHARACTERS)).rstrip(
"".join(DEFAULT_FINAL_QUOTE_CHARACTERS.values())
)
return sqlalchemy.quoted_name(
value=table_name,
quote=quote,
)
return table_name
@override
def dict(self, **kwargs) -> Dict[str, Any]:
original_dict = super().dict(**kwargs)
# we need to ensure we retain the quotes when serializing quoted names
qc = self._quote_character
if qc is not None:
original_dict["table_name"] = (
f"{qc}{self.table_name}{DEFAULT_FINAL_QUOTE_CHARACTERS[qc]}"
)
return original_dict
@override
def test_connection(self) -> None:
"""Test the connection for the TableAsset.
Raises:
TestConnectionError: If the connection test fails.
"""
datasource: SQLDatasource = self.datasource
engine: sqlalchemy.Engine = datasource.get_engine()
inspector: sqlalchemy.Inspector = sa.inspect(engine)
if self.schema_name and self.schema_name not in inspector.get_schema_names():
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
f'Attempt to connect to table: "{self.qualified_name}" failed because the schema '
f'"{self.schema_name}" does not exist.'
)
try:
with engine.connect() as connection:
table = sa.table(self.table_name, schema=self.schema_name)
# don't need to fetch any data, just want to make sure the table is accessible
connection.execute(sa.select(1, table).limit(1))
except Exception as query_error:
LOGGER.info(f"{self.name} `.test_connection()` query failed: {query_error!r}")
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
f"Attempt to connect to table: {self.qualified_name} failed because the test query "
f"failed. Ensure the table exists and the user has access to select data from the table: {query_error}" # noqa: E501 # FIXME CoP
) from query_error
@override
def as_selectable(self) -> sqlalchemy.Selectable:
"""Returns the table as a sqlalchemy Selectable.
This can be used in a from clause for a query against this data.
"""
return sa.table(self.table_name, schema=self.schema_name)
@override
def _create_batch_spec_kwargs(self) -> Dict[str, Any]:
return {
"type": "table",
"data_asset_name": self.name,
"table_name": self.table_name,
"schema_name": self.schema_name,
"batch_identifiers": {},
}
@override
def _create_batch_spec(self, batch_spec_kwargs: Dict) -> SqlAlchemyDatasourceBatchSpec:
return SqlAlchemyDatasourceBatchSpec(**batch_spec_kwargs)
@staticmethod
def _is_bracketed_by_quotes(target: str) -> bool:
"""
Returns True if the target string is bracketed by quotes.
Override this method if the quote characters are different than `'` or `"` in the
target database, such as backticks in Databricks SQL.
Arguments:
target: A string to check if it is bracketed by quotes.
Returns:
True if the target string is bracketed by quotes.
"""
return any(
target.startswith(quote) and target.endswith(DEFAULT_FINAL_QUOTE_CHARACTERS[quote])
for quote in DEFAULT_INITIAL_QUOTE_CHARACTERS
)
@classmethod
def _to_lower_if_not_bracketed_by_quotes(cls, target: str) -> str:
"""Returns the target string in lowercase if it is not bracketed by quotes.
This is used to ensure case-insensitivity in sqlalchemy queries.
Arguments:
target: A string to convert to lowercase if it is not bracketed by quotes.
Returns:
The target string in lowercase if it is not bracketed by quotes.
"""
return to_lower_if_not_quoted(target, quote_characters=DEFAULT_INITIAL_QUOTE_CHARACTERS)
def _warn_for_more_specific_datasource_type(connection_string: str) -> None:
"""
Warns if a more specific datasource type may be more appropriate based on the connection string connector prefix.
""" # noqa: E501 # FIXME CoP
from great_expectations.datasource.fluent.sources import DataSourceManager
connector: str = connection_string.split("://")[0].split("+")[0]
type_lookup_plus: Dict[str, str] = {
n: DataSourceManager.type_lookup[n].__name__
for n in DataSourceManager.type_lookup.type_names()
}
# type names are not always an exact match to connector strings
type_lookup_plus.update(
{
"postgresql": type_lookup_plus["postgres"],
"databricks": type_lookup_plus["databricks_sql"],
}
)
more_specific_datasource: str | None = type_lookup_plus.get(connector)
if more_specific_datasource:
warnings.warn(
f"You are using a generic SQLDatasource but a more specific {more_specific_datasource} "
"may be more appropriate"
" https://docs.greatexpectations.io/docs/guides/connecting_to_your_data/fluent/database/connect_sql_source_data",
category=GxDatasourceWarning,
)
# This improves our error messages by providing a more specific type for pydantic to validate against # noqa: E501 # FIXME CoP
# It also ensure the generated jsonschema has a oneOf instead of anyOf field for assets
# https://docs.pydantic.dev/1.10/usage/types/#discriminated-unions-aka-tagged-unions
AssetTypes = Annotated[Union[TableAsset, QueryAsset], Field(discriminator="type")]
@public_api
| TableAsset |
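A standalone sketch of the quote-detection rule used by _is_bracketed_by_quotes above; the two constants are hypothetical stand-ins for the module-level DEFAULT_INITIAL_QUOTE_CHARACTERS and DEFAULT_FINAL_QUOTE_CHARACTERS that the record references but does not show:

# Hypothetical stand-ins for the module-level constants referenced above.
INITIAL_QUOTES = ("'", '"')
FINAL_QUOTES = {"'": "'", '"': '"'}

def is_bracketed_by_quotes(target: str) -> bool:
    # True when the identifier is wrapped in a matching pair of quote characters,
    # in which case the table name's casing and quoting are preserved.
    return any(
        target.startswith(q) and target.endswith(FINAL_QUOTES[q]) for q in INITIAL_QUOTES
    )

assert is_bracketed_by_quotes('"MixedCaseTable"')
assert not is_bracketed_by_quotes("plain_table")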
python | eventlet__eventlet | eventlet/hubs/hub.py | {
"start": 3094,
"end": 17604
} | class ____:
""" Base hub class for easing the implementation of subclasses that are
specific to a particular underlying event architecture. """
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
READ = READ
WRITE = WRITE
def __init__(self, clock=None):
self.listeners = {READ: {}, WRITE: {}}
self.secondaries = {READ: {}, WRITE: {}}
self.closed = []
if clock is None:
clock = monotonic
self.clock = clock
self.greenlet = greenlet.greenlet(self.run)
self.stopping = False
self.running = False
self.timers = []
self.next_timers = []
self.lclass = FdListener
self.timers_canceled = 0
self.debug_exceptions = True
self.debug_blocking = False
self.debug_blocking_resolution = 1
def block_detect_pre(self):
# shortest alarm we can possibly raise is one second
tmp = signal.signal(signal.SIGALRM, alarm_handler)
if tmp != alarm_handler:
self._old_signal_handler = tmp
arm_alarm(self.debug_blocking_resolution)
def block_detect_post(self):
if (hasattr(self, "_old_signal_handler") and
self._old_signal_handler):
signal.signal(signal.SIGALRM, self._old_signal_handler)
signal.alarm(0)
def add(self, evtype, fileno, cb, tb, mark_as_closed):
""" Signals an intent to or write a particular file descriptor.
The *evtype* argument is either the constant READ or WRITE.
The *fileno* argument is the file number of the file of interest.
The *cb* argument is the callback which will be called when the file
is ready for reading/writing.
The *tb* argument is the throwback used to signal (into the greenlet)
that the file was closed.
The *mark_as_closed* is used in the context of the event hub to
prepare a Python object as being closed, pre-empting further
close operations from accidentally shutting down the wrong OS thread.
"""
listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
bucket = self.listeners[evtype]
if fileno in bucket:
if g_prevent_multiple_readers:
raise RuntimeError(
"Second simultaneous %s on fileno %s "
"detected. Unless you really know what you're doing, "
"make sure that only one greenthread can %s any "
"particular socket. Consider using a pools.Pool. "
"If you do know what you're doing and want to disable "
"this error, call "
"eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
"THAT THREAD=%s" % (
evtype, fileno, evtype, cb, bucket[fileno]))
# store off the second listener in another structure
self.secondaries[evtype].setdefault(fileno, []).append(listener)
else:
bucket[fileno] = listener
return listener
def _obsolete(self, fileno):
""" We've received an indication that 'fileno' has been obsoleted.
Any current listeners must be defanged, and notifications to
their greenlets queued up to send.
"""
found = False
for evtype, bucket in self.secondaries.items():
if fileno in bucket:
for listener in bucket[fileno]:
found = True
self.closed.append(listener)
listener.defang()
del bucket[fileno]
# For the primary listeners, we actually need to call remove,
# which may modify the underlying OS polling objects.
for evtype, bucket in self.listeners.items():
if fileno in bucket:
listener = bucket[fileno]
found = True
self.closed.append(listener)
self.remove(listener)
listener.defang()
return found
def notify_close(self, fileno):
""" We might want to do something when a fileno is closed.
However, currently it suffices to obsolete listeners only
when we detect an old fileno being recycled, on open.
"""
pass
def remove(self, listener):
if listener.spent:
# trampoline may trigger this in its finally section.
return
fileno = listener.fileno
evtype = listener.evtype
if listener is self.listeners[evtype][fileno]:
del self.listeners[evtype][fileno]
# migrate a secondary listener to be the primary listener
if fileno in self.secondaries[evtype]:
sec = self.secondaries[evtype][fileno]
if sec:
self.listeners[evtype][fileno] = sec.pop(0)
if not sec:
del self.secondaries[evtype][fileno]
else:
self.secondaries[evtype][fileno].remove(listener)
if not self.secondaries[evtype][fileno]:
del self.secondaries[evtype][fileno]
def mark_as_reopened(self, fileno):
""" If a file descriptor is returned by the OS as the result of some
open call (or equivalent), that signals that it might be being
recycled.
Catch the case where the fd was previously in use.
"""
self._obsolete(fileno)
def remove_descriptor(self, fileno):
""" Completely remove all listeners for this fileno. For internal use
only."""
# gather any listeners we have
listeners = []
listeners.append(self.listeners[READ].get(fileno, noop))
listeners.append(self.listeners[WRITE].get(fileno, noop))
listeners.extend(self.secondaries[READ].get(fileno, ()))
listeners.extend(self.secondaries[WRITE].get(fileno, ()))
for listener in listeners:
try:
# listener.cb may want to remove(listener)
listener.cb(fileno)
except Exception:
self.squelch_generic_exception(sys.exc_info())
# NOW this fileno is dead to all
self.listeners[READ].pop(fileno, None)
self.listeners[WRITE].pop(fileno, None)
self.secondaries[READ].pop(fileno, None)
self.secondaries[WRITE].pop(fileno, None)
def close_one(self):
""" Triggered from the main run loop. If a listener's underlying FD was
closed somehow, throw an exception back to the trampoline, which should
be able to manage it appropriately.
"""
listener = self.closed.pop()
if not listener.greenlet.dead:
# There's no point signalling a greenlet that's already dead.
listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))
def ensure_greenlet(self):
if self.greenlet.dead:
# create new greenlet sharing same parent as original
new = greenlet.greenlet(self.run, self.greenlet.parent)
# need to assign as parent of old greenlet
# for those greenlets that are currently
# children of the dead hub and may subsequently
# exit without further switching to hub.
self.greenlet.parent = new
self.greenlet = new
def switch(self):
cur = greenlet.getcurrent()
assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
switch_out = getattr(cur, 'switch_out', None)
if switch_out is not None:
try:
switch_out()
except:
self.squelch_generic_exception(sys.exc_info())
self.ensure_greenlet()
try:
if self.greenlet.parent is not cur:
cur.parent = self.greenlet
except ValueError:
pass # gets raised if there is a greenlet parent cycle
return self.greenlet.switch()
def squelch_exception(self, fileno, exc_info):
traceback.print_exception(*exc_info)
sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
sys.stderr.flush()
try:
self.remove_descriptor(fileno)
except Exception as e:
sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
sys.stderr.flush()
def wait(self, seconds=None):
raise NotImplementedError("Implement this in a subclass")
def default_sleep(self):
return 60.0
def sleep_until(self):
t = self.timers
if not t:
return None
return t[0][0]
def run(self, *a, **kw):
"""Run the runloop until abort is called.
"""
# accept and discard variable arguments because they will be
# supplied if other greenlets have run and exited before the
# hub's greenlet gets a chance to run
if self.running:
raise RuntimeError("Already running!")
try:
self.running = True
self.stopping = False
while not self.stopping:
while self.closed:
# We ditch all of these first.
self.close_one()
self.prepare_timers()
if self.debug_blocking:
self.block_detect_pre()
self.fire_timers(self.clock())
if self.debug_blocking:
self.block_detect_post()
self.prepare_timers()
wakeup_when = self.sleep_until()
if wakeup_when is None:
sleep_time = self.default_sleep()
else:
sleep_time = wakeup_when - self.clock()
if sleep_time > 0:
self.wait(sleep_time)
else:
self.wait(0)
else:
self.timers_canceled = 0
del self.timers[:]
del self.next_timers[:]
finally:
self.running = False
self.stopping = False
def abort(self, wait=False):
"""Stop the runloop. If run is executing, it will exit after
completing the next runloop iteration.
Set *wait* to True to cause abort to switch to the hub immediately and
wait until it's finished processing. Waiting for the hub will only
work from the main greenthread; all other greenthreads will become
unreachable.
"""
if self.running:
self.stopping = True
if wait:
assert self.greenlet is not greenlet.getcurrent(
), "Can't abort with wait from inside the hub's greenlet."
# schedule an immediate timer just so the hub doesn't sleep
self.schedule_call_global(0, lambda: None)
# switch to it; when done the hub will switch back to its parent,
# the main greenlet
self.switch()
def squelch_generic_exception(self, exc_info):
if self.debug_exceptions:
traceback.print_exception(*exc_info)
sys.stderr.flush()
def squelch_timer_exception(self, timer, exc_info):
if self.debug_exceptions:
traceback.print_exception(*exc_info)
sys.stderr.flush()
def add_timer(self, timer):
scheduled_time = self.clock() + timer.seconds
self.next_timers.append((scheduled_time, timer))
return scheduled_time
def timer_canceled(self, timer):
self.timers_canceled += 1
len_timers = len(self.timers) + len(self.next_timers)
if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
self.timers_canceled = 0
self.timers = [t for t in self.timers if not t[1].called]
self.next_timers = [t for t in self.next_timers if not t[1].called]
heapq.heapify(self.timers)
def prepare_timers(self):
heappush = heapq.heappush
t = self.timers
for item in self.next_timers:
if item[1].called:
self.timers_canceled -= 1
else:
heappush(t, item)
del self.next_timers[:]
def schedule_call_local(self, seconds, cb, *args, **kw):
"""Schedule a callable to be called after 'seconds' seconds have
elapsed. Cancel the timer if greenlet has exited.
seconds: The number of seconds to wait.
cb: The callable to call after the given time.
*args: Arguments to pass to the callable when called.
**kw: Keyword arguments to pass to the callable when called.
"""
t = timer.LocalTimer(seconds, cb, *args, **kw)
self.add_timer(t)
return t
def schedule_call_global(self, seconds, cb, *args, **kw):
"""Schedule a callable to be called after 'seconds' seconds have
elapsed. The timer will NOT be canceled if the current greenlet has
exited before the timer fires.
seconds: The number of seconds to wait.
cb: The callable to call after the given time.
*args: Arguments to pass to the callable when called.
**kw: Keyword arguments to pass to the callable when called.
"""
t = timer.Timer(seconds, cb, *args, **kw)
self.add_timer(t)
return t
def fire_timers(self, when):
t = self.timers
heappop = heapq.heappop
while t:
next = t[0]
exp = next[0]
timer = next[1]
if when < exp:
break
heappop(t)
try:
if timer.called:
self.timers_canceled -= 1
else:
timer()
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_timer_exception(timer, sys.exc_info())
# for debugging:
def get_readers(self):
return self.listeners[READ].values()
def get_writers(self):
return self.listeners[WRITE].values()
def get_timers_count(hub):
return len(hub.timers) + len(hub.next_timers)
def set_debug_listeners(self, value):
if value:
self.lclass = DebugListener
else:
self.lclass = FdListener
def set_timer_exceptions(self, value):
self.debug_exceptions = value
| BaseHub |
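BaseHub is not normally instantiated directly; concrete hubs subclass it, and application code reaches the active hub through eventlet's helpers. A small sketch of the timer API documented in schedule_call_global above, assuming a standard eventlet install:

import eventlet
from eventlet import hubs

def tick():
    print("timer fired")

hub = hubs.get_hub()
# Global timers are not cancelled when the scheduling greenthread exits;
# schedule_call_local timers are.
timer = hub.schedule_call_global(0.1, tick)
eventlet.sleep(0.2)  # yield to the hub so the timer can fire
timer.cancel()       # harmless here; the timer has already fired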
python | pytorch__pytorch | torch/_dynamo/variables/distributed.py | {
"start": 3826,
"end": 4924
} | class ____(DistributedVariable):
"""
Tracks torch.distributed.GroupMember and torch.distributed.group, which are
instances of the metaclass _WorldMeta.
"""
@classmethod
def is_group_member_type(cls, value: object) -> bool:
if not cls.is_available():
return False
from torch.distributed.distributed_c10d import _WorldMeta
return type(value) is _WorldMeta
def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker:
if name == "WORLD":
assert self.source
source = AttrSource(base=self.source, member="WORLD")
install_guard(source.make_guard(GuardBuilder.ID_MATCH))
return ProcessGroupVariable(self.value.WORLD)
elif name == "NON_GROUP_MEMBER":
assert self.source
source = AttrSource(base=self.source, member="NON_GROUP_MEMBER")
install_guard(source.make_guard(GuardBuilder.ID_MATCH))
return EnumVariable(self.value.NON_GROUP_MEMBER)
return super().var_getattr(tx, name)
| WorldMetaClassVariable |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | {
"start": 66343,
"end": 71739
} | class ____(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.enable_control_flow_v2()
super(WhileV2Test, self).setUp()
def tearDown(self):
if not self._enabled:
control_flow_v2_toggles.disable_control_flow_v2()
super(WhileV2Test, self).tearDown()
def test_while_outside_loop(self):
def _f():
return while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return _f() + i
self._test_loop_fn(loop_fn, 3)
def test_invariant_while(self):
def loop_fn(_):
return while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return while_loop.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
def test_while_with_stateful_ops(self):
def loop_fn(_):
j, _ = while_loop.while_loop(
lambda j, x: j < 4, lambda j, x:
(j + 1, x + random_ops.random_uniform([])), [0, 0.])
return j
self._test_loop_fn(loop_fn, 3)
def test_while_with_variable(self):
v = resource_variable_ops.ResourceVariable(5.)
def loop_fn(_):
_, output = while_loop.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + v), [0, 0.])
return output
self._test_loop_fn(loop_fn, 3)
def test_while_unstacked_condition(self):
def loop_fn(i):
return while_loop.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3)
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
return while_loop.while_loop(
lambda j, _: j < lengths_i, lambda j, t:
(j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
self._test_loop_fn(loop_fn, 3)
def test_while_change_input_invariance(self):
# This tests cases where a loop invariant input to while has loop dependent
# operations applied to it inside the while body.
# It also tests inputs that are passed through.
def loop_fn(i):
return while_loop.while_loop(
lambda j, *_: j < i, lambda j, x, y, z, w:
(j + 1, x + i, y + x, z, w), [
0,
constant_op.constant(0),
constant_op.constant(1), i,
constant_op.constant(2)
])
self._test_loop_fn(loop_fn, 3)
def test_while_shape_invariants(self):
def loop_fn(i):
return while_loop.while_loop(
lambda j, *_: j < 4,
lambda j, x, y: (j + 1, x + i, y + 1),
[0, constant_op.constant([0, 1]),
constant_op.constant([2, 3])],
shape_invariants=[
None,
tensor_shape.TensorShape([2]),
tensor_shape.TensorShape([2])
])
self._test_loop_fn(loop_fn, 3)
def test_while_jacobian(self):
# Note that we wrap the code below in a tf.function since we don't want the
# while_loop call to be evaluated eagerly using a python loop.
@def_function.function
def _f(x, y, use_pfor):
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = while_loop.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
grad = gradient_ops.gradients(out_i, x)
return array_ops.reshape(grad[0], [-1])
if use_pfor:
return pfor_control_flow_ops.pfor(loop_fn, iters=3)
else:
return pfor_control_flow_ops.for_loop(
loop_fn, iters=3, loop_fn_dtypes=out.dtype)
x = constant_op.constant(np.random.uniform(size=(1, 3)))
y = constant_op.constant(np.random.uniform(size=(3, 3)))
self.assertAllClose(_f(x, y, True), _f(x, y, False))
def test_scan(self):
np.random.seed(seed=42)
data = np.random.randn(3).astype(np.float32)
def log_prob(x):
return math_ops.reduce_sum(functional_ops.scan_v2(
lambda _, yi: (x - yi)**2,
elems=data,
initializer=constant_op.constant(0.)))
x = variables.Variable(array_ops.ones([2]))
self.evaluate(x.initializer)
v_log_prob = lambda x: pfor_control_flow_ops.vectorized_map(log_prob, x)
theoretical, numerical = gradient_checker_v2.compute_gradient(
v_log_prob, (x,), delta=1e-3)
self.assertAllClose(theoretical, numerical, rtol=1e-2)
def test_scan_captured_variable(self):
if not context.executing_eagerly():
self.skipTest("Test only written for 2.x")
v = variables.Variable(math_ops.range(10, dtype=dtypes.float32))
def loop_fn(idx):
del idx
return functional_ops.scan_v2(lambda _, i: array_ops.gather(v, i),
elems=math_ops.range(v.shape[0]),
initializer=0.0)
with backprop.GradientTape() as tape:
result = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([2.] * 10, tape.gradient(result, v))
@test_util.run_all_in_graph_and_eager_modes
| WhileV2Test |
python | walkccc__LeetCode | solutions/2911. Minimum Changes to Make K Semi-palindromes/2911.py | {
"start": 0,
"end": 1476
} | class ____:
def minimumChanges(self, s: str, k: int) -> int:
n = len(s)
# factors[i] := factors of i
factors = self._getFactors(n)
# cost[i][j] := changes to make s[i..j] a semi-palindrome
cost = self._getCost(s, n, factors)
# dp[i][j] := the minimum changes to split s[i:] into j valid parts
dp = [[n] * (k + 1) for _ in range(n + 1)]
dp[n][0] = 0
for i in range(n - 1, -1, -1):
for j in range(1, k + 1):
for l in range(i + 1, n):
dp[i][j] = min(dp[i][j], dp[l + 1][j - 1] + cost[i][l])
return dp[0][k]
def _getFactors(self, n: int) -> list[list[int]]:
factors = [[1] for _ in range(n + 1)]
for d in range(2, n):
for i in range(d * 2, n + 1, d):
factors[i].append(d)
return factors
def _getCost(self, s: str, n: int, factors: list[list[int]]) -> list[list[int]]:
cost = [[0] * n for _ in range(n)]
for i, j in itertools.combinations(range(n), 2):
length = j - i + 1
minCost = length
for d in factors[length]:
minCost = min(minCost, self._getCostD(s, i, j, d))
cost[i][j] = minCost
return cost
def _getCostD(self, s: str, i: int, j: int, d: int) -> int:
"""Returns the cost to make s[i..j] a semi-palindrome of `d`."""
cost = 0
for offset in range(d):
l = i + offset
r = j - d + 1 + offset
while l < r:
if s[l] != s[r]:
cost += 1
l += d
r -= d
return cost
| Solution |
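A small check for the Solution above; the class relies on itertools being available at module level (typical for LeetCode submissions), and the expected value matches the problem's published first example:

import itertools  # needed by Solution._getCost

# s = "abcac", k = 2: split into "ab" | "cac"; "cac" is already a semi-palindrome,
# and "ab" needs one change, so the answer is 1.
print(Solution().minimumChanges("abcac", 2))  # 1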
python | allegroai__clearml | clearml/utilities/version.py | {
"start": 550,
"end": 1603
} | class ____(object):
def __init__(self, key: Any) -> None:
self._key = key
def __hash__(self) -> int:
return hash(self._key)
def __lt__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s < o)
def __le__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other: "_BaseVersion") -> bool:
return self._compare(other, lambda s, o: s != o)
def _compare(self, other: "_BaseVersion", method: Callable[[Any, Any], bool]) -> Optional[bool]:
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
| _BaseVersion |
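_BaseVersion only supplies comparison plumbing over self._key; clearml's concrete Version class provides the actual key. A hedged sketch with a hypothetical subclass (not clearml's real Version) showing how comparisons route through _compare:

class TupleVersion(_BaseVersion):
    # Hypothetical subclass: the comparison key is simply the release tuple.
    def __init__(self, *parts: int) -> None:
        super().__init__(tuple(parts))

assert TupleVersion(1, 2, 3) < TupleVersion(1, 10, 0)  # tuple order, not string order
assert TupleVersion(2, 0) == TupleVersion(2, 0)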
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B019.py | {
"start": 2148,
"end": 2270
} | class ____(type):
@functools.lru_cache
def lru_cached_instance_method_on_metaclass(cls, x: int):
...
| Metaclass |
python | getsentry__sentry | src/sentry/buffer/redis.py | {
"start": 7887,
"end": 26018
} | class ____(Buffer):
key_expire = 60 * 60 # 1 hour
pending_key = "b:p"
def __init__(self, incr_batch_size: int = 2, **options: object):
self.is_redis_cluster, self.cluster, options = get_dynamic_cluster_from_options(
"SENTRY_BUFFER_OPTIONS", options
)
self.incr_batch_size = incr_batch_size
assert self.incr_batch_size > 0
def validate(self) -> None:
validate_dynamic_cluster(self.is_redis_cluster, self.cluster)
def _coerce_val(self, value: BufferField) -> bytes:
if isinstance(value, models.Model):
value = value.pk
return force_bytes(value, errors="replace")
def _make_key(self, model: type[models.Model], filters: dict[str, Any]) -> str:
"""
Returns a Redis-compatible key for the model given filters.
"""
md5 = md5_text(
"&".join(f"{k}={_coerce_val(v)!r}" for k, v in sorted(filters.items()))
).hexdigest()
model_key = _get_model_key(model=model)
return f"b:k:{model_key}:{md5}"
def _extract_model_from_key(self, key: str) -> str | None:
"""
Extracts the model metadata from a Redis key.
"""
try:
parts = key.split(":")
if len(parts) != 4 or parts[0] != "b" or parts[1] != "k":
return None
return parts[2]
except Exception:
return None
def _make_lock_key(self, key: str) -> str:
return f"l:{key}"
def _lock_key(
self, client: RedisCluster[T] | rb.RoutingClient, key: str, ex: int
) -> None | str:
lock_key = self._make_lock_key(key)
# prevent a stampede due to scheduled tasks + periodic task
if not client.set(lock_key, "1", nx=True, ex=ex):
return None
return lock_key
@classmethod
def _dump_values(cls, values: dict[Any, Any]) -> dict[Any, tuple[str, str]]:
result = {}
for k, v in values.items():
result[k] = cls._dump_value(v)
return result
@classmethod
def _dump_value(cls, value: str | datetime | date | int | float) -> tuple[str, str]:
if isinstance(value, str):
type_ = "s"
elif isinstance(value, datetime):
type_ = "dt"
value = value.strftime("%s.%f")
elif isinstance(value, date):
type_ = "d"
value = value.strftime("%s.%f")
elif isinstance(value, int):
type_ = "i"
elif isinstance(value, float):
type_ = "f"
else:
raise TypeError(type(value))
return type_, str(value)
@classmethod
def _load_values(
cls, payload: dict[str, tuple[str, Any]]
) -> dict[str, str | datetime | date | int | float]:
result = {}
for k, (t, v) in payload.items():
result[k] = cls._load_value((t, v))
return result
@classmethod
def _load_value(cls, payload: tuple[str, Any]) -> str | datetime | date | int | float:
(type_, value) = payload
if type_ == "s":
return force_str(value)
elif type_ == "dt":
return datetime.fromtimestamp(float(value)).replace(tzinfo=timezone.utc)
elif type_ == "d":
return date.fromtimestamp(float(value))
elif type_ == "i":
return int(value)
elif type_ == "f":
return float(value)
else:
raise TypeError(f"invalid type: {type_}")
def get(
self,
model: type[models.Model],
columns: list[str],
filters: dict[str, Any],
) -> dict[str, int]:
"""
Fetches buffered values for a model/filter. Passed columns must be integer columns.
"""
key = make_key(model, filters)
pipe = self.get_redis_connection(key, transaction=False)
for col in columns:
pipe.hget(key, f"i+{col}")
results = pipe.execute()
return {
col: (int(results[i]) if results[i] is not None else 0) for i, col in enumerate(columns)
}
def get_redis_connection(self, key: str, transaction: bool = True) -> Pipeline:
if is_instance_redis_cluster(self.cluster, self.is_redis_cluster):
conn = self.cluster
elif is_instance_rb_cluster(self.cluster, self.is_redis_cluster):
conn = self.cluster.get_local_client_for_key(key)
else:
raise AssertionError("unreachable")
pipe = conn.pipeline(transaction=transaction)
return pipe
def _execute_redis_operation_no_txn(
self, key: str, operation: RedisOperation, *args: Any, **kwargs: Any
) -> Any:
metrics_str = f"redis_buffer.{operation.value}"
metrics.incr(metrics_str)
pipe = self.get_redis_connection(self.pending_key, transaction=False)
getattr(pipe, operation.value)(key, *args, **kwargs)
if args:
pipe.expire(key, self.key_expire)
return pipe.execute()[0]
def _execute_sharded_redis_operation(
self,
keys: list[str],
operation: RedisOperation,
*args: Any,
**kwargs: Any,
) -> Any:
"""
Execute a Redis operation on a list of keys, using the same args and kwargs for each key.
"""
metrics_str = f"redis_buffer.{operation.value}"
metrics.incr(metrics_str, amount=len(keys))
pipe = self.get_redis_connection(self.pending_key, transaction=False)
for key in keys:
getattr(pipe, operation.value)(key, *args, **kwargs)
if args:
pipe.expire(key, self.key_expire)
return pipe.execute()
def push_to_sorted_set(self, key: str, value: list[int] | int) -> None:
now = time()
if isinstance(value, list):
value_dict = {v: now for v in value}
else:
value_dict = {value: now}
self._execute_redis_operation_no_txn(key, RedisOperation.SORTED_SET_ADD, value_dict)
def get_sorted_set(self, key: str, min: float, max: float) -> list[tuple[int, float]]:
redis_set = self._execute_redis_operation_no_txn(
key,
RedisOperation.SORTED_SET_GET_RANGE,
min=min,
max=max,
withscores=True,
)
decoded_set = []
for items in redis_set:
item = items[0]
if isinstance(item, bytes):
item = item.decode("utf-8")
data_and_timestamp = (int(item), items[1])
decoded_set.append(data_and_timestamp)
return decoded_set
def bulk_get_sorted_set(
self, keys: list[str], min: float, max: float
) -> dict[int, list[float]]:
data_to_timestamps: dict[int, list[float]] = defaultdict(list)
redis_set = self._execute_sharded_redis_operation(
keys,
RedisOperation.SORTED_SET_GET_RANGE,
min=min,
max=max,
withscores=True,
)
for result in redis_set:
for items in result:
item = items[0]
if isinstance(item, bytes):
item = item.decode("utf-8")
data_to_timestamps[int(item)].append(items[1])
return data_to_timestamps
def delete_key(self, key: str, min: float, max: float) -> None:
self._execute_redis_operation_no_txn(
key, RedisOperation.SORTED_SET_DELETE_RANGE, min=min, max=max
)
def delete_keys(self, keys: list[str], min: float, max: float) -> None:
self._execute_sharded_redis_operation(
keys,
RedisOperation.SORTED_SET_DELETE_RANGE,
min=min,
max=max,
)
def delete_hash(
self,
model: type[models.Model],
filters: dict[str, BufferField],
fields: list[str],
) -> None:
key = make_key(model, filters)
pipe = self.get_redis_connection(self.pending_key, transaction=False)
for field in fields:
getattr(pipe, RedisOperation.HASH_DELETE.value)(key, field)
pipe.expire(key, self.key_expire)
pipe.execute()
def push_to_hash(
self,
model: type[models.Model],
filters: dict[str, BufferField],
field: str,
value: str,
) -> None:
key = make_key(model, filters)
self._execute_redis_operation_no_txn(key, RedisOperation.HASH_ADD, field, value)
def push_to_hash_bulk(
self,
model: type[models.Model],
filters: dict[str, BufferField],
data: dict[str, str],
) -> None:
key = make_key(model, filters)
self._execute_redis_operation_no_txn(key, RedisOperation.HASH_ADD_BULK, data)
def get_hash(self, model: type[models.Model], field: dict[str, BufferField]) -> dict[str, str]:
key = make_key(model, field)
redis_hash = self._execute_redis_operation_no_txn(key, RedisOperation.HASH_GET_ALL)
decoded_hash = {}
for k, v in redis_hash.items():
if isinstance(k, bytes):
k = k.decode("utf-8")
if isinstance(v, bytes):
v = v.decode("utf-8")
decoded_hash[k] = v
return decoded_hash
def get_hash_length(self, model: type[models.Model], field: dict[str, BufferField]) -> int:
key = make_key(model, field)
return self._execute_redis_operation_no_txn(key, RedisOperation.HASH_LENGTH)
def incr(
self,
model: type[models.Model],
columns: dict[str, int],
filters: dict[str, BufferField],
extra: dict[str, Any] | None = None,
signal_only: bool | None = None,
) -> None:
"""
Increment the key by doing the following:
- Insert/update a hashmap based on (model, columns)
- Perform an incrby on counters
- Perform a set (last write wins) on extra
- Perform a set on signal_only (only if True)
- Add hashmap key to pending flushes
"""
key = make_key(model, filters)
# We can't use conn.map() because we need to support multiple pending
# keys (one per Redis partition)
pipe = self.get_redis_connection(key)
pipe.hsetnx(key, "m", f"{model.__module__}.{model.__name__}")
_validate_json_roundtrip(filters, model)
if is_instance_redis_cluster(self.cluster, self.is_redis_cluster):
pipe.hsetnx(key, "f", json.dumps(self._dump_values(filters)))
else:
pipe.hsetnx(key, "f", pickle.dumps(filters, protocol=5))
for column, amount in columns.items():
pipe.hincrby(key, "i+" + column, amount)
if extra:
# Group tries to serialize 'score', so we'd need some kind of processing
# hook here
# e.g. "update score if last_seen or times_seen is changed"
_validate_json_roundtrip(extra, model)
for column, value in extra.items():
if is_instance_redis_cluster(self.cluster, self.is_redis_cluster):
pipe.hset(key, "e+" + column, json.dumps(self._dump_value(value)))
else:
pipe.hset(key, "e+" + column, pickle.dumps(value, protocol=5))
if signal_only is True:
pipe.hset(key, "s", "1")
pipe.expire(key, self.key_expire)
pipe.zadd(self.pending_key, {key: time()})
pipe.execute()
metrics.incr(
"buffer.incr",
skip_internal=True,
tags={"module": model.__module__, "model": model.__name__},
)
def process_pending(self) -> None:
client = get_cluster_routing_client(self.cluster, self.is_redis_cluster)
lock_key = self._lock_key(client, self.pending_key, ex=60)
if not lock_key:
return
pending_buffers_router = redis_buffer_router.create_pending_buffers_router(
incr_batch_size=self.incr_batch_size
)
try:
keycount = 0
if is_instance_redis_cluster(self.cluster, self.is_redis_cluster):
keys: list[str] = self.cluster.zrange(self.pending_key, 0, -1)
keycount += len(keys)
for key in keys:
model_key = self._extract_model_from_key(key=key)
pending_buffer = pending_buffers_router.get_pending_buffer(model_key=model_key)
pending_buffer.append(item=key)
if pending_buffer.full():
process_incr.apply_async(
kwargs={"batch_keys": pending_buffer.flush()},
headers={"sentry-propagate-traces": False},
)
if keys:
self.cluster.zrem(self.pending_key, *keys)
elif is_instance_rb_cluster(self.cluster, self.is_redis_cluster):
with self.cluster.all() as conn:
results = conn.zrange(self.pending_key, 0, -1)
with self.cluster.all() as conn:
for host_id, keysb in results.value.items():
if not keysb:
continue
keycount += len(keysb)
for keyb in keysb:
key = keyb.decode("utf-8")
model_key = self._extract_model_from_key(key=key)
pending_buffer = pending_buffers_router.get_pending_buffer(
model_key=model_key
)
pending_buffer.append(item=key)
if pending_buffer.full():
process_incr.apply_async(
kwargs={"batch_keys": pending_buffer.flush()},
headers={"sentry-propagate-traces": False},
)
conn.target([host_id]).zrem(self.pending_key, *keysb)
else:
raise AssertionError("unreachable")
# process any non-empty pending buffers
for pending_buffer_value in pending_buffers_router.pending_buffers():
pending_buffer = pending_buffer_value.pending_buffer
model_key = pending_buffer_value.model_key
if not pending_buffer.empty():
process_incr.apply_async(
kwargs={"batch_keys": pending_buffer.flush()},
headers={"sentry-propagate-traces": False},
)
metrics.distribution("buffer.pending-size", keycount)
finally:
client.delete(lock_key)
def process(self, key: str | None = None, batch_keys: list[str] | None = None, **kwargs: Any) -> None: # type: ignore[override]
# NOTE: This method has a totally different signature than the base class
assert not (key is None and batch_keys is None)
assert not (key is not None and batch_keys is not None)
if key is not None:
batch_keys = [key]
if batch_keys is not None:
for key in batch_keys:
self._process_single_incr(key)
def _base_process(
self,
model: type[models.Model],
columns: dict[str, int],
filters: dict[str, Any],
extra: dict[str, Any] | None = None,
signal_only: bool | None = None,
) -> Any:
return super().process(model, columns, filters, extra, signal_only)
def _process_single_incr(self, key: str) -> None:
client = get_cluster_routing_client(self.cluster, self.is_redis_cluster)
lock_key = self._lock_key(client, key, ex=10)
if not lock_key:
metrics.incr("buffer.revoked", tags={"reason": "locked"}, skip_internal=False)
logger.debug("buffer.revoked.locked", extra={"redis_key": key})
return
try:
pipe = self.get_redis_connection(key, transaction=False)
pipe.hgetall(key)
pipe.zrem(self.pending_key, key)
pipe.delete(key)
values = pipe.execute()[0]
# XXX(python3): In python2 this isn't as important since redis will
# return string types (albeit byte strings), but in py3 we get bytes
# back, and really we just want to deal with keys as strings.
values = {force_str(k): v for k, v in values.items()}
if not values:
metrics.incr("buffer.revoked", tags={"reason": "empty"}, skip_internal=False)
logger.debug("buffer.revoked.empty", extra={"redis_key": key})
return
model = import_string(force_str(values.pop("m")))
if values["f"].startswith(b"{" if not self.is_redis_cluster else "{"):
filters = self._load_values(json.loads(force_str(values.pop("f"))))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
filters = pickle.loads(force_bytes(values.pop("f")))
incr_values = {}
extra_values = {}
signal_only = None
for k, v in values.items():
if k.startswith("i+"):
incr_values[k[2:]] = int(v)
elif k.startswith("e+"):
if v.startswith(b"[" if not self.is_redis_cluster else "["):
extra_values[k[2:]] = self._load_value(json.loads(force_str(v)))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
extra_values[k[2:]] = pickle.loads(force_bytes(v))
elif k == "s":
signal_only = bool(int(v)) # Should be 1 if set
self._base_process(model, incr_values, filters, extra_values, signal_only)
finally:
client.delete(lock_key)
| RedisBuffer |
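The buffer above leans on two Redis structures: a hash per pending key that accumulates increments, and a sorted set of pending keys scored by time. A minimal sketch of that pattern, assuming a locally running Redis and the redis-py client; the key names, TTL, and function names here are placeholders, not Sentry's API.

from time import time

import redis

r = redis.Redis()  # assumes a Redis server on localhost:6379
PENDING_KEY = "buffer:pending"  # placeholder key name

def incr(key: str, column: str, amount: int = 1, ttl: int = 3600) -> None:
    # Accumulate the increment in a per-key hash and mark the key as pending.
    pipe = r.pipeline(transaction=False)
    pipe.hincrby(key, "i+" + column, amount)
    pipe.expire(key, ttl)
    pipe.zadd(PENDING_KEY, {key: time()})
    pipe.execute()

def flush() -> dict:
    # Drain every pending key, returning the accumulated hash for each one.
    out = {}
    for raw_key in r.zrange(PENDING_KEY, 0, -1):
        key = raw_key.decode("utf-8")
        pipe = r.pipeline(transaction=False)
        pipe.hgetall(key)
        pipe.zrem(PENDING_KEY, key)
        pipe.delete(key)
        out[key] = pipe.execute()[0]
    return out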
python | modin-project__modin | modin/experimental/core/io/text/custom_text_dispatcher.py | {
"start": 1110,
"end": 4150
} | class ____(TextFileDispatcher):
"""Class handles utils for reading custom text files."""
@classmethod
def _read(cls, filepath_or_buffer, columns, custom_parser, **kwargs):
r"""
Read data from `filepath_or_buffer` according to the passed `read_custom_text` `kwargs` parameters.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
`filepath_or_buffer` parameter of `read_custom_text` function.
columns : list or callable(file-like object, \*\*kwargs) -> list
Column names of list type or callable that create column names from opened file
and passed `kwargs`.
custom_parser : callable(file-like object, \*\*kwargs) -> pandas.DataFrame
Function that takes as input a part of the `filepath_or_buffer` file loaded into
memory in file-like object form.
**kwargs : dict
Parameters of `read_custom_text` function.
Returns
-------
BaseQueryCompiler
Query compiler with imported data for further processing.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
filepath_or_buffer_md = (
cls.get_path(filepath_or_buffer)
if isinstance(filepath_or_buffer, str)
else cls.get_path_or_buffer(filepath_or_buffer)
)
compression_infered = cls.infer_compression(
filepath_or_buffer, kwargs["compression"]
)
with OpenFile(filepath_or_buffer_md, "rb", compression_infered) as f:
splits, _ = cls.partitioned_file(
f,
num_partitions=NPartitions.get(),
is_quoting=kwargs.pop("is_quoting"),
nrows=kwargs["nrows"],
)
if callable(columns):
with OpenFile(filepath_or_buffer_md, "rb", compression_infered) as f:
columns = columns(f, **kwargs)
if not isinstance(columns, pandas.Index):
columns = pandas.Index(columns)
empty_pd_df = pandas.DataFrame(columns=columns)
index_name = empty_pd_df.index.name
column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)
# kwargs that will be passed to the workers
partition_kwargs = dict(
kwargs,
fname=filepath_or_buffer_md,
num_splits=num_splits,
nrows=None,
compression=compression_infered,
)
partition_ids, index_ids, dtypes_ids = cls._launch_tasks(
splits, callback=custom_parser, **partition_kwargs
)
new_query_compiler = cls._get_new_qc(
partition_ids=partition_ids,
index_ids=index_ids,
dtypes_ids=dtypes_ids,
index_col=None,
index_name=index_name,
column_widths=column_widths,
column_names=columns,
nrows=kwargs["nrows"],
)
return new_query_compiler
| ExperimentalCustomTextDispatcher |
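The `custom_parser` argument above is any callable that accepts a file-like object over one partition plus the reader kwargs and returns a `pandas.DataFrame`. A hypothetical parser for JSON-lines input written against that contract; the function name and input format are illustrative only.

import json

import pandas

def parse_json_lines(f, **kwargs) -> pandas.DataFrame:
    # `f` is a binary file-like object over one partition; each line holds one JSON object.
    lines = f.read().decode("utf-8").splitlines()
    records = [json.loads(line) for line in lines if line.strip()]
    return pandas.DataFrame(records)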
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/finetuning.py | {
"start": 13988,
"end": 21076
} | class ____(BaseFinetuning):
r"""Finetune a backbone model based on a learning rate user-defined scheduling.
When the backbone learning rate reaches the current model learning rate
and ``should_align`` is set to True, it will align with it for the rest of the training.
Args:
unfreeze_backbone_at_epoch: Epoch at which the backbone will be unfrozen.
lambda_func: Scheduling function for increasing backbone learning rate.
backbone_initial_ratio_lr:
Used to scale down the backbone learning rate compared to rest of model
backbone_initial_lr: Optional, Initial learning rate for the backbone.
By default, we will use ``current_learning / backbone_initial_ratio_lr``
should_align: Whether to align with the current learning rate when the backbone
learning rate reaches it.
initial_denom_lr: When unfreezing the backbone, the initial learning rate will be
``current_learning_rate / initial_denom_lr``.
train_bn: Whether to make Batch Normalization trainable.
verbose: Display current learning rate for model and backbone
rounding: Precision for displaying learning rate
Example::
>>> import torch
>>> import torch.nn as nn
>>> from lightning.pytorch import LightningModule, Trainer
>>> from lightning.pytorch.callbacks import BackboneFinetuning
>>> import torchvision.models as models
>>>
>>> class TransferLearningModel(LightningModule):
... def __init__(self, num_classes=10):
... super().__init__()
... # REQUIRED: Your model must have a 'backbone' attribute
... self.backbone = models.resnet50(weights=None)
... # Remove the final classification layer from backbone
... self.backbone = nn.Sequential(*list(self.backbone.children())[:-1])
...
... # Add your task-specific head
... self.head = nn.Sequential(
... nn.Flatten(),
... nn.Linear(2048, 512),
... nn.ReLU(),
... nn.Linear(512, num_classes)
... )
...
... def forward(self, x):
... # Extract features with backbone
... features = self.backbone(x)
... # Classify with head
... return self.head(features)
...
... def configure_optimizers(self):
... # Initially only optimize the head - backbone will be added by callback
... return torch.optim.Adam(self.head.parameters(), lr=1e-3)
...
>>> # Setup the callback
>>> multiplicative = lambda epoch: 1.5
>>> backbone_finetuning = BackboneFinetuning(
... unfreeze_backbone_at_epoch=10, # Start unfreezing at epoch 10
... lambda_func=multiplicative, # Gradually increase backbone LR
... backbone_initial_ratio_lr=0.1, # Start backbone at 10% of head LR
... )
>>> model = TransferLearningModel()
>>> trainer = Trainer(callbacks=[backbone_finetuning])
"""
def __init__(
self,
unfreeze_backbone_at_epoch: int = 10,
lambda_func: Callable = multiplicative,
backbone_initial_ratio_lr: float = 10e-2,
backbone_initial_lr: Optional[float] = None,
should_align: bool = True,
initial_denom_lr: float = 10.0,
train_bn: bool = True,
verbose: bool = False,
rounding: int = 12,
) -> None:
super().__init__()
self.unfreeze_backbone_at_epoch: int = unfreeze_backbone_at_epoch
self.lambda_func: Callable = lambda_func
self.backbone_initial_ratio_lr: float = backbone_initial_ratio_lr
self.backbone_initial_lr: Optional[float] = backbone_initial_lr
self.should_align: bool = should_align
self.initial_denom_lr: float = initial_denom_lr
self.train_bn: bool = train_bn
self.verbose: bool = verbose
self.rounding: int = rounding
self.previous_backbone_lr: Optional[float] = None
@override
def state_dict(self) -> dict[str, Any]:
return {
"internal_optimizer_metadata": self._internal_optimizer_metadata,
"previous_backbone_lr": self.previous_backbone_lr,
}
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
self.previous_backbone_lr = state_dict["previous_backbone_lr"]
super().load_state_dict(state_dict)
@override
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
"""
Raises:
MisconfigurationException:
If LightningModule has no nn.Module `backbone` attribute.
"""
if hasattr(pl_module, "backbone") and isinstance(pl_module.backbone, Module):
return super().on_fit_start(trainer, pl_module)
raise MisconfigurationException("The LightningModule should have a nn.Module `backbone` attribute")
@override
def freeze_before_training(self, pl_module: "pl.LightningModule") -> None:
self.freeze(pl_module.backbone)
@override
def finetune_function(self, pl_module: "pl.LightningModule", epoch: int, optimizer: Optimizer) -> None:
"""Called when the epoch begins."""
if epoch == self.unfreeze_backbone_at_epoch:
current_lr = optimizer.param_groups[0]["lr"]
initial_backbone_lr = (
self.backbone_initial_lr
if self.backbone_initial_lr is not None
else current_lr * self.backbone_initial_ratio_lr
)
self.previous_backbone_lr = initial_backbone_lr
self.unfreeze_and_add_param_group(
pl_module.backbone,
optimizer,
initial_backbone_lr,
train_bn=self.train_bn,
initial_denom_lr=self.initial_denom_lr,
)
if self.verbose:
log.info(
f"Current lr: {round(current_lr, self.rounding)}, "
f"Backbone lr: {round(initial_backbone_lr, self.rounding)}"
)
elif epoch > self.unfreeze_backbone_at_epoch:
current_lr = optimizer.param_groups[0]["lr"]
next_current_backbone_lr = self.lambda_func(epoch + 1) * self.previous_backbone_lr
next_current_backbone_lr = (
current_lr
if (self.should_align and next_current_backbone_lr > current_lr)
else next_current_backbone_lr
)
optimizer.param_groups[-1]["lr"] = next_current_backbone_lr
self.previous_backbone_lr = next_current_backbone_lr
if self.verbose:
log.info(
f"Current lr: {round(current_lr, self.rounding)}, "
f"Backbone lr: {round(next_current_backbone_lr, self.rounding)}"
)
| BackboneFinetuning |
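The finetuning callback above boils down to freezing the backbone up front and later adding its parameters to the optimizer as a new param group at a reduced learning rate. A rough standalone sketch of that idea, assuming plain PyTorch and a toy model; the 0.1 ratio mirrors `backbone_initial_ratio_lr` but is otherwise arbitrary.

import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
backbone, head = model[0], model[2]

# Phase 1: freeze the backbone so only the head trains.
for p in backbone.parameters():
    p.requires_grad = False
optimizer = torch.optim.Adam(head.parameters(), lr=1e-3)

# Phase 2 (later, e.g. at a chosen epoch): unfreeze and add the backbone
# as a new param group at a fraction of the current learning rate.
for p in backbone.parameters():
    p.requires_grad = True
current_lr = optimizer.param_groups[0]["lr"]
optimizer.add_param_group({"params": backbone.parameters(), "lr": current_lr * 0.1})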
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_auth_tokens.py | {
"start": 4062,
"end": 7623
} | class ____(APITestCase):
endpoint = "sentry-api-0-org-auth-tokens"
method = "POST"
def test_simple(self) -> None:
payload = {"name": "test token"}
self.login_as(self.user)
response = self.get_success_response(
self.organization.slug, status_code=status.HTTP_201_CREATED, **payload
)
assert response.content
token = response.data
assert token.get("token") is not None
assert token.get("tokenLastCharacters") is not None
assert token.get("dateCreated") is not None
assert token.get("dateLastUsed") is None
assert token.get("projectLastUsed") is None
assert token.get("scopes") == ["org:ci"]
assert token.get("name") == "test token"
tokenDb = OrgAuthToken.objects.get(id=token.get("id"))
assert tokenDb.name == "test token"
assert tokenDb.token_hashed is not None
assert tokenDb.token_hashed != token.get("token")
assert tokenDb.get_scopes() == token.get("scopes")
assert tokenDb.created_by is not None
assert tokenDb.created_by.id == self.user.id
# Assert that region and control URLs are both set correctly
token_payload = parse_token(token=token.get("token"))
assert token_payload is not None
assert token_payload.get("region_url", None)
assert token_payload.get("region_url") == get_region_by_name(name="us").address
assert token_payload.get("url") == options.get("system.url-prefix")
def test_no_name(self) -> None:
payload: dict[str, str] = {}
self.login_as(self.user)
response = self.get_error_response(
self.organization.slug, status_code=status.HTTP_400_BAD_REQUEST, **payload
)
assert response.content
assert response.data == {"detail": "The name cannot be blank."}
def test_blank_name(self) -> None:
payload = {"name": ""}
self.login_as(self.user)
response = self.get_error_response(
self.organization.slug, status_code=status.HTTP_400_BAD_REQUEST, **payload
)
assert response.content
assert response.data == {"detail": "The name cannot be blank."}
def test_name_too_long(self) -> None:
payload = {"name": "a" * 300}
self.login_as(self.user)
response = self.get_error_response(
self.organization.slug, status_code=status.HTTP_400_BAD_REQUEST, **payload
)
assert response.content
assert response.data == {"detail": "The name cannot be longer than 255 characters."}
def test_no_auth(self) -> None:
response = self.get_error_response(self.organization.slug)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_other_org(self) -> None:
other_org = self.create_organization()
payload = {"name": "test token"}
self.login_as(self.user)
response = self.get_error_response(other_org.slug, **payload)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_deny_token_access(self) -> None:
personal_token = ApiToken.objects.create(user=self.user, scope_list=["org:read"])
payload = {"name": "test token"}
response = self.get_error_response(
self.organization.slug,
**payload,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {personal_token.token}"},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
@control_silo_test
| OrganizationAuthTokenCreateTest |
python | google__pytype | pytype/rewrite/stack.py | {
"start": 210,
"end": 1885
} | class ____:
"""Data stack."""
def __init__(self):
self._stack: list[_Var] = []
def push(self, var: _Var) -> None:
self._stack.append(var)
def pop(self) -> _Var:
return self._stack.pop()
def popn(self, n: int) -> Sequence[_Var]:
if not n:
return ()
if len(self._stack) < n:
self._stack_size_error(f'pop {n} values')
values = self._stack[-n:]
self._stack = self._stack[:-n]
return values
def pop_and_discard(self) -> None:
_ = self._stack.pop()
def rotn(self, n: int) -> None:
"""Rotate the top n values by one."""
if n <= 1:
raise IndexError(f'rotn(n) requires n > 1, got: {n}')
if len(self._stack) < n:
self._stack_size_error(f'rotate {n} values')
top = self._stack[-1]
rot = self._stack[-n:-1]
self._stack = self._stack[:-n] + [top] + rot
def top(self) -> _Var:
return self._stack[-1]
def peek(self, n: int) -> _Var:
if n <= 0:
raise IndexError(f'peek(n) requires positive n, got: {n}')
if n > len(self._stack):
self._stack_size_error(f'peek value {n} places down')
return self._stack[-n]
def replace(self, n: int, var: _Var) -> None:
if n <= 0:
raise IndexError(f'replace(n) requires positive n, got: {n}')
if n > len(self._stack):
self._stack_size_error(f'replace value {n} places down')
self._stack[-n] = var
def _stack_size_error(self, msg):
msg = f'Trying to {msg} in a stack of size {len(self._stack)}'
raise IndexError(msg)
def __bool__(self):
return bool(self._stack)
def __len__(self):
return len(self._stack)
def __repr__(self):
return f'DataStack{self._stack}'
| DataStack |
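The `popn` and `rotn` semantics above can be checked with plain list slicing; a small worked example:

stack = ["a", "b", "c", "d"]
n = 2
popped, stack = stack[-n:], stack[:-n]
assert popped == ["c", "d"] and stack == ["a", "b"]  # popn keeps stack order

stack = ["a", "b", "c", "d"]
n = 3
stack = stack[:-n] + [stack[-1]] + stack[-n:-1]
assert stack == ["a", "d", "b", "c"]  # rotn(3) moves the top value down two places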
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/resource.py | {
"start": 40570,
"end": 42778
} | class ____(ResourceRequirement):
class_name: str
attr_name: str
partial_resource: CoercibleToResource
def is_satisfied(self, resource_defs: Mapping[str, "ResourceDefinition"]):
from dagster._config.pythonic_config.resource import coerce_to_resource
return coerce_to_resource(self.partial_resource) in resource_defs.values()
def ensure_satisfied(self, resource_defs: Mapping[str, "ResourceDefinition"]):
if not self.is_satisfied(resource_defs):
raise DagsterInvalidDefinitionError(
f"Failed to resolve resource nested at {self.class_name}.{self.attr_name}. "
"Any partially configured, nested resources must be provided as a top level resource."
)
def _get_class_name(cls: type) -> str:
"""Returns the fully qualified class name of the given class."""
return str(cls)[8:-2]
def get_resource_type_name(resource: ResourceDefinition) -> str:
"""Returns a string that can be used to identify the type of a resource.
For class-based resources, this is the fully qualified class name.
For Pythonic resources, this is the module name and resource function name.
"""
from dagster._config.pythonic_config.io_manager import (
ConfigurableIOManagerFactoryResourceDefinition,
)
if type(resource) in (ResourceDefinition, IOManagerDefinition):
original_resource_fn = (
resource._hardcoded_resource_type # noqa: SLF001
if resource._hardcoded_resource_type # noqa: SLF001
else resource.resource_fn
)
module_name = check.not_none(inspect.getmodule(original_resource_fn)).__name__
resource_type = f"{module_name}.{original_resource_fn.__name__}"
# if it's a Pythonic resource, get the underlying Pythonic class name
elif isinstance(
resource,
(
ConfigurableResourceFactoryResourceDefinition,
ConfigurableIOManagerFactoryResourceDefinition,
),
):
resource_type = _get_class_name(resource.configurable_resource_cls)
else:
resource_type = _get_class_name(type(resource))
return resource_type
| PartialResourceDependencyRequirement |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 236118,
"end": 238652
} | class ____(Operation):
def __init__(self, mode="valid", *, name=None):
super().__init__(name=name)
self.mode = mode
def call(self, x1, x2):
return backend.numpy.correlate(x1, x2, mode=self.mode)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
if len(x1_shape) != 1:
raise ValueError(
"`x1` must be a 1-dimensional tensor, but received"
+ f" shape {x1_shape}"
)
if len(x2_shape) != 1:
raise ValueError(
"`x2` must be a 1-dimensional tensor, but received"
+ f" shape {x2_shape}"
)
x1_len, x2_len = x1_shape[0], x2_shape[0]
output_shape = (
np.maximum(x1_len, x2_len) - np.minimum(x1_len, x2_len) + 1,
)
if self.mode == "same":
output_shape = (np.maximum(x1_len, x2_len),)
elif self.mode == "full":
output_shape = (x1_len + x2_len - 1,)
if self.mode not in ("valid", "same", "full"):
raise ValueError(
"`mode` must be either `valid`, `same`, or `full`, but"
f"received: {self.mode}"
)
output_dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
if output_dtype == "int64":
output_dtype = "float64"
elif output_dtype not in ["bfloat16", "float16", "float64"]:
output_dtype = "float32"
return KerasTensor(output_shape, dtype=output_dtype)
@keras_export(["keras.ops.correlate", "keras.ops.numpy.correlate"])
def correlate(x1, x2, mode="valid"):
"""Compute the cross-correlation of two 1-dimensional tensors.
Args:
x1: First 1-dimensional input tensor of length M.
x2: Second 1-dimensional input tensor of length N.
mode: Either `valid`, `same` or `full`.
By default the mode is set to `valid`, which returns
an output of length max(M, N) - min(M, N) + 1.
`same` returns an output of length max(M, N).
`full` mode returns the convolution at each point of
overlap, with an output length of N+M-1
Returns:
Output tensor, cross-correlation of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Correlate(mode=mode).symbolic_call(x1, x2)
return backend.numpy.correlate(x1, x2, mode=mode)
| Correlate |
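The output-length rules in the docstring above match NumPy's own `correlate`; a quick check with M = 5 and N = 3:

import numpy as np

x1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])  # M = 5
x2 = np.array([1.0, 0.0, -1.0])           # N = 3

assert np.correlate(x1, x2, mode="valid").shape == (3,)  # max(M, N) - min(M, N) + 1
assert np.correlate(x1, x2, mode="same").shape == (5,)   # max(M, N)
assert np.correlate(x1, x2, mode="full").shape == (7,)   # M + N - 1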
python | python-openxml__python-docx | tests/test_comments.py | {
"start": 583,
"end": 6562
} | class ____:
"""Unit-test suite for `docx.comments.Comments` objects."""
@pytest.mark.parametrize(
("cxml", "count"),
[
("w:comments", 0),
("w:comments/w:comment", 1),
("w:comments/(w:comment,w:comment,w:comment)", 3),
],
)
def it_knows_how_many_comments_it_contains(self, cxml: str, count: int, package_: Mock):
comments_elm = cast(CT_Comments, element(cxml))
comments = Comments(
comments_elm,
CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
),
)
assert len(comments) == count
def it_is_iterable_over_the_comments_it_contains(self, package_: Mock):
comments_elm = cast(CT_Comments, element("w:comments/(w:comment,w:comment)"))
comments = Comments(
comments_elm,
CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
),
)
comment_iter = iter(comments)
comment1 = next(comment_iter)
assert type(comment1) is Comment, "expected a `Comment` object"
comment2 = next(comment_iter)
assert type(comment2) is Comment, "expected a `Comment` object"
with pytest.raises(StopIteration):
next(comment_iter)
def it_can_get_a_comment_by_id(self, package_: Mock):
comments_elm = cast(
CT_Comments,
element("w:comments/(w:comment{w:id=1},w:comment{w:id=2},w:comment{w:id=3})"),
)
comments = Comments(
comments_elm,
CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
),
)
comment = comments.get(2)
assert type(comment) is Comment, "expected a `Comment` object"
assert comment._comment_elm is comments_elm.comment_lst[1]
def but_it_returns_None_when_no_comment_with_that_id_exists(self, package_: Mock):
comments_elm = cast(
CT_Comments,
element("w:comments/(w:comment{w:id=1},w:comment{w:id=2},w:comment{w:id=3})"),
)
comments = Comments(
comments_elm,
CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
),
)
comment = comments.get(4)
assert comment is None, "expected None when no comment with that id exists"
def it_can_add_a_new_comment(self, package_: Mock):
comments_elm = cast(CT_Comments, element("w:comments"))
comments_part = CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
)
now_before = dt.datetime.now(dt.timezone.utc).replace(microsecond=0)
comments = Comments(comments_elm, comments_part)
comment = comments.add_comment()
now_after = dt.datetime.now(dt.timezone.utc).replace(microsecond=0)
# -- a comment is unconditionally added, and returned for any further adjustment --
assert isinstance(comment, Comment)
# -- it is "linked" to the comments part so it can add images and hyperlinks, etc. --
assert comment.part is comments_part
# -- comment numbering starts at 0, and is incremented for each new comment --
assert comment.comment_id == 0
# -- author is a required attribute, but is the empty string by default --
assert comment.author == ""
# -- initials is an optional attribute, but defaults to the empty string, same as Word --
assert comment.initials == ""
# -- timestamp is also optional, but defaults to now-UTC --
assert comment.timestamp is not None
assert now_before <= comment.timestamp <= now_after
# -- by default, a new comment contains a single empty paragraph --
assert [p.text for p in comment.paragraphs] == [""]
# -- that paragraph has the "CommentText" style, same as Word applies --
comment_elm = comment._comment_elm
assert len(comment_elm.p_lst) == 1
p = comment_elm.p_lst[0]
assert p.style == "CommentText"
# -- and that paragraph contains a single run with the necessary annotation reference --
assert len(p.r_lst) == 1
r = comment_elm.p_lst[0].r_lst[0]
assert r.style == "CommentReference"
assert r[-1].tag == qn("w:annotationRef")
def and_it_can_add_text_to_the_comment_when_adding_it(self, comments: Comments, package_: Mock):
comment = comments.add_comment(text="para 1\n\npara 2")
assert len(comment.paragraphs) == 3
assert [p.text for p in comment.paragraphs] == ["para 1", "", "para 2"]
assert all(p._p.style == "CommentText" for p in comment.paragraphs)
def and_it_sets_the_author_and_their_initials_when_adding_a_comment_when_provided(
self, comments: Comments, package_: Mock
):
comment = comments.add_comment(author="Steve Canny", initials="SJC")
assert comment.author == "Steve Canny"
assert comment.initials == "SJC"
# -- fixtures --------------------------------------------------------------------------------
@pytest.fixture
def comments(self, package_: Mock) -> Comments:
comments_elm = cast(CT_Comments, element("w:comments"))
comments_part = CommentsPart(
PackURI("/word/comments.xml"),
CT.WML_COMMENTS,
comments_elm,
package_,
)
return Comments(comments_elm, comments_part)
@pytest.fixture
def package_(self, request: FixtureRequest):
return instance_mock(request, Package)
| DescribeComments |
python | Pylons__pyramid | src/pyramid/threadlocal.py | {
"start": 2056,
"end": 2466
} | class ____:
def __init__(self, request):
self.request = request
def begin(self):
request = self.request
registry = request.registry
manager.push({'registry': registry, 'request': request})
return request
def end(self):
manager.pop()
def __enter__(self):
return self.begin()
def __exit__(self, *args):
self.end()
| RequestContext |
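The begin/end pattern above pushes a dict onto a shared manager stack and pops it on exit. A self-contained sketch of the same idea with a local stack; the names here are placeholders, not Pyramid's threadlocal API.

class _Manager:
    # A plain stack standing in for a threadlocal manager.
    def __init__(self):
        self._stack = []

    def push(self, item):
        self._stack.append(item)

    def pop(self):
        return self._stack.pop()

    def get(self):
        return self._stack[-1] if self._stack else None

manager = _Manager()

class Context:
    def __init__(self, request):
        self.request = request

    def __enter__(self):
        manager.push({"request": self.request})
        return self.request

    def __exit__(self, *exc):
        manager.pop()

with Context({"path": "/hello"}) as req:
    assert manager.get() == {"request": req}
assert manager.get() is None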
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 19061,
"end": 19539
} | class ____(OrganizationRoleNameMixin, wtforms.Form):
def __init__(self, *args, orgtype, **kwargs):
super().__init__(*args, **kwargs)
if orgtype != OrganizationType.Company:
# Remove "Billing Manager" choice if organization is not a "Company"
self.role_name.choices = [
choice
for choice in self.role_name.choices
if "Billing Manager" not in choice
]
| ChangeOrganizationRoleForm |
python | wandb__wandb | wandb/vendor/pygments/lexers/haskell.py | {
"start": 23443,
"end": 24158
} | class ____(LiterateLexer):
"""
For Literate Cryptol (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
.. versionadded:: 2.0
"""
name = 'Literate Cryptol'
aliases = ['lcry', 'literate-cryptol', 'lcryptol']
filenames = ['*.lcry']
mimetypes = ['text/x-literate-cryptol']
def __init__(self, **options):
crylexer = CryptolLexer(**options)
LiterateLexer.__init__(self, crylexer, **options)
| LiterateCryptolLexer |
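Bird-style literate sources mark code lines with a leading "> "; a minimal extractor for just that convention (LaTeX-style blocks are not handled, and this is unrelated to Pygments' actual delegating-lexer machinery):

def extract_bird_code(text: str) -> str:
    # Keep only lines that start with the Bird-style "> " marker, stripped of it.
    return "\n".join(line[2:] for line in text.splitlines() if line.startswith("> "))

sample = "Some prose.\n> x = 1\n> y = x + 1\nMore prose.\n"
assert extract_bird_code(sample) == "x = 1\ny = x + 1"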
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 76901,
"end": 76997
} | class ____(blas_ilp64_opt_info):
symbol_prefix = ''
symbol_suffix = '64_'
| blas64__opt_info |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py | {
"start": 4722,
"end": 4918
} | class ____(NamedTuple):
class_name: str
description: str
decision_type: AutoMaterializeDecisionType
@whitelist_for_serdes(serializer=BackcompatNullSerializer)
| AutoMaterializeRuleSnapshot |
python | getsentry__sentry | src/sentry/analytics/events/first_replay_sent.py | {
"start": 74,
"end": 270
} | class ____(analytics.Event):
organization_id: int
project_id: int
platform: str | None = None
user_id: int | None = None
analytics.register(FirstReplaySentEvent)
| FirstReplaySentEvent |
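The event above follows a dataclass-plus-registration pattern; a generic, self-contained sketch of that pattern (the registry and decorator here are illustrative, not Sentry's analytics module):

from dataclasses import dataclass

REGISTRY: dict = {}

def register(event_cls):
    # Register an event class under its class name.
    REGISTRY[event_cls.__name__] = event_cls
    return event_cls

@register
@dataclass
class FirstThingSent:
    organization_id: int
    project_id: int
    platform: str | None = None

event = REGISTRY["FirstThingSent"](organization_id=1, project_id=2)
print(event)  # FirstThingSent(organization_id=1, project_id=2, platform=None)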
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_plugin_deprecation_info.py | {
"start": 191,
"end": 3348
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-plugin-deprecation-info"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.plugin_name = "test-plugin"
self.organization = self.create_organization(owner=self.user)
self.project_with_plugin = self.create_project(
organization=self.organization, name="Project With Plugin"
)
ProjectOption.objects.set_value(
self.project_with_plugin, f"{self.plugin_name}:enabled", True
)
self.project_without_plugin = self.create_project(
organization=self.organization, name="Project Without Plugin"
)
def reverse_url(self, organization_slug=None):
return reverse(
self.endpoint,
kwargs={
"organization_id_or_slug": organization_slug or self.organization.slug,
"plugin_slug": "Test-Plugin",
},
)
def test_project_with_linked_issue(self):
group_with_plugin = self.create_group(project=self.project_with_plugin)
group_without_plugin = self.create_group(project=self.project_without_plugin)
GroupMeta.objects.create(
group=group_with_plugin, key=f"{self.plugin_name}:tid", value="ticket-123"
)
GroupMeta.objects.create(
group=group_without_plugin, key=f"{self.plugin_name}:tid", value="ticket-456"
)
url = self.reverse_url()
response = self.client.get(url)
assert response.status_code == 200
assert response.data["affected_rules"] == []
# Should only return the group from the enabled project
affected_groups = response.data["affected_groups"]
assert len(affected_groups) == 1
assert f"/issues/{group_with_plugin.id}/" in affected_groups[0]
def test_project_with_plugin_rule(self):
rule_action_data = [
{
"id": "sentry.rules.actions.notify_event_service.NotifyEventServiceAction",
"service": self.plugin_name,
"name": f"Send a notification via {self.plugin_name}",
}
]
rule = self.create_project_rule(
project=self.project_with_plugin,
action_data=rule_action_data,
name="Test Plugin Alert Rule",
)
url = self.reverse_url()
response = self.client.get(url)
assert response.status_code == 200
# Should return the rule URL in affected_rules
affected_rules = response.data["affected_rules"]
assert len(affected_rules) == 1
expected_rule_url = f"/organizations/{self.organization.slug}/alerts/rules/{self.project_with_plugin.slug}/{rule.id}/details/"
assert expected_rule_url in affected_rules[0]
def test_permission_denied_for_non_member(self):
non_member_user = self.create_user("non-member@example.com")
self.login_as(non_member_user)
url = self.reverse_url()
response = self.client.get(url)
assert response.status_code == 403
| OrganizationPluginDeprecationInfoEndpointTest |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/plugins/test_utils.py | {
"start": 2578,
"end": 26543
} | class ____(dict):
def __str__(self):
castable = []
for key, val in self.items():
try:
str(key), str(val)
castable.append((key, val))
except (TypeError, NotImplementedError):
continue
return str(dict(castable))
@patch("airflow.providers.openlineage.utils.utils.metadata.distributions")
def test_get_all_packages_installed(mock_distributions):
mock_distributions.return_value = [MagicMock(metadata={"Name": "package1"}, version="1.0.0")]
assert _get_all_packages_installed() == {"package1": "1.0.0"}
@patch("airflow.providers.openlineage.utils.utils.conf.debug_mode", return_value=False)
def test_get_airflow_debug_facet_not_in_debug_mode(mock_debug_mode):
assert get_airflow_debug_facet() == {}
@patch("airflow.providers.openlineage.utils.utils._get_all_packages_installed")
@patch("airflow.providers.openlineage.utils.utils.conf.debug_mode")
def test_get_airflow_debug_facet_logging_set_to_debug(mock_debug_mode, mock_get_packages):
mock_debug_mode.return_value = True
mock_get_packages.return_value = {"package1": "1.0.0"}
result = get_airflow_debug_facet()
expected_result = {"debug": AirflowDebugRunFacet(packages={"package1": "1.0.0"})}
assert result == expected_result
@pytest.mark.db_test
@pytest.mark.need_serialized_dag
def test_get_dagrun_start_end(dag_maker):
start_date = datetime.datetime(2022, 1, 1)
end_date = datetime.datetime(2022, 1, 1, hour=2)
with dag_maker("test", start_date=start_date, end_date=end_date, schedule="@once") as dag:
pass
dag_maker.sync_dagbag_to_db()
run_id = str(uuid.uuid1())
if AIRFLOW_V_3_1_PLUS:
data_interval = get_next_data_interval(dag.timetable, dag_maker.dag_model)
else:
data_interval = dag.get_next_data_interval(dag_maker.dag_model)
if AIRFLOW_V_3_0_PLUS:
dagrun_kwargs = {
"logical_date": data_interval.start,
"run_after": data_interval.end,
"triggered_by": DagRunTriggeredByType.TEST,
}
else:
dagrun_kwargs = {"execution_date": data_interval.start}
dagrun = dag.create_dagrun(
state=State.NONE,
run_id=run_id,
run_type=DagRunType.MANUAL,
data_interval=data_interval,
**dagrun_kwargs,
)
assert dagrun.data_interval_start is not None
start_date_tz = datetime.datetime(2022, 1, 1, tzinfo=timezone.utc)
end_date_tz = datetime.datetime(2022, 1, 1, hour=2, tzinfo=timezone.utc)
assert dagrun.data_interval_start, dagrun.data_interval_end == (start_date_tz, end_date_tz)
def test_parse_version():
assert parse_version("2.3.0") >= parse_version("2.3.0.dev0")
assert parse_version("2.3.0.dev0") >= parse_version("2.3.0.dev0")
assert parse_version("2.3.0.beta1") >= parse_version("2.3.0.dev0")
assert parse_version("2.3.1") >= parse_version("2.3.0.dev0")
assert parse_version("2.4.0") >= parse_version("2.3.0.dev0")
assert parse_version("3.0.0") >= parse_version("2.3.0.dev0")
assert parse_version("2.2.0") < parse_version("2.3.0.dev0")
assert parse_version("2.1.3") < parse_version("2.3.0.dev0")
assert parse_version("2.2.4") < parse_version("2.3.0.dev0")
assert parse_version("1.10.15") < parse_version("2.3.0.dev0")
assert parse_version("2.2.4.dev0") < parse_version("2.3.0.dev0")
def test_safe_dict():
assert str(SafeStrDict({"a": 1})) == str({"a": 1})
class NotImplemented:
def __str__(self):
raise NotImplementedError
assert str(SafeStrDict({"a": NotImplemented()})) == str({})
def test_info_json_encodable():
class TestInfo(InfoJsonEncodable):
excludes = ["exclude_1", "exclude_2", "imastring"]
casts = {"iwanttobeint": lambda x: int(x.imastring)}
renames = {"_faulty_name": "goody_name"}
@define
class Test:
exclude_1: str
imastring: str
_faulty_name: str
donotcare: str
obj = Test("val", "123", "not_funny", "abc")
assert json.loads(json.dumps(TestInfo(obj))) == {
"iwanttobeint": 123,
"goody_name": "not_funny",
"donotcare": "abc",
}
def test_info_json_encodable_without_slots():
class TestInfo(InfoJsonEncodable):
excludes = ["exclude_1", "exclude_2", "imastring"]
casts = {"iwanttobeint": lambda x: int(x.imastring)}
renames = {"_faulty_name": "goody_name"}
@define(slots=False)
class Test:
exclude_1: str
imastring: str
_faulty_name: str
donotcare: str
obj = Test("val", "123", "not_funny", "abc")
assert json.loads(json.dumps(TestInfo(obj))) == {
"iwanttobeint": 123,
"goody_name": "not_funny",
"donotcare": "abc",
}
def test_info_json_encodable_list_does_flatten():
class TestInfo(InfoJsonEncodable):
includes = ["alist"]
@define
class Test:
alist: list[str]
obj = Test(["a", "b", "c"])
assert json.loads(json.dumps(TestInfo(obj))) == {"alist": "['a', 'b', 'c']"}
def test_info_json_encodable_list_does_include_nonexisting():
class TestInfo(InfoJsonEncodable):
includes = ["exists", "doesnotexist"]
@define
class Test:
exists: str
obj = Test("something")
assert json.loads(json.dumps(TestInfo(obj))) == {"exists": "something"}
def test_is_name_redactable():
class NotMixin:
def __init__(self):
self.password = "passwd"
class Mixined(RedactMixin):
_skip_redact = ["password"]
def __init__(self):
self.password = "passwd"
self.transparent = "123"
assert _is_name_redactable("password", NotMixin())
assert not _is_name_redactable("password", Mixined())
assert _is_name_redactable("transparent", Mixined())
@pytest.mark.enable_redact
def test_redact_with_exclusions(monkeypatch):
sm = SecretsMasker()
if AIRFLOW_V_3_1_PLUS:
sm.sensitive_variables_fields = list(DEFAULT_SENSITIVE_FIELDS)
redactor = OpenLineageRedactor.from_masker(sm)
class NotMixin:
def __init__(self):
self.password = "passwd"
class Proxy:
pass
def default(self, o):
if isinstance(o, NotMixin):
return o.__dict__
raise TypeError
assert redactor.redact(NotMixin()).password == "passwd"
monkeypatch.setattr(JSONEncoder, "default", default)
assert redactor.redact(NotMixin()).password == "***"
assert redactor.redact(Proxy()) == "<<non-redactable: Proxy>>"
assert redactor.redact({"a": "a", "b": Proxy()}) == {"a": "a", "b": "<<non-redactable: Proxy>>"}
class Mixined(RedactMixin):
_skip_redact = ["password"]
def __init__(self):
self.password = "passwd"
self.transparent = "123"
@define
class NestedMixined(RedactMixin):
_skip_redact = ["nested_field"]
password: str
nested_field: Any
assert redactor.redact(Mixined()).password == "passwd"
assert redactor.redact(Mixined()).transparent == "123"
assert redactor.redact({"password": "passwd"}) == {"password": "***"}
redacted_nested = redactor.redact(NestedMixined("passwd", NestedMixined("passwd", None)))
assert redacted_nested == NestedMixined("***", NestedMixined("passwd", None))
def test_get_fully_qualified_class_name():
from airflow.providers.openlineage.plugins.adapter import OpenLineageAdapter
result = get_fully_qualified_class_name(BashOperator(task_id="test", bash_command="exit 0;"))
assert result == "airflow.providers.standard.operators.bash.BashOperator"
result = get_fully_qualified_class_name(OpenLineageAdapter())
assert result == "airflow.providers.openlineage.plugins.adapter.OpenLineageAdapter"
@patch("airflow.providers.openlineage.conf.disabled_operators")
def test_is_operator_disabled(mock_disabled_operators):
mock_disabled_operators.return_value = {}
op = BashOperator(task_id="test", bash_command="exit 0;")
assert is_operator_disabled(op) is False
mock_disabled_operators.return_value = {"random_string"}
assert is_operator_disabled(op) is False
mock_disabled_operators.return_value = {
"airflow.providers.standard.operators.bash.BashOperator",
"airflow.providers.standard.operators.python.PythonOperator",
}
assert is_operator_disabled(op) is True
@patch("airflow.providers.openlineage.conf.include_full_task_info")
def test_includes_full_task_info(mock_include_full_task_info):
mock_include_full_task_info.return_value = True
# There should be no 'bash_command' in excludes and it's not in includes - so
# it's a good choice for checking TaskInfo vs TaskInfoComplete
assert (
"bash_command"
in get_airflow_run_facet(
MagicMock(),
MagicMock(),
MagicMock(),
BashOperator(task_id="bash_op", bash_command="sleep 1"),
MagicMock(),
)["airflow"].task
)
@patch("airflow.providers.openlineage.conf.include_full_task_info")
def test_does_not_include_full_task_info(mock_include_full_task_info):
mock_include_full_task_info.return_value = False
# There should be no 'bash_command' in excludes and it's not in includes - so
# it's a good choice for checking TaskInfo vs TaskInfoComplete
assert (
"bash_command"
not in get_airflow_run_facet(
MagicMock(),
MagicMock(),
MagicMock(),
BashOperator(task_id="bash_op", bash_command="sleep 1"),
MagicMock(),
)["airflow"].task
)
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="This test checks serialization only in 3.0 conditions")
def test_serialize_timetable_complex_with_alias():
from airflow.providers.common.compat.assets import AssetAlias, AssetAll, AssetAny
from airflow.timetables.simple import AssetTriggeredTimetable
asset = AssetAny(
Asset(name="2", uri="test://2", group="test-group"),
AssetAlias(name="example-alias", group="test-group"),
Asset(name="3", uri="test://3", group="test-group"),
AssetAll(AssetAlias("another"), Asset("4")),
)
dag = MagicMock()
dag.timetable = AssetTriggeredTimetable(asset)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"asset_condition": {
"__type": DagAttributeTypes.ASSET_ANY,
"objects": [
{
"__type": DagAttributeTypes.ASSET,
"extra": {},
"uri": "test://2/",
"name": "2",
"group": "test-group",
},
{
"__type": DagAttributeTypes.ASSET_ALIAS,
"name": "example-alias",
"group": "test-group",
},
{
"__type": DagAttributeTypes.ASSET,
"extra": {},
"uri": "test://3/",
"name": "3",
"group": "test-group",
},
{
"__type": DagAttributeTypes.ASSET_ALL,
"objects": [
{
"__type": DagAttributeTypes.ASSET_ALIAS,
"name": "another",
"group": "asset",
},
{
"__type": DagAttributeTypes.ASSET,
"extra": {},
"uri": "4",
"name": "4",
"group": "asset",
},
],
},
],
}
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="This test checks serialization only in 3.0 conditions")
def test_serialize_timetable_single_asset():
dag = DAG(dag_id="test", start_date=datetime.datetime(2025, 1, 1), schedule=Asset("a"))
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"asset_condition": {
"__type": DagAttributeTypes.ASSET,
"uri": "a",
"name": "a",
"group": "asset",
"extra": {},
}
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="This test checks serialization only in 3.0 conditions")
def test_serialize_timetable_list_of_assets():
dag = DAG(dag_id="test", start_date=datetime.datetime(2025, 1, 1), schedule=[Asset("a"), Asset("b")])
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"asset_condition": {
"__type": DagAttributeTypes.ASSET_ALL,
"objects": [
{"__type": DagAttributeTypes.ASSET, "uri": "a", "name": "a", "group": "asset", "extra": {}},
{"__type": DagAttributeTypes.ASSET, "uri": "b", "name": "b", "group": "asset", "extra": {}},
],
}
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="This test checks serialization only in 3.0 conditions")
def test_serialize_timetable_with_complex_logical_condition():
dag = DAG(
dag_id="test",
start_date=datetime.datetime(2025, 1, 1),
schedule=(Asset("ds1", extra={"some_extra": 1}) | Asset("ds2"))
& (Asset("ds3") | Asset("ds4", extra={"another_extra": 345})),
)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"asset_condition": {
"__type": DagAttributeTypes.ASSET_ALL,
"objects": [
{
"__type": DagAttributeTypes.ASSET_ANY,
"objects": [
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds1",
"extra": {"some_extra": 1},
"name": "ds1",
"group": "asset",
},
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds2",
"extra": {},
"name": "ds2",
"group": "asset",
},
],
},
{
"__type": DagAttributeTypes.ASSET_ANY,
"objects": [
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds3",
"extra": {},
"name": "ds3",
"group": "asset",
},
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds4",
"extra": {"another_extra": 345},
"name": "ds4",
"group": "asset",
},
],
},
],
}
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="This test checks serialization only in 3.0 conditions")
def test_serialize_timetable_with_dataset_or_time_schedule():
from airflow.timetables.assets import AssetOrTimeSchedule
from airflow.timetables.trigger import CronTriggerTimetable
dag = DAG(
dag_id="test",
start_date=datetime.datetime(2025, 1, 1),
schedule=AssetOrTimeSchedule(
timetable=CronTriggerTimetable("0 0 * 3 *", timezone="UTC"),
assets=(Asset("ds1", extra={"some_extra": 1}) | Asset("ds2"))
& (Asset("ds3") | Asset("ds4", extra={"another_extra": 345})),
),
)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"timetable": {
Encoding.TYPE: "airflow.timetables.trigger.CronTriggerTimetable",
Encoding.VAR: {
"expression": "0 0 * 3 *",
"timezone": "UTC",
"interval": 0.0,
"run_immediately": False,
},
},
"asset_condition": {
"__type": DagAttributeTypes.ASSET_ALL,
"objects": [
{
"__type": DagAttributeTypes.ASSET_ANY,
"objects": [
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds1",
"extra": {"some_extra": 1},
"name": "ds1",
"group": "asset",
},
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds2",
"extra": {},
"name": "ds2",
"group": "asset",
},
],
},
{
"__type": DagAttributeTypes.ASSET_ANY,
"objects": [
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds3",
"extra": {},
"name": "ds3",
"group": "asset",
},
{
"__type": DagAttributeTypes.ASSET,
"uri": "ds4",
"extra": {"another_extra": 345},
"name": "ds4",
"group": "asset",
},
],
},
],
},
}
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS,
reason="This test checks serialization only in 2.10 conditions",
)
def test_serialize_timetable_2_10_complex_with_alias():
from airflow.providers.common.compat.assets import AssetAlias, AssetAll, AssetAny
from airflow.timetables.simple import DatasetTriggeredTimetable
asset = AssetAny(
Asset("2"),
AssetAlias("example-alias"),
Asset("3"),
AssetAll(AssetAlias("this-should-not-be-seen"), Asset("4")),
)
dag = MagicMock()
dag.timetable = DatasetTriggeredTimetable(asset)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"dataset_condition": {
"__type": DagAttributeTypes.DATASET_ANY,
"objects": [
{"__type": DagAttributeTypes.DATASET, "extra": None, "uri": "2"},
{"__type": DagAttributeTypes.DATASET_ANY, "objects": []},
{"__type": DagAttributeTypes.DATASET, "extra": None, "uri": "3"},
{
"__type": DagAttributeTypes.DATASET_ALL,
"objects": [
{"__type": DagAttributeTypes.DATASET_ANY, "objects": []},
{"__type": DagAttributeTypes.DATASET, "extra": None, "uri": "4"},
],
},
],
}
}
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS,
reason="This test checks serialization only in 2.10 conditions",
)
def test_serialize_timetable_2_10_single_asset():
dag = DAG(dag_id="test", start_date=datetime.datetime(2025, 1, 1), schedule=Asset("a"))
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"dataset_condition": {"__type": DagAttributeTypes.DATASET, "uri": "a", "extra": None}
}
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS,
reason="This test checks serialization only in 2.10 conditions",
)
def test_serialize_timetable_2_10_list_of_assets():
dag = DAG(dag_id="test", start_date=datetime.datetime(2025, 1, 1), schedule=[Asset("a"), Asset("b")])
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"dataset_condition": {
"__type": DagAttributeTypes.DATASET_ALL,
"objects": [
{"__type": DagAttributeTypes.DATASET, "extra": None, "uri": "a"},
{"__type": DagAttributeTypes.DATASET, "extra": None, "uri": "b"},
],
}
}
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS,
reason="This test checks serialization only in 2.10 conditions",
)
def test_serialize_timetable_2_10_with_complex_logical_condition():
dag = DAG(
dag_id="test",
start_date=datetime.datetime(2025, 1, 1),
schedule=(Asset("ds1", extra={"some_extra": 1}) | Asset("ds2"))
& (Asset("ds3") | Asset("ds4", extra={"another_extra": 345})),
)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"dataset_condition": {
"__type": DagAttributeTypes.DATASET_ALL,
"objects": [
{
"__type": DagAttributeTypes.DATASET_ANY,
"objects": [
{"__type": DagAttributeTypes.DATASET, "uri": "ds1", "extra": {"some_extra": 1}},
{"__type": DagAttributeTypes.DATASET, "uri": "ds2", "extra": None},
],
},
{
"__type": DagAttributeTypes.DATASET_ANY,
"objects": [
{"__type": DagAttributeTypes.DATASET, "uri": "ds3", "extra": None},
{"__type": DagAttributeTypes.DATASET, "uri": "ds4", "extra": {"another_extra": 345}},
],
},
],
}
}
@pytest.mark.skipif(
AIRFLOW_V_3_0_PLUS,
reason="This test checks serialization only in 2.10 conditions",
)
def test_serialize_timetable_2_10_with_dataset_or_time_schedule():
from airflow.timetables.datasets import DatasetOrTimeSchedule
from airflow.timetables.trigger import CronTriggerTimetable
dag = DAG(
dag_id="test",
start_date=datetime.datetime(2025, 1, 1),
schedule=DatasetOrTimeSchedule(
timetable=CronTriggerTimetable("0 0 * 3 *", timezone="UTC"),
datasets=(Asset("ds1", extra={"some_extra": 1}) | Asset("ds2"))
& (Asset("ds3") | Asset("ds4", extra={"another_extra": 345})),
),
)
dag_info = DagInfo(dag)
assert dag_info.timetable == {
"timetable": {
"__type": "airflow.timetables.trigger.CronTriggerTimetable",
"__var": {"expression": "0 0 * 3 *", "timezone": "UTC", "interval": 0.0},
},
"dataset_condition": {
"__type": DagAttributeTypes.DATASET_ALL,
"objects": [
{
"__type": DagAttributeTypes.DATASET_ANY,
"objects": [
{"__type": DagAttributeTypes.DATASET, "uri": "ds1", "extra": {"some_extra": 1}},
{"__type": DagAttributeTypes.DATASET, "uri": "ds2", "extra": None},
],
},
{
"__type": DagAttributeTypes.DATASET_ANY,
"objects": [
{"__type": DagAttributeTypes.DATASET, "uri": "ds3", "extra": None},
{"__type": DagAttributeTypes.DATASET, "uri": "ds4", "extra": {"another_extra": 345}},
],
},
],
},
}
@pytest.mark.parametrize(
("airflow_version", "ol_version"),
[
("2.9.3", "1.12.2"),
("2.10.1", "1.13.0"),
("3.0.0", "1.14.0"),
],
)
def test_get_processing_engine_facet(airflow_version, ol_version):
with patch("airflow.providers.openlineage.utils.utils.AIRFLOW_VERSION", airflow_version):
with patch("airflow.providers.openlineage.utils.utils.OPENLINEAGE_PROVIDER_VERSION", ol_version):
result = get_processing_engine_facet()
assert result["processing_engine"].version == airflow_version
assert result["processing_engine"].openlineageAdapterVersion == ol_version
| SafeStrDict |
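A self-contained variant of the safe-stringification idea above, dropping any entry whose key or value refuses to stringify:

class SafeDict(dict):
    def __str__(self):
        castable = []
        for key, val in self.items():
            try:
                str(key), str(val)
            except (TypeError, NotImplementedError):
                continue
            castable.append((key, val))
        return str(dict(castable))

class Unprintable:
    def __str__(self):
        raise NotImplementedError

assert str(SafeDict({"ok": 1, "bad": Unprintable()})) == "{'ok': 1}"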
python | Netflix__metaflow | test/core/tests/foreach_in_switch.py | {
"start": 82,
"end": 1146
} | class ____(MetaflowTest):
PRIORITY = 2
ONLY_GRAPHS = ["foreach_in_switch"]
@steps(0, ["start-foreach-in-switch"], required=True)
def step_start(self):
self.mode = "process"
@steps(0, ["process-items"], required=True)
def step_process(self):
self.items_to_process = ["item_1", "item_2"]
@steps(0, ["do-work"], required=True)
def step_do_work(self):
self.work_result = f"Processed {self.input}"
@steps(0, ["join-work"], required=True)
def step_join_work(self, inputs):
self.final_result = sorted([inp.work_result for inp in inputs])
@steps(0, ["skip-processing"], required=True)
def step_skip(self):
self.final_result = "Skipped"
@steps(1, ["end-foreach-in-switch"], required=True)
def step_end(self):
assert_equals(self.final_result, ["Processed item_1", "Processed item_2"])
def check_results(self, flow, checker):
checker.assert_artifact(
"end", "final_result", ["Processed item_1", "Processed item_2"]
)
| ForeachInSwitchTest |
python | realpython__materials | python-type-checking/game_003.py | {
"start": 1139,
"end": 2273
} | class ____:
def __init__(self, *names):
"""Set up the deck and deal cards to 4 players"""
deck = Deck.create(shuffle=True)
self.names = (list(names) + "P1 P2 P3 P4".split())[:4]
self.hands = {
n: Player(n, h)
for n, h in zip(self.names, deck.deal(4), strict=False)
}
def play(self):
"""Play a card game"""
start_player = random.choice(self.names)
turn_order = self.player_order(start=start_player)
# Play cards from each player's hand until empty
while self.hands[start_player].hand.cards:
for name in turn_order:
self.hands[name].play_card()
print()
def player_order(self, start=None):
"""Rotate player order so that start goes first"""
if start is None:
start = random.choice(self.names)
start_idx = self.names.index(start)
return self.names[start_idx:] + self.names[:start_idx]
if __name__ == "__main__":
# Read player names from command line
player_names = sys.argv[1:]
game = Game(*player_names)
game.play()
| Game |
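The `player_order` rotation above is plain list slicing; a quick standalone check:

names = ["P1", "P2", "P3", "P4"]
start = "P3"
idx = names.index(start)
assert names[idx:] + names[:idx] == ["P3", "P4", "P1", "P2"]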