language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | chardet__chardet | chardet/codingstatemachinedict.py | {
"start": 38,
"end": 264
} | class ____(TypedDict, total=False):
class_table: Tuple[int, ...]
class_factor: int
state_table: Tuple[int, ...]
char_len_table: Tuple[int, ...]
name: str
language: str # Optional key
| CodingStateMachineDict |
python | encode__django-rest-framework | rest_framework/metadata.py | {
"start": 918,
"end": 5862
} | class ____(BaseMetadata):
"""
This is the default metadata implementation.
It returns an ad-hoc set of information about the view.
There are not any formalized standards for `OPTIONS` responses
for us to base this on.
"""
label_lookup = ClassLookupDict({
serializers.Field: 'field',
serializers.BooleanField: 'boolean',
serializers.CharField: 'string',
serializers.UUIDField: 'string',
serializers.URLField: 'url',
serializers.EmailField: 'email',
serializers.RegexField: 'regex',
serializers.SlugField: 'slug',
serializers.IntegerField: 'integer',
serializers.FloatField: 'float',
serializers.DecimalField: 'decimal',
serializers.DateField: 'date',
serializers.DateTimeField: 'datetime',
serializers.TimeField: 'time',
serializers.DurationField: 'duration',
serializers.ChoiceField: 'choice',
serializers.MultipleChoiceField: 'multiple choice',
serializers.FileField: 'file upload',
serializers.ImageField: 'image upload',
serializers.ListField: 'list',
serializers.DictField: 'nested object',
serializers.Serializer: 'nested object',
})
def determine_metadata(self, request, view):
metadata = {
"name": view.get_view_name(),
"description": view.get_view_description(),
"renders": [renderer.media_type for renderer in view.renderer_classes],
"parses": [parser.media_type for parser in view.parser_classes],
}
if hasattr(view, 'get_serializer'):
actions = self.determine_actions(request, view)
if actions:
metadata['actions'] = actions
return metadata
def determine_actions(self, request, view):
"""
For generic class based views we return information about
the fields that are accepted for 'PUT' and 'POST' methods.
"""
actions = {}
for method in {'PUT', 'POST'} & set(view.allowed_methods):
view.request = clone_request(request, method)
try:
# Test global permissions
if hasattr(view, 'check_permissions'):
view.check_permissions(view.request)
# Test object permissions
if method == 'PUT' and hasattr(view, 'get_object'):
view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
pass
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer()
actions[method] = self.get_serializer_info(serializer)
finally:
view.request = request
return actions
def get_serializer_info(self, serializer):
"""
Given an instance of a serializer, return a dictionary of metadata
about its fields.
"""
if hasattr(serializer, 'child'):
# If this is a `ListSerializer` then we want to examine the
# underlying child serializer instance instead.
serializer = serializer.child
return {
field_name: self.get_field_info(field)
for field_name, field in serializer.fields.items()
if not isinstance(field, serializers.HiddenField)
}
def get_field_info(self, field):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = {
"type": self.label_lookup[field],
"required": getattr(field, "required", False),
}
attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value',
'max_digits', 'decimal_places'
]
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_str(value, strings_only=True)
if getattr(field, 'child', None):
field_info['child'] = self.get_field_info(field.child)
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if (not field_info.get('read_only') and
not isinstance(field, (serializers.RelatedField, serializers.ManyRelatedField)) and
hasattr(field, 'choices')):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_str(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info
| SimpleMetadata |
python | python__mypy | mypyc/irbuild/ll_builder.py | {
"start": 5513,
"end": 115494
} | class ____:
"""A "low-level" IR builder class.
LowLevelIRBuilder provides core abstractions we use for constructing
IR as well as a number of higher-level ones (accessing attributes,
calling functions and methods, and coercing between types, for
example).
The core principle of the low-level IR builder is that all of its
facilities operate solely on the mypyc IR level and not the mypy AST
level---it has *no knowledge* of mypy types or expressions.
The mypyc.irbuilder.builder.IRBuilder class wraps an instance of this
class and provides additional functionality to transform mypy AST nodes
to IR.
"""
def __init__(self, errors: Errors | None, options: CompilerOptions) -> None:
self.errors = errors
self.options = options
self.args: list[Register] = []
self.blocks: list[BasicBlock] = []
# Stack of except handler entry blocks
self.error_handlers: list[BasicBlock | None] = [None]
# Values that we need to keep alive as long as we have borrowed
# temporaries. Use flush_keep_alives() to mark the end of the live range.
self.keep_alives: list[Value] = []
def set_module(self, module_name: str, module_path: str) -> None:
"""Set the name and path of the current module."""
self.module_name = module_name
self.module_path = module_path
# Basic operations
def add(self, op: Op) -> Value:
"""Add an op."""
assert not self.blocks[-1].terminated, "Can't add to finished block"
self.blocks[-1].ops.append(op)
return op
def goto(self, target: BasicBlock) -> None:
"""Add goto to a basic block."""
if not self.blocks[-1].terminated:
self.add(Goto(target))
def activate_block(self, block: BasicBlock) -> None:
"""Add a basic block and make it the active one (target of adds)."""
if self.blocks:
assert self.blocks[-1].terminated
block.error_handler = self.error_handlers[-1]
self.blocks.append(block)
def goto_and_activate(self, block: BasicBlock) -> None:
"""Add goto a block and make it the active block."""
self.goto(block)
self.activate_block(block)
def keep_alive(self, values: list[Value], *, steal: bool = False) -> None:
self.add(KeepAlive(values, steal=steal))
def load_mem(self, ptr: Value, value_type: RType, *, borrow: bool = False) -> Value:
return self.add(LoadMem(value_type, ptr, borrow=borrow))
def push_error_handler(self, handler: BasicBlock | None) -> None:
self.error_handlers.append(handler)
def pop_error_handler(self) -> BasicBlock | None:
return self.error_handlers.pop()
def self(self) -> Register:
"""Return reference to the 'self' argument.
This only works in a method.
"""
return self.args[0]
def flush_keep_alives(self) -> None:
if self.keep_alives:
self.add(KeepAlive(self.keep_alives.copy()))
self.keep_alives = []
def debug_print(self, toprint: str | Value) -> None:
if isinstance(toprint, str):
toprint = self.load_str(toprint)
self.primitive_op(debug_print_op, [toprint], -1)
# Type conversions
def box(self, src: Value) -> Value:
if src.type.is_unboxed:
if isinstance(src, Integer) and is_tagged(src.type):
return self.add(LoadLiteral(src.value >> 1, rtype=object_rprimitive))
return self.add(Box(src))
else:
return src
def unbox_or_cast(
self,
src: Value,
target_type: RType,
line: int,
*,
can_borrow: bool = False,
unchecked: bool = False,
) -> Value:
if target_type.is_unboxed:
return self.add(Unbox(src, target_type, line))
else:
if can_borrow:
self.keep_alives.append(src)
return self.add(Cast(src, target_type, line, borrow=can_borrow, unchecked=unchecked))
def coerce(
self,
src: Value,
target_type: RType,
line: int,
force: bool = False,
*,
can_borrow: bool = False,
) -> Value:
"""Generate a coercion/cast from one type to other (only if needed).
For example, int -> object boxes the source int; int -> int emits nothing;
object -> int unboxes the object. All conversions preserve object value.
If force is true, always generate an op (even if it is just an assignment) so
that the result will have exactly target_type as the type.
Returns the register with the converted value (may be same as src).
"""
src_type = src.type
if src_type.is_unboxed and not target_type.is_unboxed:
# Unboxed -> boxed
return self.box(src)
if (src_type.is_unboxed and target_type.is_unboxed) and not is_runtime_subtype(
src_type, target_type
):
if (
isinstance(src, Integer)
and is_short_int_rprimitive(src_type)
and is_fixed_width_rtype(target_type)
):
value = src.numeric_value()
if not check_native_int_range(target_type, value):
self.error(f'Value {value} is out of range for "{target_type}"', line)
return Integer(src.value >> 1, target_type)
elif is_int_rprimitive(src_type) and is_fixed_width_rtype(target_type):
return self.coerce_int_to_fixed_width(src, target_type, line)
elif is_fixed_width_rtype(src_type) and is_int_rprimitive(target_type):
return self.coerce_fixed_width_to_int(src, line)
elif is_short_int_rprimitive(src_type) and is_fixed_width_rtype(target_type):
return self.coerce_short_int_to_fixed_width(src, target_type, line)
elif (
isinstance(src_type, RPrimitive)
and isinstance(target_type, RPrimitive)
and src_type.is_native_int
and target_type.is_native_int
and src_type.size == target_type.size
and src_type.is_signed == target_type.is_signed
):
# Equivalent types
return src
elif is_bool_or_bit_rprimitive(src_type) and is_tagged(target_type):
shifted = self.int_op(
bool_rprimitive, src, Integer(1, bool_rprimitive), IntOp.LEFT_SHIFT
)
return self.add(Extend(shifted, target_type, signed=False))
elif is_bool_or_bit_rprimitive(src_type) and is_fixed_width_rtype(target_type):
return self.add(Extend(src, target_type, signed=False))
elif isinstance(src, Integer) and is_float_rprimitive(target_type):
if is_tagged(src_type):
return Float(float(src.value // 2))
return Float(float(src.value))
elif is_tagged(src_type) and is_float_rprimitive(target_type):
return self.int_to_float(src, line)
elif (
isinstance(src_type, RTuple)
and isinstance(target_type, RTuple)
and len(src_type.types) == len(target_type.types)
):
# Coerce between two tuple types by coercing each item separately
values = []
for i in range(len(src_type.types)):
v = None
if isinstance(src, TupleSet):
item = src.items[i]
# We can't reuse register values, since they can be modified.
if not isinstance(item, Register):
v = item
if v is None:
v = TupleGet(src, i)
self.add(v)
values.append(v)
return self.add(
TupleSet(
[self.coerce(v, t, line) for v, t in zip(values, target_type.types)], line
)
)
# To go between any other unboxed types, we go through a boxed
# in-between value, for simplicity.
tmp = self.box(src)
return self.unbox_or_cast(tmp, target_type, line)
if (not src_type.is_unboxed and target_type.is_unboxed) or not is_subtype(
src_type, target_type
):
return self.unbox_or_cast(src, target_type, line, can_borrow=can_borrow)
elif force:
tmp = Register(target_type)
self.add(Assign(tmp, src))
return tmp
return src
def coerce_int_to_fixed_width(self, src: Value, target_type: RType, line: int) -> Value:
assert is_fixed_width_rtype(target_type), target_type
assert isinstance(target_type, RPrimitive), target_type
res = Register(target_type)
fast, slow, end = BasicBlock(), BasicBlock(), BasicBlock()
check = self.check_tagged_short_int(src, line)
self.add(Branch(check, fast, slow, Branch.BOOL))
self.activate_block(fast)
size = target_type.size
if size < int_rprimitive.size:
# Add a range check when the target type is smaller than the source type
fast2, fast3 = BasicBlock(), BasicBlock()
upper_bound = 1 << (size * 8 - 1)
if not target_type.is_signed:
upper_bound *= 2
check2 = self.add(ComparisonOp(src, Integer(upper_bound, src.type), ComparisonOp.SLT))
self.add(Branch(check2, fast2, slow, Branch.BOOL))
self.activate_block(fast2)
if target_type.is_signed:
lower_bound = -upper_bound
else:
lower_bound = 0
check3 = self.add(ComparisonOp(src, Integer(lower_bound, src.type), ComparisonOp.SGE))
self.add(Branch(check3, fast3, slow, Branch.BOOL))
self.activate_block(fast3)
tmp = self.int_op(
c_pyssize_t_rprimitive,
src,
Integer(1, c_pyssize_t_rprimitive),
IntOp.RIGHT_SHIFT,
line,
)
tmp = self.add(Truncate(tmp, target_type))
else:
if size > int_rprimitive.size:
tmp = self.add(Extend(src, target_type, signed=True))
else:
tmp = src
tmp = self.int_op(target_type, tmp, Integer(1, target_type), IntOp.RIGHT_SHIFT, line)
self.add(Assign(res, tmp))
self.goto(end)
self.activate_block(slow)
if is_int64_rprimitive(target_type) or (
is_int32_rprimitive(target_type) and size == int_rprimitive.size
):
# Slow path calls a library function that handles more complex logic
ptr = self.int_op(
pointer_rprimitive, src, Integer(1, pointer_rprimitive), IntOp.XOR, line
)
ptr2 = Register(c_pointer_rprimitive)
self.add(Assign(ptr2, ptr))
if is_int64_rprimitive(target_type):
conv_op = int_to_int64_op
else:
conv_op = int_to_int32_op
tmp = self.call_c(conv_op, [ptr2], line)
self.add(Assign(res, tmp))
self.add(KeepAlive([src]))
self.goto(end)
elif is_int32_rprimitive(target_type):
# Slow path just always generates an OverflowError
self.call_c(int32_overflow, [], line)
self.add(Unreachable())
elif is_int16_rprimitive(target_type):
# Slow path just always generates an OverflowError
self.call_c(int16_overflow, [], line)
self.add(Unreachable())
elif is_uint8_rprimitive(target_type):
# Slow path just always generates an OverflowError
self.call_c(uint8_overflow, [], line)
self.add(Unreachable())
else:
assert False, target_type
self.activate_block(end)
return res
def coerce_short_int_to_fixed_width(self, src: Value, target_type: RType, line: int) -> Value:
if is_int64_rprimitive(target_type) or (
PLATFORM_SIZE == 4 and is_int32_rprimitive(target_type)
):
return self.int_op(target_type, src, Integer(1, target_type), IntOp.RIGHT_SHIFT, line)
# TODO: i32 on 64-bit platform
assert False, (src.type, target_type, PLATFORM_SIZE)
def coerce_fixed_width_to_int(self, src: Value, line: int) -> Value:
if (
(is_int32_rprimitive(src.type) and PLATFORM_SIZE == 8)
or is_int16_rprimitive(src.type)
or is_uint8_rprimitive(src.type)
):
# Simple case -- just sign extend and shift.
extended = self.add(Extend(src, c_pyssize_t_rprimitive, signed=src.type.is_signed))
return self.int_op(
int_rprimitive,
extended,
Integer(1, c_pyssize_t_rprimitive),
IntOp.LEFT_SHIFT,
line,
)
src_type = src.type
assert is_fixed_width_rtype(src_type), src_type
assert isinstance(src_type, RPrimitive), src_type
res = Register(int_rprimitive)
fast, fast2, slow, end = BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock()
c1 = self.add(ComparisonOp(src, Integer(MAX_SHORT_INT, src_type), ComparisonOp.SLE))
self.add(Branch(c1, fast, slow, Branch.BOOL))
self.activate_block(fast)
c2 = self.add(ComparisonOp(src, Integer(MIN_SHORT_INT, src_type), ComparisonOp.SGE))
self.add(Branch(c2, fast2, slow, Branch.BOOL))
self.activate_block(slow)
if is_int64_rprimitive(src_type):
conv_op = int64_to_int_op
elif is_int32_rprimitive(src_type):
assert PLATFORM_SIZE == 4
conv_op = ssize_t_to_int_op
else:
assert False, src_type
x = self.call_c(conv_op, [src], line)
self.add(Assign(res, x))
self.goto(end)
self.activate_block(fast2)
if int_rprimitive.size < src_type.size:
tmp = self.add(Truncate(src, c_pyssize_t_rprimitive))
else:
tmp = src
s = self.int_op(int_rprimitive, tmp, Integer(1, tmp.type), IntOp.LEFT_SHIFT, line)
self.add(Assign(res, s))
self.goto(end)
self.activate_block(end)
return res
def coerce_nullable(self, src: Value, target_type: RType, line: int) -> Value:
"""Generate a coercion from a potentially null value."""
if src.type.is_unboxed == target_type.is_unboxed and (
(target_type.is_unboxed and is_runtime_subtype(src.type, target_type))
or (not target_type.is_unboxed and is_subtype(src.type, target_type))
):
return src
target = Register(target_type)
valid, invalid, out = BasicBlock(), BasicBlock(), BasicBlock()
self.add(Branch(src, invalid, valid, Branch.IS_ERROR))
self.activate_block(valid)
coerced = self.coerce(src, target_type, line)
self.add(Assign(target, coerced, line))
self.goto(out)
self.activate_block(invalid)
error = self.add(LoadErrorValue(target_type))
self.add(Assign(target, error, line))
self.goto_and_activate(out)
return target
# Attribute access
def get_attr(
self, obj: Value, attr: str, result_type: RType, line: int, *, borrow: bool = False
) -> Value:
"""Get a native or Python attribute of an object."""
if (
isinstance(obj.type, RInstance)
and obj.type.class_ir.is_ext_class
and obj.type.class_ir.has_attr(attr)
):
op = GetAttr(obj, attr, line, borrow=borrow)
# For non-refcounted attribute types, the borrow might be
# disabled even if requested, so don't check 'borrow'.
if op.is_borrowed:
self.keep_alives.append(obj)
return self.add(op)
elif isinstance(obj.type, RUnion):
return self.union_get_attr(obj, obj.type, attr, result_type, line)
else:
return self.py_get_attr(obj, attr, line)
def union_get_attr(
self, obj: Value, rtype: RUnion, attr: str, result_type: RType, line: int
) -> Value:
"""Get an attribute of an object with a union type."""
def get_item_attr(value: Value) -> Value:
return self.get_attr(value, attr, result_type, line)
return self.decompose_union_helper(obj, rtype, result_type, get_item_attr, line)
def py_get_attr(self, obj: Value, attr: str, line: int) -> Value:
"""Get a Python attribute (slow).
Prefer get_attr() which generates optimized code for native classes.
"""
key = self.load_str(attr)
return self.primitive_op(py_getattr_op, [obj, key], line)
# isinstance() checks
def isinstance_helper(self, obj: Value, class_irs: list[ClassIR], line: int) -> Value:
"""Fast path for isinstance() that checks against a list of native classes."""
if not class_irs:
return self.false()
ret = self.isinstance_native(obj, class_irs[0], line)
for class_ir in class_irs[1:]:
def other() -> Value:
return self.isinstance_native(obj, class_ir, line)
ret = self.shortcircuit_helper("or", bool_rprimitive, lambda: ret, other, line)
return ret
def get_type_of_obj(self, obj: Value, line: int) -> Value:
ob_type_address = self.add(GetElementPtr(obj, PyObject, "ob_type", line))
ob_type = self.load_mem(ob_type_address, object_rprimitive, borrow=True)
self.add(KeepAlive([obj]))
return ob_type
def type_is_op(self, obj: Value, type_obj: Value, line: int) -> Value:
typ = self.get_type_of_obj(obj, line)
return self.add(ComparisonOp(typ, type_obj, ComparisonOp.EQ, line))
def isinstance_native(self, obj: Value, class_ir: ClassIR, line: int) -> Value:
"""Fast isinstance() check for a native class.
If there are three or fewer concrete (non-trait) classes among the class
and all its children, use even faster type comparison checks `type(obj)
is typ`.
"""
concrete = all_concrete_classes(class_ir)
if concrete is None or len(concrete) > FAST_ISINSTANCE_MAX_SUBCLASSES + 1:
return self.primitive_op(
fast_isinstance_op, [obj, self.get_native_type(class_ir)], line
)
if not concrete:
# There can't be any concrete instance that matches this.
return self.false()
type_obj = self.get_native_type(concrete[0])
ret = self.type_is_op(obj, type_obj, line)
for c in concrete[1:]:
def other() -> Value:
return self.type_is_op(obj, self.get_native_type(c), line)
ret = self.shortcircuit_helper("or", bool_rprimitive, lambda: ret, other, line)
return ret
# Calls
def _construct_varargs(
self,
args: Sequence[tuple[Value, ArgKind, str | None]],
line: int,
*,
has_star: bool,
has_star2: bool,
) -> tuple[Value | None, Value | None]:
"""Construct *args and **kwargs from a collection of arguments
This is pretty complicated, and almost all of the complication here stems from
one of two things (but mostly the second):
* The handling of ARG_STAR/ARG_STAR2. We want to create as much of the args/kwargs
values in one go as we can, so we collect values until our hand is forced, and
then we emit creation of the list/tuple, and expand it from there if needed.
* Support potentially nullable argument values. This has very narrow applicability,
as this will never be done by our compiled Python code, but is critically used
by gen_glue_method when generating glue methods to mediate between the function
signature of a parent class and its subclasses.
For named-only arguments, this is quite simple: if it is
null, don't put it in the dict.
For positional-or-named arguments, things are much more complicated.
* First, anything that was passed as a positional arg
must be forwarded along as a positional arg. It *must
not* be converted to a named arg. This is because mypy
does not enforce that positional-or-named arguments
have the same name in subclasses, and it is not
uncommon for code to have different names in
subclasses (a bunch of mypy's visitors do this, for
example!). This is arguably a bug in both mypy and code doing
this, and they ought to be using positional-only arguments, but
positional-only arguments are new and ugly.
* On the flip side, we're willing to accept the
infelicity of sometimes turning an argument that was
passed by keyword into a positional argument. It's wrong,
but it's very marginal, and avoiding it would require passing
a bitmask of which arguments were named with every function call,
or something similar.
(See some discussion of this in testComplicatedArgs)
Thus, our strategy for positional-or-named arguments is to
always pass them as positional, except in the one
situation where we can not, and where we can be absolutely
sure they were passed by name: when an *earlier*
positional argument was missing its value.
This means that if we have a method `f(self, x: int=..., y: object=...)`:
* x and y present: args=(x, y), kwargs={}
* x present, y missing: args=(x,), kwargs={}
* x missing, y present: args=(), kwargs={'y': y}
To implement this, when we have multiple optional
positional arguments, we maintain a flag in a register
that tracks whether an argument has been missing, and for
each such optional argument (except the first), we check
the flag to determine whether to append the argument to
the *args list or add it to the **kwargs dict. What a
mess!
This is what really makes everything here such a tangle;
otherwise the *args and **kwargs code could be separated.
The arguments has_star and has_star2 indicate whether the target function
takes an ARG_STAR and ARG_STAR2 argument, respectively.
(These will always be true when making a pycall, and be based
on the actual target signature for a native call.)
"""
star_result: Value | None = None
star2_result: Value | None = None
# We aggregate values that need to go into *args and **kwargs
# in these lists. Once all arguments are processed (in the
# happiest case), or we encounter an ARG_STAR/ARG_STAR2 or a
# nullable arg, then we create the list and/or dict.
star_values: list[Value] = []
star2_keys: list[Value] = []
star2_values: list[Value] = []
seen_empty_reg: Register | None = None
for value, kind, name in args:
if kind == ARG_STAR:
if star_result is None:
# star args fastpath
if len(args) == 1:
# fn(*args)
if is_list_rprimitive(value.type):
value = self.primitive_op(list_tuple_op, [value], line)
elif not is_tuple_rprimitive(value.type) and not isinstance(
value.type, RTuple
):
value = self.primitive_op(sequence_tuple_op, [value], line)
return value, None
elif len(args) == 2 and args[1][1] == ARG_STAR2:
# fn(*args, **kwargs)
# TODO: extend to cover(*args, **k, **w, **a, **r, **g, **s)
if is_tuple_rprimitive(value.type) or isinstance(value.type, RTuple):
star_result = value
elif is_list_rprimitive(value.type):
star_result = self.primitive_op(list_tuple_op, [value], line)
else:
star_result = self.primitive_op(sequence_tuple_op, [value], line)
star2_arg = args[1]
star2_value = star2_arg[0]
if is_dict_rprimitive(star2_value.type):
star2_fastpath_op = dict_copy_op
else:
star2_fastpath_op = dict_copy
return star_result, self.primitive_op(
star2_fastpath_op, [star2_value], line
)
# elif ...: TODO extend this to optimize fn(*args, k=1, **kwargs) case
# TODO optimize this case using the length utils - currently in review
star_result = self.new_list_op(star_values, line)
self.primitive_op(list_extend_op, [star_result, value], line)
elif kind == ARG_STAR2:
if star2_result is None:
if len(args) == 1:
# early exit with fastpath if the only arg is ARG_STAR2
# TODO: can we maintain an empty tuple in memory and just reuse it again and again?
if is_dict_rprimitive(value.type):
star2_fastpath_op = dict_copy_op
else:
star2_fastpath_op = dict_copy
return self.new_tuple([], line), self.primitive_op(
star2_fastpath_op, [value], line
)
star2_result = self._create_dict(star2_keys, star2_values, line)
self.call_c(dict_update_in_display_op, [star2_result, value], line=line)
else:
nullable = kind.is_optional()
maybe_pos = kind.is_positional() and has_star
maybe_named = kind.is_named() or (kind.is_optional() and name and has_star2)
# If the argument is nullable, we need to create the
# relevant args/kwargs objects so that we can
# conditionally modify them.
if nullable:
if maybe_pos and star_result is None:
star_result = self.new_list_op(star_values, line)
if maybe_named and star2_result is None:
star2_result = self._create_dict(star2_keys, star2_values, line)
# Easy cases: just collect the argument.
if maybe_pos and star_result is None:
star_values.append(value)
continue
if maybe_named and star2_result is None:
assert name is not None
key = self.load_str(name)
star2_keys.append(key)
star2_values.append(value)
continue
# OK, anything that is nullable or *after* a nullable arg needs to be here
# TODO: We could try harder to avoid creating basic blocks in the common case
new_seen_empty_reg = seen_empty_reg
out = BasicBlock()
if nullable:
# If this is the first nullable positional arg we've seen, create
# a register to track whether anything has been null.
# (We won't *check* the register until the next argument, though.)
if maybe_pos and not seen_empty_reg:
new_seen_empty_reg = Register(bool_rprimitive)
self.add(Assign(new_seen_empty_reg, self.false(), line))
skip = BasicBlock() if maybe_pos else out
keep = BasicBlock()
self.add(Branch(value, skip, keep, Branch.IS_ERROR))
self.activate_block(keep)
# If this could be positional or named and we /might/ have seen a missing
# positional arg, then we need to compile *both* a positional and named
# version! What a pain!
if maybe_pos and maybe_named and seen_empty_reg:
pos_block, named_block = BasicBlock(), BasicBlock()
self.add(Branch(seen_empty_reg, named_block, pos_block, Branch.BOOL))
else:
pos_block = named_block = BasicBlock()
self.goto(pos_block)
if maybe_pos:
self.activate_block(pos_block)
assert star_result
self.translate_special_method_call(
star_result, "append", [value], result_type=None, line=line
)
self.goto(out)
if maybe_named and (not maybe_pos or seen_empty_reg):
self.activate_block(named_block)
assert name is not None
key = self.load_str(name)
assert star2_result
self.translate_special_method_call(
star2_result, "__setitem__", [key, value], result_type=None, line=line
)
self.goto(out)
if nullable and maybe_pos and new_seen_empty_reg:
assert skip is not out
self.activate_block(skip)
self.add(Assign(new_seen_empty_reg, self.true(), line))
self.goto(out)
self.activate_block(out)
seen_empty_reg = new_seen_empty_reg
assert not (star_result or star_values) or has_star
assert not (star2_result or star2_values) or has_star2
if has_star:
# If we managed to make it this far without creating a
# *args list, then we can directly create a
# tuple. Otherwise create the tuple from the list.
if star_result is None:
star_result = self.new_tuple(star_values, line)
elif not is_tuple_rprimitive(star_result.type):
# if star_result is a tuple we took the fast path
star_result = self.primitive_op(list_tuple_op, [star_result], line)
if has_star2 and star2_result is None and len(star2_keys) > 0:
# TODO: use dict_copy_op for simple cases of **kwargs
star2_result = self._create_dict(star2_keys, star2_values, line)
return star_result, star2_result
def py_call(
self,
function: Value,
arg_values: list[Value],
line: int,
arg_kinds: list[ArgKind] | None = None,
arg_names: Sequence[str | None] | None = None,
) -> Value:
"""Call a Python function (non-native and slow).
Use py_call_op or py_call_with_kwargs_op for Python function call.
"""
result = self._py_vector_call(function, arg_values, line, arg_kinds, arg_names)
if result is not None:
return result
# If all arguments are positional, we can use py_call_op.
if arg_kinds is None or all(kind == ARG_POS for kind in arg_kinds):
return self.call_c(py_call_op, [function] + arg_values, line)
# Otherwise fallback to py_call_with_posargs_op or py_call_with_kwargs_op.
assert arg_names is not None
pos_args_tuple, kw_args_dict = self._construct_varargs(
list(zip(arg_values, arg_kinds, arg_names)), line, has_star=True, has_star2=True
)
assert pos_args_tuple
if kw_args_dict is None:
return self.call_c(py_call_with_posargs_op, [function, pos_args_tuple], line)
return self.call_c(py_call_with_kwargs_op, [function, pos_args_tuple, kw_args_dict], line)
def _py_vector_call(
self,
function: Value,
arg_values: list[Value],
line: int,
arg_kinds: list[ArgKind] | None = None,
arg_names: Sequence[str | None] | None = None,
) -> Value | None:
"""Call function using the vectorcall API if possible.
Return the return value if successful. Return None if a non-vectorcall
API should be used instead.
"""
# We can do this if all args are positional or named (no *args or **kwargs, not optional).
if arg_kinds is None or all(
not kind.is_star() and not kind.is_optional() for kind in arg_kinds
):
if arg_values:
# Create a C array containing all arguments as boxed values.
coerced_args = [self.coerce(arg, object_rprimitive, line) for arg in arg_values]
arg_ptr = self.setup_rarray(object_rprimitive, coerced_args, object_ptr=True)
else:
arg_ptr = Integer(0, object_pointer_rprimitive)
num_pos = num_positional_args(arg_values, arg_kinds)
keywords = self._vectorcall_keywords(arg_names)
value = self.call_c(
py_vectorcall_op,
[function, arg_ptr, Integer(num_pos, c_size_t_rprimitive), keywords],
line,
)
if arg_values:
# Make sure arguments won't be freed until after the call.
# We need this because RArray doesn't support automatic
# memory management.
self.add(KeepAlive(coerced_args))
return value
return None
def _vectorcall_keywords(self, arg_names: Sequence[str | None] | None) -> Value:
"""Return a reference to a tuple literal with keyword argument names.
Return null pointer if there are no keyword arguments.
"""
if arg_names:
kw_list = [name for name in arg_names if name is not None]
if kw_list:
return self.add(LoadLiteral(tuple(kw_list), object_rprimitive))
return Integer(0, object_rprimitive)
def py_method_call(
self,
obj: Value,
method_name: str,
arg_values: list[Value],
line: int,
arg_kinds: list[ArgKind] | None,
arg_names: Sequence[str | None] | None,
) -> Value:
"""Call a Python method (non-native and slow)."""
result = self._py_vector_method_call(
obj, method_name, arg_values, line, arg_kinds, arg_names
)
if result is not None:
return result
if arg_kinds is None or all(kind == ARG_POS for kind in arg_kinds):
# Use legacy method call API
method_name_reg = self.load_str(method_name)
return self.call_c(py_method_call_op, [obj, method_name_reg] + arg_values, line)
else:
# Use py_call since it supports keyword arguments (and vectorcalls).
method = self.py_get_attr(obj, method_name, line)
return self.py_call(method, arg_values, line, arg_kinds=arg_kinds, arg_names=arg_names)
def _py_vector_method_call(
    self,
    obj: Value,
    method_name: str,
    arg_values: list[Value],
    line: int,
    arg_kinds: list[ArgKind] | None,
    arg_names: Sequence[str | None] | None,
) -> Value | None:
    """Call method using the vectorcall API if possible.

    Return the return value if successful. Return None if a non-vectorcall
    API should be used instead.
    """
    # Vectorcall cannot express *args/**kwargs or optional arguments, so
    # fall back (return None) in those cases.
    if arg_kinds is None or all(
        not kind.is_star() and not kind.is_optional() for kind in arg_kinds
    ):
        method_name_reg = self.load_str(method_name)
        # The receiver is passed as the first element of the argument array.
        coerced_args = [
            self.coerce(arg, object_rprimitive, line) for arg in [obj] + arg_values
        ]
        arg_ptr = self.setup_rarray(object_rprimitive, coerced_args, object_ptr=True)
        num_pos = num_positional_args(arg_values, arg_kinds)
        keywords = self._vectorcall_keywords(arg_names)
        value = self.call_c(
            py_vectorcall_method_op,
            [
                method_name_reg,
                arg_ptr,
                # +1 counts the receiver slot; the flag marks the vectorcall
                # "arguments offset" convention (see CPython vectorcall docs).
                Integer((num_pos + 1) | PY_VECTORCALL_ARGUMENTS_OFFSET, c_size_t_rprimitive),
                keywords,
            ],
            line,
        )
        # Make sure arguments won't be freed until after the call.
        # We need this because RArray doesn't support automatic
        # memory management.
        self.add(KeepAlive(coerced_args))
        return value
    return None
def call(
    self,
    decl: FuncDecl,
    args: Sequence[Value],
    arg_kinds: list[ArgKind],
    arg_names: Sequence[str | None],
    line: int,
    *,
    bitmap_args: list[Register] | None = None,
) -> Value:
    """Call a native function.

    If bitmap_args is given, they override the values of (some) of the bitmap
    arguments used to track the presence of values for certain arguments. By
    default, the values of the bitmap arguments are inferred from args.
    """
    # Map keyword arguments to positions, fill defaults, coerce types.
    positional = self.native_args_to_positional(
        args, arg_kinds, arg_names, decl.sig, line, bitmap_args=bitmap_args
    )
    return self.add(Call(decl, positional, line))
def native_args_to_positional(
    self,
    args: Sequence[Value],
    arg_kinds: list[ArgKind],
    arg_names: Sequence[str | None],
    sig: FuncSignature,
    line: int,
    *,
    bitmap_args: list[Register] | None = None,
) -> list[Value]:
    """Prepare arguments for a native call.

    Given args/kinds/names and a target signature for a native call, map
    keyword arguments to their appropriate place in the argument list,
    fill in error values for unspecified default arguments,
    package arguments that will go into *args/**kwargs into a tuple/dict,
    and coerce arguments to the appropriate type.
    """
    sig_args = sig.args
    n = sig.num_bitmap_args
    if n:
        # The trailing bitmap args are synthetic; strip them here and
        # append their values at the end of this function.
        sig_args = sig_args[:-n]

    sig_arg_kinds = [arg.kind for arg in sig_args]
    sig_arg_names = [arg.name for arg in sig_args]

    concrete_kinds = [concrete_arg_kind(arg_kind) for arg_kind in arg_kinds]
    # formal_to_actual[i] lists indices of actual args mapping to formal i.
    formal_to_actual = map_actuals_to_formals(
        concrete_kinds,
        arg_names,
        sig_arg_kinds,
        sig_arg_names,
        lambda n: AnyType(TypeOfAny.special_form),
    )

    # First scan for */** and construct those
    has_star = has_star2 = False
    star_arg_entries = []
    for lst, arg in zip(formal_to_actual, sig_args):
        if arg.kind.is_star():
            star_arg_entries.extend([(args[i], arg_kinds[i], arg_names[i]) for i in lst])
        has_star = has_star or arg.kind == ARG_STAR
        has_star2 = has_star2 or arg.kind == ARG_STAR2
    star_arg, star2_arg = self._construct_varargs(
        star_arg_entries, line, has_star=has_star, has_star2=has_star2
    )

    # Flatten out the arguments, loading error values for default
    # arguments, constructing tuples/dicts for star args, and
    # coercing everything to the expected type.
    output_args: list[Value] = []
    for lst, arg in zip(formal_to_actual, sig_args):
        if arg.kind == ARG_STAR:
            assert star_arg
            output_arg = star_arg
        elif arg.kind == ARG_STAR2:
            output_arg = star2_arg or self._create_dict([], [], line)
        elif not lst:
            # No actual value provided: pass an error/placeholder value.
            if is_fixed_width_rtype(arg.type):
                output_arg = Integer(0, arg.type)
            elif is_float_rprimitive(arg.type):
                output_arg = Float(0.0)
            else:
                output_arg = self.add(LoadErrorValue(arg.type, is_borrowed=True))
        else:
            base_arg = args[lst[0]]

            if arg_kinds[lst[0]].is_optional():
                output_arg = self.coerce_nullable(base_arg, arg.type, line)
            else:
                output_arg = self.coerce(base_arg, arg.type, line)

        output_args.append(output_arg)

    # Append bitmap args (presence flags for optional args with error
    # overlap), one BITMAP_BITS-wide integer each, last one first.
    for i in reversed(range(n)):
        if bitmap_args and i < len(bitmap_args):
            # Use override provided by caller
            output_args.append(bitmap_args[i])
            continue
        # Infer values of bitmap args
        bitmap = 0
        c = 0
        for lst, arg in zip(formal_to_actual, sig_args):
            if arg.kind.is_optional() and arg.type.error_overlap:
                if i * BITMAP_BITS <= c < (i + 1) * BITMAP_BITS:
                    if lst:
                        # Set bit: a value was provided for this argument.
                        bitmap |= 1 << (c & (BITMAP_BITS - 1))
                c += 1
        output_args.append(Integer(bitmap, bitmap_rprimitive))

    return output_args
def gen_method_call(
    self,
    base: Value,
    name: str,
    arg_values: list[Value],
    result_type: RType | None,
    line: int,
    arg_kinds: list[ArgKind] | None = None,
    arg_names: list[str | None] | None = None,
    can_borrow: bool = False,
) -> Value:
    """Generate either a native or Python method call.

    Dispatch priority: Python call for *args/**kwargs, then a direct
    native MethodCall, then a callable attribute, then union decomposition,
    then special-cased primitives, then a generic Python method call.
    """
    # If we have *args, then fallback to Python method call.
    if arg_kinds is not None and any(kind.is_star() for kind in arg_kinds):
        return self.py_method_call(base, name, arg_values, line, arg_kinds, arg_names)

    # If the base type is one of ours, do a MethodCall
    fast_name = FAST_PREFIX + name
    if (
        isinstance(base.type, RInstance)
        and (base.type.class_ir.is_ext_class or base.type.class_ir.has_method(fast_name))
        and not base.type.class_ir.builtin_base
    ):
        # Non-extension classes use the fast-prefixed method name.
        name = name if base.type.class_ir.is_ext_class else fast_name
        if base.type.class_ir.has_method(name):
            decl = base.type.class_ir.method_decl(name)
            if arg_kinds is None:
                assert arg_names is None, "arg_kinds not present but arg_names is"
                arg_kinds = [ARG_POS for _ in arg_values]
                arg_names = [None for _ in arg_values]
            else:
                assert arg_names is not None, "arg_kinds present but arg_names is not"

            # Normalize args to positionals.
            assert decl.bound_sig
            arg_values = self.native_args_to_positional(
                arg_values, arg_kinds, arg_names, decl.bound_sig, line
            )
            return self.add(MethodCall(base, name, arg_values, line))
        elif base.type.class_ir.has_attr(name):
            # The "method" is actually a callable attribute; fetch and call it.
            function = self.add(GetAttr(base, name, line))
            return self.py_call(
                function, arg_values, line, arg_kinds=arg_kinds, arg_names=arg_names
            )
    elif isinstance(base.type, RUnion):
        # Decompose the union and dispatch per member type.
        return self.union_method_call(
            base, base.type, name, arg_values, result_type, line, arg_kinds, arg_names
        )

    # Try to do a special-cased method call
    if not arg_kinds or arg_kinds == [ARG_POS] * len(arg_values):
        target = self.translate_special_method_call(
            base, name, arg_values, result_type, line, can_borrow=can_borrow
        )
        if target:
            return target

    # Fall back to Python method call
    return self.py_method_call(base, name, arg_values, line, arg_kinds, arg_names)
def union_method_call(
    self,
    base: Value,
    obj_type: RUnion,
    name: str,
    arg_values: list[Value],
    return_rtype: RType | None,
    line: int,
    arg_kinds: list[ArgKind] | None,
    arg_names: list[str | None] | None,
) -> Value:
    """Generate a method call with a union type for the object."""
    # Union method call needs a return_rtype for the type of the output register.
    # If we don't have one, use object_rprimitive.
    if return_rtype is None:
        return_rtype = object_rprimitive

    def invoke_on_variant(variant: Value) -> Value:
        # Dispatch the call for one concrete member of the union.
        return self.gen_method_call(
            variant, name, arg_values, return_rtype, line, arg_kinds, arg_names
        )

    return self.decompose_union_helper(base, obj_type, return_rtype, invoke_on_variant, line)
# Loading various values
def none(self) -> Value:
    """Load unboxed None value (type: none_rprimitive)."""
    # Unboxed None is represented as the constant 1.
    result: Value = Integer(1, none_rprimitive)
    return result
def true(self) -> Value:
    """Load unboxed True value (type: bool_rprimitive)."""
    result: Value = Integer(1, bool_rprimitive)
    return result
def false(self) -> Value:
    """Load unboxed False value (type: bool_rprimitive)."""
    result: Value = Integer(0, bool_rprimitive)
    return result
def none_object(self) -> Value:
    """Load Python None value (type: object_rprimitive)."""
    load = LoadAddress(none_object_op.type, none_object_op.src, line=-1)
    return self.add(load)
def true_object(self) -> Value:
    """Load Python True object (type: object_rprimitive)."""
    load = LoadGlobal(object_rprimitive, "Py_True")
    return self.add(load)
def false_object(self) -> Value:
    """Load Python False object (type: object_rprimitive)."""
    load = LoadGlobal(object_rprimitive, "Py_False")
    return self.add(load)
def load_int(self, value: int) -> Value:
    """Load a tagged (Python) integer literal value."""
    if MIN_LITERAL_SHORT_INT <= value <= MAX_LITERAL_SHORT_INT:
        # Fits in the tagged short-int representation directly.
        return Integer(value)
    # Out-of-range values are loaded as boxed int literals.
    return self.add(LoadLiteral(value, int_rprimitive))
def load_float(self, value: float) -> Value:
    """Load a float literal value (floats are unboxed, so no LoadLiteral needed)."""
    return Float(value)
def load_str(self, value: str) -> Value:
    """Load a str literal value.

    This is useful for more than just str literals; for example, method calls
    also require a PyObject * form for the name of the method.
    """
    literal = LoadLiteral(value, str_rprimitive)
    return self.add(literal)
def load_bytes(self, value: bytes) -> Value:
    """Load a bytes literal value."""
    literal = LoadLiteral(value, bytes_rprimitive)
    return self.add(literal)
def load_complex(self, value: complex) -> Value:
    """Load a complex literal value (as a generic object)."""
    literal = LoadLiteral(value, object_rprimitive)
    return self.add(literal)
def load_static_checked(
    self,
    typ: RType,
    identifier: str,
    module_name: str | None = None,
    namespace: str = NAMESPACE_STATIC,
    line: int = -1,
    error_msg: str | None = None,
) -> Value:
    """Load a static value, raising NameError if it holds an error value."""
    msg = error_msg if error_msg is not None else f'name "{identifier}" is not defined'
    success_block, failure_block = BasicBlock(), BasicBlock()
    loaded = self.add(LoadStatic(typ, identifier, module_name, namespace, line=line))
    # An undefined static holds an error value; raise NameError in that case.
    self.add(Branch(loaded, failure_block, success_block, Branch.IS_ERROR, rare=True))
    self.activate_block(failure_block)
    self.add(RaiseStandardError(RaiseStandardError.NAME_ERROR, msg, line))
    self.add(Unreachable())
    self.activate_block(success_block)
    return loaded
def load_module(self, name: str) -> Value:
    """Load a module object from module-namespace static storage."""
    static = LoadStatic(object_rprimitive, name, namespace=NAMESPACE_MODULE)
    return self.add(static)
def get_native_type(self, cls: ClassIR) -> Value:
    """Load native type object."""
    qualified_name = f"{cls.module_name}.{cls.name}"
    return self.load_native_type_object(qualified_name)
def load_native_type_object(self, fullname: str) -> Value:
    """Load a native class's type object given its fully qualified name."""
    module, short_name = fullname.rsplit(".", 1)
    return self.add(LoadStatic(object_rprimitive, short_name, module, NAMESPACE_TYPE))
# Other primitive operations
def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
    """Perform a binary operation.

    Generate specialized operations based on operand types, with a fallback
    to generic operations. Specializations are tried in order: tuples,
    static ==/!=, identity, bools, instance containment, fixed-width ints,
    floats, dunder dispatch, and finally generic primitive ops.
    """
    ltype = lreg.type
    rtype = rreg.type

    # Special case tuple comparison here so that nested tuples can be supported
    if isinstance(ltype, RTuple) and isinstance(rtype, RTuple) and op in ("==", "!="):
        return self.compare_tuples(lreg, rreg, op, line)

    # Special case == and != when we can resolve the method call statically
    if op in ("==", "!="):
        value = self.translate_eq_cmp(lreg, rreg, op, line)
        if value is not None:
            return value

    # Special case various ops
    if op in ("is", "is not"):
        return self.translate_is_op(lreg, rreg, op, line)
    if (
        is_bool_or_bit_rprimitive(ltype)
        and is_bool_or_bit_rprimitive(rtype)
        and op in BOOL_BINARY_OPS
    ):
        if op in ComparisonOp.signed_ops:
            return self.bool_comparison_op(lreg, rreg, op, line)
        else:
            # op[0] strips a possible augmented-assignment '=' suffix.
            return self.bool_bitwise_op(lreg, rreg, op[0], line)
    if isinstance(rtype, RInstance) and op in ("in", "not in"):
        return self.translate_instance_contains(rreg, lreg, op, line)

    # Fixed-width int on the left: arithmetic or comparison without boxing.
    if is_fixed_width_rtype(ltype):
        if op in FIXED_WIDTH_INT_BINARY_OPS:
            # Augmented forms ('+=' etc.) share codegen with the plain op.
            op = op.removesuffix("=")
            if op != "//":
                op_id = int_op_to_id[op]
            else:
                # Floor division has no direct IntOp entry.
                op_id = IntOp.DIV
            if is_bool_or_bit_rprimitive(rtype):
                rreg = self.coerce(rreg, ltype, line)
                rtype = ltype
            if is_fixed_width_rtype(rtype) or is_tagged(rtype):
                return self.fixed_width_int_op(ltype, lreg, rreg, op_id, line)
            if isinstance(rreg, Integer):
                return self.fixed_width_int_op(
                    ltype, lreg, self.coerce(rreg, ltype, line), op_id, line
                )
        elif op in ComparisonOp.signed_ops:
            if is_int_rprimitive(rtype):
                rreg = self.coerce_int_to_fixed_width(rreg, ltype, line)
            elif is_bool_or_bit_rprimitive(rtype):
                rreg = self.coerce(rreg, ltype, line)
            op_id = ComparisonOp.signed_ops[op]
            if is_fixed_width_rtype(rreg.type):
                return self.comparison_op(lreg, rreg, op_id, line)
            if isinstance(rreg, Integer):
                return self.comparison_op(lreg, self.coerce(rreg, ltype, line), op_id, line)
    # Fixed-width int on the right only: mirror of the branch above.
    elif is_fixed_width_rtype(rtype):
        if op in FIXED_WIDTH_INT_BINARY_OPS:
            op = op.removesuffix("=")
            if op != "//":
                op_id = int_op_to_id[op]
            else:
                op_id = IntOp.DIV
            if isinstance(lreg, Integer):
                return self.fixed_width_int_op(
                    rtype, self.coerce(lreg, rtype, line), rreg, op_id, line
                )
            if is_tagged(ltype):
                return self.fixed_width_int_op(rtype, lreg, rreg, op_id, line)
            if is_bool_or_bit_rprimitive(ltype):
                lreg = self.coerce(lreg, rtype, line)
                return self.fixed_width_int_op(rtype, lreg, rreg, op_id, line)
        elif op in ComparisonOp.signed_ops:
            if is_int_rprimitive(ltype):
                lreg = self.coerce_int_to_fixed_width(lreg, rtype, line)
            elif is_bool_or_bit_rprimitive(ltype):
                lreg = self.coerce(lreg, rtype, line)
            op_id = ComparisonOp.signed_ops[op]
            if isinstance(lreg, Integer):
                return self.comparison_op(self.coerce(lreg, rtype, line), rreg, op_id, line)
            if is_fixed_width_rtype(lreg.type):
                return self.comparison_op(lreg, rreg, op_id, line)

    # Mixed int/float operands: promote the int side to float first.
    if is_float_rprimitive(ltype) or is_float_rprimitive(rtype):
        if isinstance(lreg, Integer):
            lreg = Float(float(lreg.numeric_value()))
        elif isinstance(rreg, Integer):
            rreg = Float(float(rreg.numeric_value()))
        elif is_int_rprimitive(lreg.type):
            lreg = self.int_to_float(lreg, line)
        elif is_int_rprimitive(rreg.type):
            rreg = self.int_to_float(rreg, line)

    if is_float_rprimitive(lreg.type) and is_float_rprimitive(rreg.type):
        if op in float_comparison_op_to_id:
            return self.compare_floats(lreg, rreg, float_comparison_op_to_id[op], line)
        # Strip augmented-assignment suffix before looking up the float op.
        if op.endswith("="):
            base_op = op[:-1]
        else:
            base_op = op
        if base_op in float_op_to_id:
            return self.float_op(lreg, rreg, base_op, line)

    # Try dispatching a native dunder method before the generic fallback.
    dunder_op = self.dunder_op(lreg, rreg, op, line)
    if dunder_op:
        return dunder_op

    primitive_ops_candidates = binary_ops.get(op, [])
    target = self.matching_primitive_op(primitive_ops_candidates, [lreg, rreg], line)
    assert target, "Unsupported binary operation: %s" % op
    return target
def dunder_op(self, lreg: Value, rreg: Value | None, op: str, line: int) -> Value | None:
    """Dispatch a dunder method if applicable.

    For example for `a + b` it will use `a.__add__(b)` which can lead to higher
    performance due to the fact that the method could be already compiled and
    optimized instead of going all the way through `PyNumber_Add(a, b)` python
    api (making a jump into the python DL).
    """
    left_type = lreg.type
    if not isinstance(left_type, RInstance):
        # Static dispatch only works for native class instances.
        return None

    method_name = op_methods.get(op) if rreg else unary_op_methods.get(op)
    if method_name is None:
        return None
    if not left_type.class_ir.has_method(method_name):
        return None

    decl = left_type.class_ir.method_decl(method_name)
    if rreg:
        # Binary form: signature must be (self, other) with a compatible other.
        if len(decl.sig.args) != 2 or not is_subtype(rreg.type, decl.sig.args[1].type):
            return None
        if is_subtype(not_implemented_op.type, decl.sig.ret_type):
            # If the method is able to return NotImplemented, we should not optimize it.
            # We can just let go so it will be handled through the python api.
            return None
        return self.gen_method_call(lreg, method_name, [rreg], decl.sig.ret_type, line)

    # Unary form: signature must take only self.
    if len(decl.sig.args) != 1:
        return None
    return self.gen_method_call(lreg, method_name, [], decl.sig.ret_type, line)
def check_tagged_short_int(self, val: Value, line: int, negated: bool = False) -> Value:
    """Check if a tagged integer is a short integer.

    Return the result of the check (value of type 'bit').
    """
    # A short int is identified by its low tag bit being zero.
    tag_mask = Integer(1, c_pyssize_t_rprimitive, line)
    low_bit = self.int_op(c_pyssize_t_rprimitive, val, tag_mask, IntOp.AND, line)
    zero = Integer(0, c_pyssize_t_rprimitive, line)
    cmp_kind = ComparisonOp.NEQ if negated else ComparisonOp.EQ
    return self.comparison_op(low_bit, zero, cmp_kind, line)
def compare_strings(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Compare two strings.

    '==' and '!=' use specialized equality primitives (with literal fast
    paths); other comparisons go through the generic C compare function
    with explicit error checking.
    """
    if op == "==":
        # We can specialize this case if one or both values are string literals
        literal_fastpath = False

        def is_string_literal(value: Value) -> TypeGuard[LoadLiteral]:
            return isinstance(value, LoadLiteral) and is_str_rprimitive(value.type)

        if is_string_literal(lhs):
            if is_string_literal(rhs):
                # we can optimize out the check entirely in some constant-folded cases
                return self.true() if lhs.value == rhs.value else self.false()

            # if lhs argument is string literal, switch sides to match specializer C api
            lhs, rhs = rhs, lhs
            literal_fastpath = True
        elif is_string_literal(rhs):
            literal_fastpath = True

        if literal_fastpath:
            # Pass the literal's length so C code can reject by length first.
            literal_string = cast(str, cast(LoadLiteral, rhs).value)
            literal_length = Integer(len(literal_string), c_pyssize_t_rprimitive, line)
            return self.primitive_op(str_eq_literal, [lhs, rhs, literal_length], line)
        return self.primitive_op(str_eq, [lhs, rhs], line)
    elif op == "!=":
        # perform a standard equality check, then negate
        eq = self.compare_strings(lhs, rhs, "==", line)
        return self.add(ComparisonOp(eq, self.false(), ComparisonOp.EQ, line))

    # TODO: modify 'str' to use same interface as 'compare_bytes' as it would avoid
    # call to PyErr_Occurred() below
    compare_result = self.call_c(unicode_compare, [lhs, rhs], line)
    # -1 may mean either an error or an ordering result, so we must also
    # consult PyErr_Occurred() before trusting it.
    error_constant = Integer(-1, c_int_rprimitive, line)
    compare_error_check = self.add(
        ComparisonOp(compare_result, error_constant, ComparisonOp.EQ, line)
    )
    exception_check, propagate, final_compare = BasicBlock(), BasicBlock(), BasicBlock()
    branch = Branch(compare_error_check, exception_check, final_compare, Branch.BOOL)
    branch.negated = False
    self.add(branch)
    self.activate_block(exception_check)
    check_error_result = self.call_c(err_occurred_op, [], line)
    null = Integer(0, pointer_rprimitive, line)
    compare_error_check = self.add(
        ComparisonOp(check_error_result, null, ComparisonOp.NEQ, line)
    )
    branch = Branch(compare_error_check, propagate, final_compare, Branch.BOOL)
    branch.negated = False
    self.add(branch)
    self.activate_block(propagate)
    # A real error is pending: propagate it to the caller.
    self.call_c(keep_propagating_op, [], line)
    self.goto(final_compare)
    self.activate_block(final_compare)
    op_type = ComparisonOp.EQ if op == "==" else ComparisonOp.NEQ
    return self.add(ComparisonOp(compare_result, Integer(0, c_int_rprimitive), op_type, line))
def compare_bytes(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Compare two bytes values for equality ('==' or '!=')."""
    # The C helper's result is compared against 1 to decide equality.
    raw_result = self.call_c(bytes_compare, [lhs, rhs], line)
    cmp_kind = ComparisonOp.EQ if op == "==" else ComparisonOp.NEQ
    return self.add(ComparisonOp(raw_result, Integer(1, c_int_rprimitive), cmp_kind, line))
def compare_tuples(self, lhs: Value, rhs: Value, op: str, line: int = -1) -> Value:
    """Compare two tuples item by item.

    Supports '==' and '!='. Generates a chain of per-item comparisons that
    short-circuits as soon as the overall result is decided.
    """
    # type cast to pass mypy check
    assert isinstance(lhs.type, RTuple) and isinstance(rhs.type, RTuple), (lhs.type, rhs.type)
    equal = True if op == "==" else False
    result = Register(bool_rprimitive)
    # tuples of different lengths
    if len(lhs.type.types) != len(rhs.type.types):
        # Statically unequal: '==' is False, '!=' is True.
        self.add(Assign(result, self.false() if equal else self.true(), line))
        return result
    # empty tuples
    if len(lhs.type.types) == 0 and len(rhs.type.types) == 0:
        # Statically equal: '==' is True, '!=' is False.
        self.add(Assign(result, self.true() if equal else self.false(), line))
        return result
    length = len(lhs.type.types)
    false_assign, true_assign, out = BasicBlock(), BasicBlock(), BasicBlock()
    check_blocks = [BasicBlock() for _ in range(length)]
    lhs_items = [self.add(TupleGet(lhs, i, line)) for i in range(length)]
    rhs_items = [self.add(TupleGet(rhs, i, line)) for i in range(length)]

    # For '==': any unequal item short-circuits to False; all-equal ends True.
    # For '!=': any unequal item short-circuits to True; all-equal ends False.
    if equal:
        early_stop, final = false_assign, true_assign
    else:
        early_stop, final = true_assign, false_assign

    for i in range(len(lhs.type.types)):
        if i != 0:
            self.activate_block(check_blocks[i])
        lhs_item = lhs_items[i]
        rhs_item = rhs_items[i]
        compare = self.binary_op(lhs_item, rhs_item, op, line)
        # Cast to bool if necessary since most types uses comparison returning a object type
        # See generic_ops.py for more information
        if not is_bool_or_bit_rprimitive(compare.type):
            compare = self.primitive_op(bool_op, [compare], line)
        if i < len(lhs.type.types) - 1:
            branch = Branch(compare, early_stop, check_blocks[i + 1], Branch.BOOL)
        else:
            branch = Branch(compare, early_stop, final, Branch.BOOL)
        # if op is ==, we branch on false, else branch on true
        branch.negated = equal
        self.add(branch)
    self.activate_block(false_assign)
    self.add(Assign(result, self.false(), line))
    self.goto(out)
    self.activate_block(true_assign)
    self.add(Assign(result, self.true(), line))
    self.goto_and_activate(out)
    return result
def translate_instance_contains(self, inst: Value, item: Value, op: str, line: int) -> Value:
    """Lower 'in' / 'not in' against a native instance via __contains__."""
    contained = self.gen_method_call(inst, "__contains__", [item], None, line)
    if not is_bool_or_bit_rprimitive(contained.type):
        contained = self.primitive_op(bool_op, [contained], line)
    if op == "not in":
        # Invert the result by XORing with 1.
        contained = self.bool_bitwise_op(contained, Integer(1, rtype=bool_rprimitive), "^", line)
    return contained
def bool_bitwise_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
    """Apply a bitwise operator ('&', '|' or '^') to two bool values."""
    op_codes = {"&": IntOp.AND, "|": IntOp.OR, "^": IntOp.XOR}
    assert op in op_codes, op
    return self.add(IntOp(bool_rprimitive, lreg, rreg, op_codes[op], line))
def bool_comparison_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value:
    """Compare two bool values using a signed comparison operator."""
    return self.comparison_op(lreg, rreg, ComparisonOp.signed_ops[op], line)
def _non_specialized_unary_op(self, value: Value, op: str, line: int) -> Value:
    """Fallback for unary ops: try a native dunder, then a generic primitive."""
    if isinstance(value.type, RInstance):
        dunder_result = self.dunder_op(value, None, op, line)
        if dunder_result is not None:
            return dunder_result
    candidates = unary_ops.get(op, [])
    matched = self.matching_primitive_op(candidates, [value], line)
    assert matched, "Unsupported unary operation: %s" % op
    return matched
def unary_not(self, value: Value, line: int, *, likely_bool: bool = False) -> Value:
    """Perform unary 'not'.

    Args:
        likely_bool: The operand is likely a bool value, even if the type is something
            more general, so specialize for bool values
    """
    typ = value.type
    if is_bool_or_bit_rprimitive(typ):
        # Flip the single bit with XOR 1.
        mask = Integer(1, typ, line)
        return self.int_op(typ, value, mask, IntOp.XOR, line)
    if is_tagged(typ) or is_fixed_width_rtype(typ):
        # 'not x' for ints is just 'x == 0'.
        return self.binary_op(value, Integer(0), "==", line)
    if (
        is_str_rprimitive(typ)
        or is_list_rprimitive(typ)
        or is_tuple_rprimitive(typ)
        or is_dict_rprimitive(typ)
        or isinstance(typ, RInstance)
    ):
        # Compute truthiness and negate (recursion hits the bool case above).
        bool_val = self.bool_value(value)
        return self.unary_not(bool_val, line)
    if is_optional_type(typ):
        value_typ = optional_value_type(typ)
        assert value_typ
        if (
            is_str_rprimitive(value_typ)
            or is_list_rprimitive(value_typ)
            or is_tuple_rprimitive(value_typ)
            or is_dict_rprimitive(value_typ)
            or isinstance(value_typ, RInstance)
        ):
            # 'X | None' type: Check for None first and then specialize for X.
            res = Register(bit_rprimitive)
            cmp = self.add(ComparisonOp(value, self.none_object(), ComparisonOp.EQ, line))
            none, not_none, out = BasicBlock(), BasicBlock(), BasicBlock()
            self.add(Branch(cmp, none, not_none, Branch.BOOL))
            self.activate_block(none)
            # 'not None' is True.
            self.add(Assign(res, self.true()))
            self.goto(out)
            self.activate_block(not_none)
            val = self.unary_not(
                self.unbox_or_cast(value, value_typ, line, can_borrow=True, unchecked=True),
                line,
            )
            self.add(Assign(res, val))
            self.goto(out)
            self.activate_block(out)
            return res
    if likely_bool and is_object_rprimitive(typ):
        # First quickly check if it's a bool, and otherwise fall back to generic op.
        res = Register(bit_rprimitive)
        false, not_false, true, other = BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock()
        out = BasicBlock()
        # Identity-compare against the Py_True / Py_False singletons.
        cmp = self.add(ComparisonOp(value, self.true_object(), ComparisonOp.EQ, line))
        self.add(Branch(cmp, false, not_false, Branch.BOOL))
        self.activate_block(false)
        self.add(Assign(res, self.false()))
        self.goto(out)
        self.activate_block(not_false)
        cmp = self.add(ComparisonOp(value, self.false_object(), ComparisonOp.EQ, line))
        self.add(Branch(cmp, true, other, Branch.BOOL))
        self.activate_block(true)
        self.add(Assign(res, self.true()))
        self.goto(out)
        self.activate_block(other)
        # Not a bool object: use the generic fallback.
        val = self._non_specialized_unary_op(value, "not", line)
        self.add(Assign(res, val))
        self.goto(out)
        self.activate_block(out)
        return res
    return self._non_specialized_unary_op(value, "not", line)
def unary_minus(self, value: Value, line: int) -> Value:
    """Perform unary '-'."""
    rtype = value.type
    if isinstance(value, Integer):
        # Constant-fold the negation.
        # TODO: Overflow? Unsigned?
        return Integer(-value.numeric_value(), rtype, line)
    if isinstance(value, Float):
        return Float(-value.value, line)
    if is_fixed_width_rtype(rtype):
        # Translate to '0 - x'
        return self.int_op(rtype, Integer(0, rtype), value, IntOp.SUB, line)
    if is_float_rprimitive(rtype):
        return self.add(FloatNeg(value, line))
    return self._non_specialized_unary_op(value, "-", line)
def unary_plus(self, value: Value, line: int) -> Value:
    """Perform unary '+'."""
    rtype = value.type
    numeric_identity = (
        is_tagged(rtype)
        or is_float_rprimitive(rtype)
        or is_bool_or_bit_rprimitive(rtype)
        or is_fixed_width_rtype(rtype)
    )
    if numeric_identity:
        # '+' is a no-op for primitive numeric values.
        return value
    return self._non_specialized_unary_op(value, "+", line)
def unary_invert(self, value: Value, line: int) -> Value:
    """Perform unary '~'."""
    rtype = value.type
    if is_fixed_width_rtype(rtype):
        if rtype.is_signed:
            # Signed: 'x ^ -1' flips every bit.
            flip_mask = -1
        else:
            # Unsigned: XOR against an all-ones mask of the type's width.
            flip_mask = (1 << (rtype.size * 8)) - 1
        return self.int_op(rtype, value, Integer(flip_mask, rtype), IntOp.XOR, line)
    return self._non_specialized_unary_op(value, "~", line)
def unary_op(self, value: Value, op: str, line: int) -> Value:
    """Perform a unary operation."""
    handlers = {
        "not": self.unary_not,
        "-": self.unary_minus,
        "+": self.unary_plus,
        "~": self.unary_invert,
    }
    handler = handlers.get(op)
    if handler is None:
        raise RuntimeError("Unsupported unary operation: %s" % op)
    return handler(value, line)
def make_dict(self, key_value_pairs: Sequence[DictEntry], line: int) -> Value:
    """Build a dict from a display that may mix 'key: value' and '**value' entries.

    Plain pairs are batched into the initial dict constructor until the
    first '**' entry forces the dict to exist; later plain pairs are then
    added via __setitem__.
    """
    result: Value | None = None
    keys: list[Value] = []
    values: list[Value] = []
    for key, value in key_value_pairs:
        if key is not None:
            # key:value
            if result is None:
                # Dict not created yet: accumulate for the constructor.
                keys.append(key)
                values.append(value)
                continue

            self.translate_special_method_call(
                result, "__setitem__", [key, value], result_type=None, line=line
            )
        else:
            # **value
            if result is None:
                result = self._create_dict(keys, values, line)

            self.call_c(dict_update_in_display_op, [result, value], line=line)

    if result is None:
        # No '**' entries at all: build the dict in one go.
        result = self._create_dict(keys, values, line)

    return result
def new_list_op_with_length(self, length: Value, line: int) -> Value:
    """This function returns an uninitialized list.

    If the length is non-zero, the caller must initialize the list, before
    it can be made visible to user code -- otherwise the list object is broken.
    You might need further initialization with `new_list_set_item_op` op.

    Args:
        length: desired length of the new list. The rtype should be
            c_pyssize_t_rprimitive
        line: line number
    """
    call_args = [length]
    return self.call_c(new_list_op, call_args, line)
def new_list_op(self, values: list[Value], line: int) -> Value:
    """Create a new list from the given values.

    Small lists get inlined per-item stores; larger ones call a C helper.
    """
    length: list[Value] = [Integer(len(values), c_pyssize_t_rprimitive, line)]
    if len(values) >= LIST_BUILDING_EXPANSION_THRESHOLD:
        return self.call_c(list_build_op, length + values, line)

    # If the length of the list is less than the threshold,
    # LIST_BUILDING_EXPANSION_THRESHOLD, we directly expand the
    # for-loop and inline the SetMem operation, which is faster
    # than list_build_op, however generates more code.
    result_list = self.call_c(new_list_op, length, line)
    if not values:
        return result_list
    args = [self.coerce(item, object_rprimitive, line) for item in values]
    # Write directly into the list's item buffer.
    ob_item_base = self.add(PrimitiveOp([result_list], list_items, line))
    for i in range(len(values)):
        self.primitive_op(
            buf_init_item, [ob_item_base, Integer(i, c_pyssize_t_rprimitive), args[i]], line
        )
    # Keep the list alive while the raw buffer pointer is in use.
    self.add(KeepAlive([result_list]))
    return result_list
def new_set_op(self, values: list[Value], line: int) -> Value:
    """Create a new set containing the given values."""
    # 'new_set_op' here refers to the module-level primitive, not this method.
    return self.primitive_op(new_set_op, values, line)
def setup_rarray(
    self, item_type: RType, values: Sequence[Value], *, object_ptr: bool = False
) -> Value:
    """Declare and initialize a new RArray, returning its address."""
    arr = Register(RArray(item_type, len(values)))
    self.add(AssignMulti(arr, list(values)))
    pointer_type = object_pointer_rprimitive if object_ptr else c_pointer_rprimitive
    return self.add(LoadAddress(pointer_type, arr))
def shortcircuit_helper(
    self,
    op: str,
    expr_type: RType,
    left: Callable[[], Value],
    right: Callable[[], Value],
    line: int,
) -> Value:
    """Lower a short-circuiting 'and'/'or' expression.

    'left' and 'right' are thunks so the right operand is only evaluated
    when the left side doesn't decide the result.
    """
    # Having actual Phi nodes would be really nice here!
    target = Register(expr_type)
    # left_body takes the value of the left side, right_body the right
    left_body, right_body, next_block = BasicBlock(), BasicBlock(), BasicBlock()
    # true_body is taken if the left is true, false_body if it is false.
    # For 'and' the value is the right side if the left is true, and for 'or'
    # it is the right side if the left is false.
    true_body, false_body = (right_body, left_body) if op == "and" else (left_body, right_body)

    left_value = left()
    self.add_bool_branch(left_value, true_body, false_body)

    self.activate_block(left_body)
    left_coerced = self.coerce(left_value, expr_type, line)
    self.add(Assign(target, left_coerced))
    self.goto(next_block)

    self.activate_block(right_body)
    # Only now evaluate the right operand (short-circuit semantics).
    right_value = right()
    right_coerced = self.coerce(right_value, expr_type, line)
    self.add(Assign(target, right_coerced))
    self.goto(next_block)

    self.activate_block(next_block)
    return target
def bool_value(self, value: Value) -> Value:
    """Return bool(value).

    The result type can be bit_rprimitive or bool_rprimitive.

    Specializes per operand type (ints, strings, containers, native
    instances with __bool__, floats, optionals) before falling back to the
    generic bool primitive.
    """
    if is_bool_or_bit_rprimitive(value.type):
        result = value
    elif is_runtime_subtype(value.type, int_rprimitive):
        # Tagged ints: truthy iff nonzero.
        zero = Integer(0, short_int_rprimitive)
        result = self.comparison_op(value, zero, ComparisonOp.NEQ, value.line)
    elif is_fixed_width_rtype(value.type):
        zero = Integer(0, value.type)
        result = self.add(ComparisonOp(value, zero, ComparisonOp.NEQ))
    elif is_str_rprimitive(value.type):
        result = self.call_c(str_check_if_true, [value], value.line)
    elif (
        is_list_rprimitive(value.type)
        or is_dict_rprimitive(value.type)
        or is_tuple_rprimitive(value.type)
    ):
        # Containers: truthy iff non-empty.
        length = self.builtin_len(value, value.line)
        zero = Integer(0)
        result = self.binary_op(length, zero, "!=", value.line)
    elif (
        isinstance(value.type, RInstance)
        and value.type.class_ir.is_ext_class
        and value.type.class_ir.has_method("__bool__")
    ):
        # Directly call the __bool__ method on classes that have it.
        result = self.gen_method_call(value, "__bool__", [], bool_rprimitive, value.line)
    elif is_float_rprimitive(value.type):
        result = self.compare_floats(value, Float(0.0), FloatComparisonOp.NEQ, value.line)
    else:
        value_type = optional_value_type(value.type)
        if value_type is not None:
            not_none = self.translate_is_op(value, self.none_object(), "is not", value.line)
            always_truthy = False
            if isinstance(value_type, RInstance):
                # check whether X.__bool__ is always just the default (object.__bool__)
                if not value_type.class_ir.has_method(
                    "__bool__"
                ) and value_type.class_ir.is_method_final("__bool__"):
                    always_truthy = True

            if always_truthy:
                # Non-None instances without __bool__ are always truthy.
                result = not_none
            else:
                # "X | None" where X may be falsey and requires a check
                result = Register(bit_rprimitive)
                true, false, end = BasicBlock(), BasicBlock(), BasicBlock()
                branch = Branch(not_none, true, false, Branch.BOOL)
                self.add(branch)
                self.activate_block(true)
                # unbox_or_cast instead of coerce because we want the
                # type to change even if it is a subtype.
                remaining = self.unbox_or_cast(value, value_type, value.line)
                as_bool = self.bool_value(remaining)
                self.add(Assign(result, as_bool))
                self.goto(end)
                self.activate_block(false)
                self.add(Assign(result, Integer(0, bit_rprimitive)))
                self.goto(end)
                self.activate_block(end)
        else:
            # Generic fallback for arbitrary objects.
            result = self.primitive_op(bool_op, [value], value.line)
    return result
def add_bool_branch(self, value: Value, true: BasicBlock, false: BasicBlock) -> None:
    """Branch to 'true' or 'false' based on the truthiness of 'value'.

    Optional types get a None check first; non-None values may need a
    further truthiness check on the unwrapped value.
    """
    opt_value_type = optional_value_type(value.type)
    if opt_value_type is None:
        bool_value = self.bool_value(value)
        self.add(Branch(bool_value, true, false, Branch.BOOL))
    else:
        # Special-case optional types
        is_none = self.translate_is_op(value, self.none_object(), "is not", value.line)
        branch = Branch(is_none, true, false, Branch.BOOL)
        self.add(branch)
        always_truthy = False
        if isinstance(opt_value_type, RInstance):
            # check whether X.__bool__ is always just the default (object.__bool__)
            if not opt_value_type.class_ir.has_method(
                "__bool__"
            ) and opt_value_type.class_ir.is_method_final("__bool__"):
                always_truthy = True

        if not always_truthy:
            # Optional[X] where X may be falsey and requires a check
            # Retarget the not-None edge to a new block that re-checks
            # truthiness of the unwrapped value.
            branch.true = BasicBlock()
            self.activate_block(branch.true)
            # unbox_or_cast instead of coerce because we want the
            # type to change even if it is a subtype.
            remaining = self.unbox_or_cast(value, opt_value_type, value.line)
            self.add_bool_branch(remaining, true, false)
def call_c(
    self,
    desc: CFunctionDescription,
    args: list[Value],
    line: int,
    result_type: RType | None = None,
) -> Value:
    """Call function using C/native calling convention (not a Python callable).

    Args:
        desc: signature and behavior flags of the C function (argument
            types, error signalling, borrowing, truncation, etc.)
        args: argument values; they are coerced to the formal types below
        line: line number for error reporting
        result_type: if given, coerce the result to this type

    Returns:
        The call result, after any truncation and/or coercion.
    """
    # Handle void function via singleton RVoid instance
    coerced = []
    # Coerce fixed number arguments
    for i in range(min(len(args), len(desc.arg_types))):
        formal_type = desc.arg_types[i]
        arg = args[i]
        arg = self.coerce(arg, formal_type, line)
        coerced.append(arg)
    # Reorder args if necessary
    if desc.ordering is not None:
        assert desc.var_arg_type is None
        coerced = [coerced[i] for i in desc.ordering]
    # Coerce any var_arg
    var_arg_idx = -1
    if desc.var_arg_type is not None:
        var_arg_idx = len(desc.arg_types)
        for i in range(len(desc.arg_types), len(args)):
            arg = args[i]
            arg = self.coerce(arg, desc.var_arg_type, line)
            coerced.append(arg)
    # Add extra integer constant if any
    for item in desc.extra_int_constants:
        val, typ = item
        extra_int_constant = Integer(val, typ, line)
        coerced.append(extra_int_constant)
    error_kind = desc.error_kind
    if error_kind == ERR_NEG_INT:
        # Handled with an explicit comparison below instead of by CallC itself
        error_kind = ERR_NEVER
    target = self.add(
        CallC(
            desc.c_function_name,
            coerced,
            desc.return_type,
            desc.steals,
            desc.is_borrowed,
            error_kind,
            line,
            var_arg_idx,
            is_pure=desc.is_pure,
            returns_null=desc.returns_null,
            capsule=desc.capsule,
        )
    )
    if desc.is_borrowed:
        # If the result is borrowed, force the arguments to be
        # kept alive afterwards, as otherwise the result might be
        # immediately freed, at the risk of a dangling pointer.
        for arg in coerced:
            if not isinstance(arg, (Integer, LoadLiteral)):
                self.keep_alives.append(arg)
    if desc.error_kind == ERR_NEG_INT:
        # A negative return value signals an error: emit the explicit check.
        comp = ComparisonOp(target, Integer(0, desc.return_type, line), ComparisonOp.SGE, line)
        comp.error_kind = ERR_FALSE
        self.add(comp)
    if desc.truncated_type is None:
        result = target
    else:
        truncate = self.add(Truncate(target, desc.truncated_type))
        result = truncate
    if result_type and not is_runtime_subtype(result.type, result_type):
        if is_none_rprimitive(result_type):
            # Special case None return. The actual result may actually be a bool
            # and so we can't just coerce it.
            result = self.none()
        else:
            # Coerce the (possibly truncated) result rather than the raw
            # target, so a Truncate is not silently discarded. This mirrors
            # the equivalent logic in primitive_op().
            result = self.coerce(result, result_type, line, can_borrow=desc.is_borrowed)
    return result
def matching_call_c(
    self,
    candidates: list[CFunctionDescription],
    args: list[Value],
    line: int,
    result_type: RType | None = None,
    can_borrow: bool = False,
) -> Value | None:
    """Call the highest-priority candidate whose signature matches the args.

    A candidate matches when it has the right arity, each actual argument
    type is a subtype of the corresponding formal type, and borrowing is
    permitted if the candidate borrows. Return None when nothing matches.
    """
    best: CFunctionDescription | None = None
    for candidate in candidates:
        if len(candidate.arg_types) != len(args):
            continue
        if candidate.is_borrowed and not can_borrow:
            continue
        compatible = all(
            is_subtype(actual.type, formal)
            for actual, formal in zip(args, candidate.arg_types)
        )
        if not compatible:
            continue
        if best is None:
            best = candidate
        else:
            # Equal priorities would make the choice arbitrary.
            assert best.priority != candidate.priority, "Ambiguous:\n1) {}\n2) {}".format(
                best, candidate
            )
            if candidate.priority > best.priority:
                best = candidate
    if best is None:
        return None
    return self.call_c(best, args, line, result_type)
def primitive_op(
    self,
    desc: PrimitiveDescription,
    args: list[Value],
    line: int,
    result_type: RType | None = None,
) -> Value:
    """Add a primitive op.

    Primitives backed by a C function are delegated to call_c(); the rest
    are emitted as PrimitiveOp and expanded later in a lowering pass.
    """
    # Does this primitive map into calling a Python C API
    # or an internal mypyc C API function?
    if desc.c_function_name:
        # TODO: Generate PrimitiveOps here and transform them into CallC
        # ops only later in the lowering pass
        c_desc = CFunctionDescription(
            desc.name,
            desc.arg_types,
            desc.return_type,
            desc.var_arg_type,
            desc.truncated_type,
            desc.c_function_name,
            desc.error_kind,
            desc.steals,
            desc.is_borrowed,
            desc.ordering,
            desc.extra_int_constants,
            desc.priority,
            is_pure=desc.is_pure,
            returns_null=False,
            capsule=desc.capsule,
        )
        return self.call_c(c_desc, args, line, result_type=result_type)
    # This primitive gets transformed in a lowering pass to
    # lower-level IR ops using a custom transform function.
    coerced = []
    # Coerce fixed number arguments
    for i in range(min(len(args), len(desc.arg_types))):
        formal_type = desc.arg_types[i]
        arg = args[i]
        assert formal_type is not None  # TODO
        arg = self.coerce(arg, formal_type, line)
        coerced.append(arg)
    # Features unsupported on the PrimitiveOp path (only valid with call_c).
    assert desc.ordering is None
    assert desc.var_arg_type is None
    assert not desc.extra_int_constants
    target = self.add(PrimitiveOp(coerced, desc, line=line))
    if desc.is_borrowed:
        # If the result is borrowed, force the arguments to be
        # kept alive afterwards, as otherwise the result might be
        # immediately freed, at the risk of a dangling pointer.
        for arg in coerced:
            if not isinstance(arg, (Integer, LoadLiteral)):
                self.keep_alives.append(arg)
    if desc.error_kind == ERR_NEG_INT:
        # Negative return value signals an error: emit the explicit check.
        comp = ComparisonOp(target, Integer(0, desc.return_type, line), ComparisonOp.SGE, line)
        comp.error_kind = ERR_FALSE
        self.add(comp)
    assert desc.truncated_type is None
    result = target
    if result_type and not is_runtime_subtype(result.type, result_type):
        if is_none_rprimitive(result_type):
            # Special case None return. The actual result may actually be a bool
            # and so we can't just coerce it.
            result = self.none()
        else:
            result = self.coerce(result, result_type, line, can_borrow=desc.is_borrowed)
    return result
def matching_primitive_op(
    self,
    candidates: list[PrimitiveDescription],
    args: list[Value],
    line: int,
    result_type: RType | None = None,
    *,
    can_borrow: bool = False,
    strict: bool = True,
) -> Value | None:
    """Find primitive operation that is compatible with types of args.

    Return None if none of them match.
    """
    matching: PrimitiveDescription | None = None
    for desc in candidates:
        if len(desc.arg_types) != len(args):
            continue
        # Experimental primitives require the corresponding option to be on.
        if desc.experimental and not self.options.experimental_features:
            continue
        if all(
            # formal is not None and  # TODO
            is_subtype(actual.type, formal, relaxed=not strict)
            for actual, formal in zip(args, desc.arg_types)
        ) and (not desc.is_borrowed or can_borrow):
            if matching:
                # Among multiple matches, keep the higher-priority one;
                # equal priorities would make the choice arbitrary.
                assert matching.priority != desc.priority, "Ambiguous:\n1) {}\n2) {}".format(
                    matching, desc
                )
                if desc.priority > matching.priority:
                    matching = desc
            else:
                matching = desc
    if matching:
        return self.primitive_op(matching, args, line=line, result_type=result_type)
    if strict and any(prim.is_ambiguous for prim in candidates):
        # Also try a non-exact match if any primitives have ambiguous types.
        return self.matching_primitive_op(
            candidates, args, line, result_type, can_borrow=can_borrow, strict=False
        )
    return None
def int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int = -1) -> Value:
    """Emit a binary op on native integers.

    The op uses native/C semantics, which can differ from Python's.

    Args:
        type: the native int type, e.g. int64_rprimitive or int32_rprimitive
        op: an IntOp.* constant such as IntOp.ADD
    """
    op_ir = IntOp(type, lhs, rhs, op, line)
    return self.add(op_ir)
def float_op(self, lhs: Value, rhs: Value, op: str, line: int) -> Value:
    """Generate a native float binary arithmetic operation.

    This follows Python semantics (e.g. raise exception on division by zero).
    Add a FloatOp directly if you want low-level semantics.

    Args:
        op: Binary operator (e.g. '+' or '*')
    """
    op_id = float_op_to_id[op]
    if op_id in (FloatOp.DIV, FloatOp.MOD):
        # Emit a runtime zero check unless rhs is a known non-zero constant.
        if not (isinstance(rhs, Float) and rhs.value != 0.0):
            c = self.compare_floats(rhs, Float(0.0), FloatComparisonOp.EQ, line)
            err, ok = BasicBlock(), BasicBlock()
            self.add(Branch(c, err, ok, Branch.BOOL, rare=True))
            self.activate_block(err)
            if op_id == FloatOp.DIV:
                msg = "float division by zero"
            else:
                msg = "float modulo"
            self.add(RaiseStandardError(RaiseStandardError.ZERO_DIVISION_ERROR, msg, line))
            self.add(Unreachable())
            self.activate_block(ok)
    if op_id == FloatOp.MOD:
        # Adjust the result to match Python semantics (FloatOp follows C semantics).
        return self.float_mod(lhs, rhs, line)
    else:
        return self.add(FloatOp(lhs, rhs, op_id, line))
def float_mod(self, lhs: Value, rhs: Value, line: int) -> Value:
    """Perform x % y on floats using Python semantics."""
    # Start from the C-semantics remainder and patch it up below.
    mod = self.add(FloatOp(lhs, rhs, FloatOp.MOD, line))
    res = Register(float_rprimitive)
    self.add(Assign(res, mod))
    tricky, adjust, copysign, done = BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock()
    is_zero = self.add(FloatComparisonOp(res, Float(0.0), FloatComparisonOp.EQ, line))
    self.add(Branch(is_zero, copysign, tricky, Branch.BOOL))
    self.activate_block(tricky)
    same_signs = self.is_same_float_signs(lhs, rhs, line)
    self.add(Branch(same_signs, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    # Operands have different signs: shift the C remainder so the result
    # takes the sign of the denominator, as Python requires.
    adj = self.float_op(res, rhs, "+", line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(copysign)
    # If the remainder is zero, CPython ensures the result has the
    # same sign as the denominator.
    adj = self.primitive_op(copysign_op, [Float(0.0), rhs], line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def compare_floats(self, lhs: Value, rhs: Value, op: int, line: int) -> Value:
    """Emit a low-level float comparison (op is a FloatComparisonOp.* constant)."""
    comparison = FloatComparisonOp(lhs, rhs, op, line)
    return self.add(comparison)
def int_add(self, lhs: Value, rhs: Value | int) -> Value:
    """Add two native integers; the result has the type of lhs."""
    right = Integer(rhs, lhs.type) if isinstance(rhs, int) else rhs
    return self.int_op(lhs.type, lhs, right, IntOp.ADD, line=-1)
def int_sub(self, lhs: Value, rhs: Value | int) -> Value:
    """Subtract one native integer from another; the result has the type of lhs."""
    right = Integer(rhs, lhs.type) if isinstance(rhs, int) else rhs
    return self.int_op(lhs.type, lhs, right, IntOp.SUB, line=-1)
def int_mul(self, lhs: Value, rhs: Value | int) -> Value:
    """Multiply two native integers; the result has the type of lhs."""
    right = Integer(rhs, lhs.type) if isinstance(rhs, int) else rhs
    return self.int_op(lhs.type, lhs, right, IntOp.MUL, line=-1)
def fixed_width_int_op(
    self, type: RPrimitive, lhs: Value, rhs: Value, op: int, line: int
) -> Value:
    """Generate a binary op using Python fixed-width integer semantics.

    These may differ in overflow/rounding behavior from native/C ops.

    Args:
        type: Either int64_rprimitive or int32_rprimitive
        op: IntOp.* constant (e.g. IntOp.ADD)
    """
    lhs = self.coerce(lhs, type, line)
    rhs = self.coerce(rhs, type, line)
    if op == IntOp.DIV:
        # Division by a constant other than 0 or -1 needs no runtime
        # zero/overflow handling, so it can be emitted inline.
        if isinstance(rhs, Integer) and rhs.value not in (-1, 0):
            if not type.is_signed:
                return self.int_op(type, lhs, rhs, IntOp.DIV, line)
            else:
                # Inline simple division by a constant, so that C
                # compilers can optimize more
                return self.inline_fixed_width_divide(type, lhs, rhs, line)
        if is_int64_rprimitive(type):
            prim = int64_divide_op
        elif is_int32_rprimitive(type):
            prim = int32_divide_op
        elif is_int16_rprimitive(type):
            prim = int16_divide_op
        elif is_uint8_rprimitive(type):
            # For unsigned values truncating (C) and floor division agree,
            # so only a zero check is needed before the native op.
            self.check_for_zero_division(rhs, type, line)
            return self.int_op(type, lhs, rhs, op, line)
        else:
            assert False, type
        return self.call_c(prim, [lhs, rhs], line)
    if op == IntOp.MOD:
        if isinstance(rhs, Integer) and rhs.value not in (-1, 0):
            if not type.is_signed:
                return self.int_op(type, lhs, rhs, IntOp.MOD, line)
            else:
                # Inline simple % by a constant, so that C
                # compilers can optimize more
                return self.inline_fixed_width_mod(type, lhs, rhs, line)
        if is_int64_rprimitive(type):
            prim = int64_mod_op
        elif is_int32_rprimitive(type):
            prim = int32_mod_op
        elif is_int16_rprimitive(type):
            prim = int16_mod_op
        elif is_uint8_rprimitive(type):
            self.check_for_zero_division(rhs, type, line)
            return self.int_op(type, lhs, rhs, op, line)
        else:
            assert False, type
        return self.call_c(prim, [lhs, rhs], line)
    # Other ops need no special handling: the native op is used directly.
    return self.int_op(type, lhs, rhs, op, line)
def check_for_zero_division(self, rhs: Value, type: RType, line: int) -> None:
    """Emit a runtime check that raises ZeroDivisionError when rhs == 0."""
    error_block, continue_block = BasicBlock(), BasicBlock()
    is_zero = self.binary_op(rhs, Integer(0, type), "==", line)
    self.add(Branch(is_zero, error_block, continue_block, Branch.BOOL))
    self.activate_block(error_block)
    raise_op = RaiseStandardError(
        RaiseStandardError.ZERO_DIVISION_ERROR, "integer division or modulo by zero", line
    )
    self.add(raise_op)
    self.add(Unreachable())
    self.activate_block(continue_block)
def inline_fixed_width_divide(self, type: RType, lhs: Value, rhs: Value, line: int) -> Value:
    # Perform floor division (native division truncates)
    res = Register(type)
    div = self.int_op(type, lhs, rhs, IntOp.DIV, line)
    self.add(Assign(res, div))
    same_signs = self.is_same_native_int_signs(type, lhs, rhs, line)
    tricky, adjust, done = BasicBlock(), BasicBlock(), BasicBlock()
    self.add(Branch(same_signs, done, tricky, Branch.BOOL))
    self.activate_block(tricky)
    # Operands have different signs: the truncated quotient is one too
    # large unless the division was exact, which we detect by multiplying
    # the quotient back and comparing with lhs.
    mul = self.int_op(type, res, rhs, IntOp.MUL, line)
    mul_eq = self.add(ComparisonOp(mul, lhs, ComparisonOp.EQ, line))
    self.add(Branch(mul_eq, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    adj = self.int_op(type, res, Integer(1, type), IntOp.SUB, line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def inline_fixed_width_mod(self, type: RType, lhs: Value, rhs: Value, line: int) -> Value:
    # Perform floor modulus
    res = Register(type)
    mod = self.int_op(type, lhs, rhs, IntOp.MOD, line)
    self.add(Assign(res, mod))
    same_signs = self.is_same_native_int_signs(type, lhs, rhs, line)
    tricky, adjust, done = BasicBlock(), BasicBlock(), BasicBlock()
    self.add(Branch(same_signs, done, tricky, Branch.BOOL))
    self.activate_block(tricky)
    # Operands have different signs: a non-zero C remainder must be
    # shifted by rhs so the result takes the sign of the denominator.
    is_zero = self.add(ComparisonOp(res, Integer(0, type), ComparisonOp.EQ, line))
    self.add(Branch(is_zero, done, adjust, Branch.BOOL))
    self.activate_block(adjust)
    adj = self.int_op(type, res, rhs, IntOp.ADD, line)
    self.add(Assign(res, adj))
    self.add(Goto(done))
    self.activate_block(done)
    return res
def is_same_native_int_signs(self, type: RType, a: Value, b: Value, line: int) -> Value:
    """Return a bit that is true iff a and b have the same sign."""
    a_negative = self.add(ComparisonOp(a, Integer(0, type), ComparisonOp.SLT, line))
    b_negative = self.add(ComparisonOp(b, Integer(0, type), ComparisonOp.SLT, line))
    return self.add(ComparisonOp(a_negative, b_negative, ComparisonOp.EQ, line))
def is_same_float_signs(self, a: Value, b: Value, line: int) -> Value:
    """Return a bit that is true iff floats a and b have the same sign."""
    a_negative = self.add(FloatComparisonOp(a, Float(0.0), FloatComparisonOp.LT, line))
    b_negative = self.add(FloatComparisonOp(b, Float(0.0), FloatComparisonOp.LT, line))
    return self.add(ComparisonOp(a_negative, b_negative, ComparisonOp.EQ, line))
def comparison_op(self, lhs: Value, rhs: Value, op: int, line: int) -> Value:
    """Emit a native comparison (op is a ComparisonOp.* constant)."""
    comparison = ComparisonOp(lhs, rhs, op, line)
    return self.add(comparison)
def builtin_len(self, val: Value, line: int, use_pyssize_t: bool = False) -> Value:
    """Generate len(val).

    Return short_int_rprimitive by default.
    Return c_pyssize_t if use_pyssize_t is true (unshifted).
    """
    typ = val.type
    size_value = None
    # Fast paths: containers whose size can be read directly from the
    # object struct or via a specialized C helper.
    if is_list_rprimitive(typ) or is_tuple_rprimitive(typ) or is_bytes_rprimitive(typ):
        size_value = self.primitive_op(var_object_size, [val], line)
    elif is_set_rprimitive(typ) or is_frozenset_rprimitive(typ):
        elem_address = self.add(GetElementPtr(val, PySetObject, "used"))
        size_value = self.load_mem(elem_address, c_pyssize_t_rprimitive)
        # Keep the set alive while its struct field is being read.
        self.add(KeepAlive([val]))
    elif is_dict_rprimitive(typ):
        size_value = self.call_c(dict_ssize_t_size_op, [val], line)
    elif is_str_rprimitive(typ):
        size_value = self.call_c(str_ssize_t_size_op, [val], line)
    if size_value is not None:
        if use_pyssize_t:
            return size_value
        # Shift left by one to produce the tagged short int representation.
        offset = Integer(1, c_pyssize_t_rprimitive, line)
        return self.int_op(short_int_rprimitive, size_value, offset, IntOp.LEFT_SHIFT, line)
    if isinstance(typ, RInstance):
        # Native classes: call __len__ directly and reject negative results.
        # TODO: Support use_pyssize_t
        assert not use_pyssize_t
        length = self.gen_method_call(val, "__len__", [], int_rprimitive, line)
        length = self.coerce(length, int_rprimitive, line)
        ok, fail = BasicBlock(), BasicBlock()
        cond = self.binary_op(length, Integer(0), ">=", line)
        self.add_bool_branch(cond, ok, fail)
        self.activate_block(fail)
        self.add(
            RaiseStandardError(
                RaiseStandardError.VALUE_ERROR, "__len__() should return >= 0", line
            )
        )
        self.add(Unreachable())
        self.activate_block(ok)
        return length
    op = self.matching_primitive_op(function_ops["builtins.len"], [val], line)
    if op is not None:
        return op
    # Fallback generic case
    if use_pyssize_t:
        return self.call_c(generic_ssize_t_len_op, [val], line)
    else:
        return self.call_c(generic_len_op, [val], line)
def new_tuple(self, items: list[Value], line: int) -> Value:
    """Build a tuple from items; reuse the shared empty-tuple constant when empty."""
    if not items:
        return self.call_c(load_empty_tuple_constant_op, [], line)
    length: Value = Integer(len(items), c_pyssize_t_rprimitive)
    return self.call_c(new_tuple_op, [length, *items], line)
def new_tuple_with_length(self, length: Value, line: int) -> Value:
    """Allocate an uninitialized tuple of the given length.

    When the length is non-zero the caller must fill in every item (e.g.
    via `new_tuple_set_item_op`) before the tuple is exposed to user code;
    an unfilled tuple object is broken.

    Args:
        length: desired length; must be a c_pyssize_t_rprimitive value
        line: line number
    """
    return self.call_c(new_tuple_with_length_op, [length], line)
def int_to_float(self, n: Value, line: int) -> Value:
    """Convert an int value to a float via the int_to_float primitive."""
    return self.primitive_op(int_to_float_op, [n], line)
def set_immortal_if_free_threaded(self, v: Value, line: int) -> None:
    """Make an object immortal on free-threaded builds (to avoid contention)."""
    # No-op (no code emitted) unless compiling on a free-threaded build
    # of Python 3.14 or later.
    if IS_FREE_THREADED and sys.version_info >= (3, 14):
        self.primitive_op(set_immortal_op, [v], line)
# Internal helpers
def decompose_union_helper(
    self,
    obj: Value,
    rtype: RUnion,
    result_type: RType,
    process_item: Callable[[Value], Value],
    line: int,
) -> Value:
    """Generate isinstance() + specialized operations for union items.

    Say, for Union[A, B] generate ops resembling this (pseudocode):

        if isinstance(obj, A):
            result = <result of process_item(cast(A, obj)>
        else:
            result = <result of process_item(cast(B, obj)>

    Args:
        obj: value with a union type
        rtype: the union type
        result_type: result of the operation
        process_item: callback to generate op for a single union item (arg is coerced
            to union item type)
        line: line number
    """
    # TODO: Optimize cases where a single operation can handle multiple union items
    # (say a method is implemented in a common base class)
    fast_items = []
    rest_items = []
    for item in rtype.items:
        if isinstance(item, RInstance):
            fast_items.append(item)
        else:
            # For everything but RInstance we fall back to C API
            rest_items.append(item)
    exit_block = BasicBlock()
    result = Register(result_type)
    for i, item in enumerate(fast_items):
        # Is there any alternative left after this one? If not, this item
        # is taken unconditionally and needs no isinstance guard.
        more_types = i < len(fast_items) - 1 or rest_items
        if more_types:
            # We are not at the final item so we need one more branch
            op = self.isinstance_native(obj, item.class_ir, line)
            true_block, false_block = BasicBlock(), BasicBlock()
            self.add_bool_branch(op, true_block, false_block)
            self.activate_block(true_block)
        coerced = self.coerce(obj, item, line)
        temp = process_item(coerced)
        temp2 = self.coerce(temp, result_type, line)
        self.add(Assign(result, temp2))
        self.goto(exit_block)
        if more_types:
            self.activate_block(false_block)
    if rest_items:
        # For everything else we use generic operation. Use force=True to drop the
        # union type.
        coerced = self.coerce(obj, object_rprimitive, line, force=True)
        temp = process_item(coerced)
        temp2 = self.coerce(temp, result_type, line)
        self.add(Assign(result, temp2))
        self.goto(exit_block)
    self.activate_block(exit_block)
    return result
def translate_special_method_call(
    self,
    base_reg: Value,
    name: str,
    args: list[Value],
    result_type: RType | None,
    line: int,
    can_borrow: bool = False,
) -> Value | None:
    """Translate a method call that has a specialized (non-generic) implementation.

    Such methods have dedicated generated code that is more direct than
    calling through the generic PyObject API.

    Return None when no specialized translation exists; otherwise return
    the target register.
    """
    candidates = method_call_ops.get(name, [])
    return self.matching_primitive_op(
        candidates, [base_reg] + args, line, result_type, can_borrow=can_borrow
    )
def translate_eq_cmp(self, lreg: Value, rreg: Value, expr_op: str, line: int) -> Value | None:
    """Add an equality comparison operation.

    Note that this doesn't cover all possible types.

    Args:
        expr_op: either '==' or '!='

    Returns:
        The comparison result, or None if no specialized comparison applies
        here (the caller must fall back to a generic comparison).
    """
    ltype = lreg.type
    rtype = rreg.type
    # Specialized comparisons for str/str and bytes/bytes.
    if is_str_rprimitive(ltype) and is_str_rprimitive(rtype):
        return self.compare_strings(lreg, rreg, expr_op, line)
    if is_bytes_rprimitive(ltype) and is_bytes_rprimitive(rtype):
        return self.compare_bytes(lreg, rreg, expr_op, line)
    lopt = optional_value_type(ltype)
    ropt = optional_value_type(rtype)
    # Can we do a quick comparison of two optional types (special case None values)?
    fast_opt_eq = False
    if lopt is not None:
        if ropt is not None and is_same_type(lopt, ropt) and self._never_equal_to_none(lopt):
            fast_opt_eq = True
        if is_same_type(lopt, rtype) and self._never_equal_to_none(lopt):
            fast_opt_eq = True
    elif ropt is not None:
        if is_same_type(ropt, ltype) and self._never_equal_to_none(ropt):
            fast_opt_eq = True
    if fast_opt_eq:
        return self._translate_fast_optional_eq_cmp(lreg, rreg, expr_op, line)
    if not (isinstance(ltype, RInstance) and ltype == rtype):
        return None
    class_ir = ltype.class_ir
    # Check whether any subclasses of the operand redefines __eq__
    # or it might be redefined in a Python parent class or by
    # dataclasses
    cmp_varies_at_runtime = (
        not class_ir.is_method_final("__eq__")
        or not class_ir.is_method_final("__ne__")
        or class_ir.inherits_python
        or class_ir.is_augmented
    )
    if cmp_varies_at_runtime:
        # We might need to call left.__eq__(right) or right.__eq__(left)
        # depending on which is the more specific type.
        return None
    if not class_ir.has_method("__eq__"):
        # There's no __eq__ defined, so just use object identity.
        identity_ref_op = "is" if expr_op == "==" else "is not"
        return self.translate_is_op(lreg, rreg, identity_ref_op, line)
    # __eq__/__ne__ are final and defined natively: call directly.
    return self.gen_method_call(lreg, op_methods[expr_op], [rreg], ltype, line)
def _never_equal_to_none(self, typ: RType) -> bool:
    """Can values of this type never compare equal to None?"""
    # TODO: Support RInstance with no custom __eq__/__ne__ and other primitive types.
    if is_str_rprimitive(typ):
        return True
    return is_bytes_rprimitive(typ)
def _translate_fast_optional_eq_cmp(
    self, lreg: Value, rreg: Value, expr_op: str, line: int
) -> Value:
    """Generate eq/ne fast path between 'X | None' and ('X | None' or X).

    Assume 'X' never compares equal to None.
    """
    # Normalize operand order so lreg is the one with the union type.
    if not isinstance(lreg.type, RUnion):
        lreg, rreg = rreg, lreg
    value_typ = optional_value_type(lreg.type)
    assert value_typ
    res = Register(bool_rprimitive)
    # Fast path: left value is None?
    cmp = self.add(ComparisonOp(lreg, self.none_object(), ComparisonOp.EQ, line))
    l_none = BasicBlock()
    l_not_none = BasicBlock()
    out = BasicBlock()
    self.add(Branch(cmp, l_none, l_not_none, Branch.BOOL))
    self.activate_block(l_none)
    if not isinstance(rreg.type, RUnion):
        # Left is None and right can't be None: '==' is False, '!=' is True.
        val = self.false() if expr_op == "==" else self.true()
        self.add(Assign(res, val))
    else:
        # Left is None: the result is whether right is (not) None as well.
        op = ComparisonOp.EQ if expr_op == "==" else ComparisonOp.NEQ
        cmp = self.add(ComparisonOp(rreg, self.none_object(), op, line))
        self.add(Assign(res, cmp))
    self.goto(out)
    self.activate_block(l_not_none)
    if not isinstance(rreg.type, RUnion):
        # Both operands are known to be not None, perform specialized comparison
        eq = self.translate_eq_cmp(
            self.unbox_or_cast(lreg, value_typ, line, can_borrow=True, unchecked=True),
            rreg,
            expr_op,
            line,
        )
        assert eq is not None
        self.add(Assign(res, eq))
    else:
        r_none = BasicBlock()
        r_not_none = BasicBlock()
        # Fast path: right value is None?
        cmp = self.add(ComparisonOp(rreg, self.none_object(), ComparisonOp.EQ, line))
        self.add(Branch(cmp, r_none, r_not_none, Branch.BOOL))
        self.activate_block(r_none)
        # None vs not-None
        val = self.false() if expr_op == "==" else self.true()
        self.add(Assign(res, val))
        self.goto(out)
        self.activate_block(r_not_none)
        # Both operands are known to be not None, perform specialized comparison
        eq = self.translate_eq_cmp(
            self.unbox_or_cast(lreg, value_typ, line, can_borrow=True, unchecked=True),
            self.unbox_or_cast(rreg, value_typ, line, can_borrow=True, unchecked=True),
            expr_op,
            line,
        )
        assert eq is not None
        self.add(Assign(res, eq))
    self.goto(out)
    self.activate_block(out)
    return res
def translate_is_op(self, lreg: Value, rreg: Value, expr_op: str, line: int) -> Value:
    """Compare object identities.

    Args:
        expr_op: either 'is' or 'is not'
    """
    if expr_op == "is":
        op = ComparisonOp.EQ
    else:
        op = ComparisonOp.NEQ
    left = self.coerce(lreg, object_rprimitive, line)
    right = self.coerce(rreg, object_rprimitive, line)
    return self.add(ComparisonOp(left, right, op, line))
def _create_dict(self, keys: list[Value], values: list[Value], line: int) -> Value:
    """Create a (possibly empty) dict from parallel key/value lists."""
    # keys and values are expected to have the same number of items
    if not keys:
        return self.call_c(dict_new_op, [], line)
    count: Value = Integer(len(keys), c_pyssize_t_rprimitive)
    # Interleave keys and values: [k0, v0, k1, v1, ...]
    interleaved: list[Value] = []
    for key, value in zip(keys, values):
        interleaved.append(key)
        interleaved.append(value)
    return self.call_c(dict_build_op, [count] + interleaved, line)
def error(self, msg: str, line: int) -> None:
    """Report a compile error at the given line of the current module."""
    assert self.errors is not None, "cannot generate errors in this compiler phase"
    self.errors.error(msg, self.module_path, line)
def num_positional_args(arg_values: list[Value], arg_kinds: list[ArgKind] | None) -> int:
    """Return the number of positional arguments in a call.

    Args:
        arg_values: values of all arguments (only their count is used)
        arg_kinds: kind of each argument, or None if all are positional

    Returns:
        Count of arguments whose kind is ARG_POS; if arg_kinds is None,
        every argument is assumed positional.
    """
    if arg_kinds is None:
        return len(arg_values)
    # Named/star arguments are excluded: only plain positional args count.
    return sum(1 for kind in arg_kinds if kind == ARG_POS)
| LowLevelIRBuilder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 92226,
"end": 92462
} | class ____(sgqlc.types.Enum):
"""The targets supported for rulesets
Enumeration Choices:
* `BRANCH`: Branch
* `TAG`: Tag
"""
__schema__ = github_schema
__choices__ = ("BRANCH", "TAG")
| RepositoryRulesetTarget |
python | google__jax | jax/experimental/array_serialization/serialization.py | {
"start": 3545,
"end": 5745
} | class ____(util.StrictABC):
"""Interface for checkpointing GDAs asynchronously.
This class manages the state of an ongoing asynchronous checkpoint.
For example, say a checkpoint happens on every step. If you checkpoint on
step 1 and after some computation the model is on checkpoint 2. But step 1's
checkpoint hasn't finished committing to the storage layer yet. So until that
is finished, checkpoint for step 2 will need to be blocked. Maintaining a
class allows to maintain that state.
Examples:
Below is a simplified training loop:
```
# Call this at the start of your program.
jax.distributed.initialize()
manager = GlobalAsyncCheckpointManager()
# Restore checkpoint if available or initialize the train_state from
# init_fn().
train_state = manager.deserialize(...)
while ...:
if step % num_steps_between_checkpoints == 0:
manager.serialize(train_state, temp_checkpoint_dir=...,
final_checkpoint_dir=...)
train_state = train_step(train_state, input)
# This is a non-blocking call.
manager.check_for_errors()
manager.serialize(train_state, temp_checkpoint_dir=...,
final_checkpoint_dir=...)
# Wait before the end of the program for the checkpoint to finish. This is a
# blocking call.
manager.wait_until_finished()
```
"""
@abc.abstractmethod
def check_for_errors(self):
"""Checks if any errors have been raised in the child thread.
This is a non-blocking call that can be called in the main thread.
"""
@abc.abstractmethod
def wait_until_finished(self):
"""Blocks until serialization has finished."""
@abc.abstractmethod
def serialize(self, arrays, tensorstore_specs, *,
on_commit_callback: Callable[[], None]):
"""Serializes GDAs to TensorStore."""
@abc.abstractmethod
def deserialize(self, shardings: Sequence[sharding.Sharding],
tensorstore_specs: Sequence[dict[str, Any]],
global_shapes: Sequence[array.Shape] | None = None,
dtypes: Sequence[typing.DTypeLike] | None = None):
"""Deserializes GDAs from TensorStore."""
| GlobalAsyncCheckpointManagerBase |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 8332,
"end": 10038
} | class ____(Benchmark):
params = [
[
(2,1000,1000),
(8,1000,1000),
(16,1000,1000)
],
[2, 10, 100, 400, 1000],
]
param_names = ['(m, n1, n2)', 'Nr']
def setup(self, mn1n2, Nr):
m, n1, n2 = mn1n2
data1 = np.random.uniform(size=(n1, m))
data2 = np.random.uniform(size=(n2, m))
self.w1 = np.ones(len(data1))
self.w2 = np.ones(len(data2))
self.T1d = cKDTree(data1, leafsize=1)
self.T2d = cKDTree(data2, leafsize=1)
self.T1s = cKDTree(data1, leafsize=8)
self.T2s = cKDTree(data2, leafsize=8)
self.r = np.linspace(0, 0.5, Nr)
def time_count_neighbors_deep(self, mn1n2, Nr):
"""
Count neighbors for a very deep kd-tree
dim | # points T1 | # points T2 | Nr
"""
self.T1d.count_neighbors(self.T2d, self.r)
def time_count_neighbors_shallow(self, mn1n2, Nr):
"""
Count neighbors for a shallow kd-tree
dim | # points T1 | # points T2 | Nr
"""
self.T1s.count_neighbors(self.T2s, self.r)
def generate_spherical_points(num_points):
# generate uniform points on sphere
# see: https://stackoverflow.com/a/23785326
rng = np.random.default_rng(123)
points = rng.normal(size=(num_points, 3))
points /= np.linalg.norm(points, axis=1)[:, np.newaxis]
return points
def generate_circle_points(num_points):
# try to avoid full circle degeneracy
# at 2 * pi
angles = np.linspace(0, 1.9999 * np.pi, num_points)
points = np.empty(shape=(num_points, 2))
points[..., 0] = np.cos(angles)
points[..., 1] = np.sin(angles)
return points
| CNeighbors |
python | django__django | django/db/migrations/writer.py | {
"start": 11739,
"end": 11933
} | class ____(migrations.Migration):
%(replaces_str)s%(initial_str)s%(atomic_str)s%(run_before_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| Migration |
python | getsentry__sentry | tests/sentry/loader/test_browsersdkversion.py | {
"start": 440,
"end": 2394
} | class ____(TestCase):
def test_get_all_browser_sdk_version_versions(self) -> None:
assert "latest" in get_all_browser_sdk_version_versions()
assert "4.x" in get_all_browser_sdk_version_versions()
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file", return_value=MOCK_VERSIONS
)
def test_get_highest_browser_sdk_version_from_versions(
self, load_version_from_file: mock.MagicMock
) -> None:
assert str(get_highest_browser_sdk_version(load_version_from_file())) == "10.2.3"
@mock.patch(
"sentry.loader.browsersdkversion.load_version_from_file", return_value=MOCK_VERSIONS
)
def test_get_highest_selected_version(self, load_version_from_file: mock.MagicMock) -> None:
assert str(match_selected_version_to_browser_sdk_version("4.x")) == "4.6.4"
assert str(match_selected_version_to_browser_sdk_version("5.x")) == "5.10.1"
assert str(match_selected_version_to_browser_sdk_version("10.x")) == "10.2.3"
assert (
str(match_selected_version_to_browser_sdk_version("latest")) == "5.10.1"
) # Should not select version 8, since v8 is the first version that doesn't support latest
@mock.patch("sentry.loader.browsersdkversion.load_version_from_file", return_value=[])
def test_get_highest_selected_version_no_version(
self, load_version_from_file: mock.MagicMock
) -> None:
settings.JS_SDK_LOADER_SDK_VERSION = "0.5.2"
assert (
str(match_selected_version_to_browser_sdk_version("4.x"))
== settings.JS_SDK_LOADER_SDK_VERSION
)
assert (
str(match_selected_version_to_browser_sdk_version("5.x"))
== settings.JS_SDK_LOADER_SDK_VERSION
)
assert (
str(match_selected_version_to_browser_sdk_version("latest"))
== settings.JS_SDK_LOADER_SDK_VERSION
)
| BrowserSdkVersionTestCase |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 57538,
"end": 57804
} | class ____(Token):
""" Represents 'std::runtime_error' in C++ and 'RuntimeError' in Python.
Note that the latter is uncommon, and you might want to use e.g. ValueError.
"""
__slots__ = _fields = ('message',)
_construct_message = String
| RuntimeError_ |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 4841,
"end": 5345
} | class ____(sgqlc.types.Enum):
"""Collaborators affiliation level with a subject.
Enumeration Choices:
* `ALL`: All collaborators the authenticated user can see.
* `DIRECT`: All collaborators with permissions to an organization-
owned subject, regardless of organization membership status.
* `OUTSIDE`: All outside collaborators of an organization-owned
subject.
"""
__schema__ = github_schema
__choices__ = ("ALL", "DIRECT", "OUTSIDE")
| CollaboratorAffiliation |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 26374,
"end": 26694
} | class ____(ISecuredView):
"""*internal only*. A multiview is a secured view that is a
collection of other views. Each of the views is associated with
zero or more predicates. Not an API."""
def add(view, predicates, order, accept=None, phash=None):
"""Add a view to the multiview."""
| IMultiView |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/c_config.py | {
"start": 16464,
"end": 26444
} | class ____(Task.Task):
color = 'PINK'
def run(self):
cmd = [self.inputs[0].abspath()] + getattr(self.generator, 'test_args', [])
if getattr(self.generator, 'rpath', None):
if getattr(self.generator, 'define_ret', False):
self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd)
else:
self.generator.bld.retval = self.generator.bld.exec_command(cmd)
else:
env = self.env.env or {}
env.update(dict(os.environ))
for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'PATH'):
env[var] = self.inputs[0].parent.abspath() + os.path.pathsep + env.get(var, '')
if getattr(self.generator, 'define_ret', False):
self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd, env=env)
else:
self.generator.bld.retval = self.generator.bld.exec_command(cmd, env=env)
@feature('test_exec')
@after_method('apply_link')
def test_exec_fun(self):
self.create_task('test_exec', self.link_task.outputs[0])
@conf
def check_cxx(self, *k, **kw):
kw['compiler'] = 'cxx'
return self.check(*k, **kw)
@conf
def check_cc(self, *k, **kw):
kw['compiler'] = 'c'
return self.check(*k, **kw)
@conf
def set_define_comment(self, key, comment):
coms = self.env.DEFINE_COMMENTS
if not coms:
coms = self.env.DEFINE_COMMENTS = {}
coms[key] = comment or ''
@conf
def get_define_comment(self, key):
coms = self.env.DEFINE_COMMENTS or {}
return coms.get(key, '')
@conf
def define(self, key, val, quote=True, comment=''):
assert isinstance(key, str)
if not key:
return
if val is True:
val = 1
elif val in (False, None):
val = 0
if isinstance(val, int) or isinstance(val, float):
s = '%s=%s'
else:
s = quote and '%s="%s"' or '%s=%s'
app = s % (key, str(val))
ban = key + '='
lst = self.env.DEFINES
for x in lst:
if x.startswith(ban):
lst[lst.index(x)] = app
break
else:
self.env.append_value('DEFINES', app)
self.env.append_unique(DEFKEYS, key)
self.set_define_comment(key, comment)
@conf
def undefine(self, key, comment=''):
assert isinstance(key, str)
if not key:
return
ban = key + '='
lst = [x for x in self.env.DEFINES if not x.startswith(ban)]
self.env.DEFINES = lst
self.env.append_unique(DEFKEYS, key)
self.set_define_comment(key, comment)
@conf
def define_cond(self, key, val, comment=''):
assert isinstance(key, str)
if not key:
return
if val:
self.define(key, 1, comment=comment)
else:
self.undefine(key, comment=comment)
@conf
def is_defined(self, key):
assert key and isinstance(key, str)
ban = key + '='
for x in self.env.DEFINES:
if x.startswith(ban):
return True
return False
@conf
def get_define(self, key):
assert key and isinstance(key, str)
ban = key + '='
for x in self.env.DEFINES:
if x.startswith(ban):
return x[len(ban):]
return None
@conf
def have_define(self, key):
return (self.env.HAVE_PAT or 'HAVE_%s') % Utils.quote_define_name(key)
@conf
def write_config_header(
self, configfile='', guard='', top=False, defines=True, headers=False, remove=True, define_prefix=''
):
if not configfile:
configfile = WAF_CONFIG_H
waf_guard = guard or 'W_%s_WAF' % Utils.quote_define_name(configfile)
node = top and self.bldnode or self.path.get_bld()
node = node.make_node(configfile)
node.parent.mkdir()
lst = ['/* WARNING! All changes made to this file will be lost! */\n']
lst.append('#ifndef %s\n#define %s\n' % (waf_guard, waf_guard))
lst.append(self.get_config_header(defines, headers, define_prefix=define_prefix))
lst.append('\n#endif /* %s */\n' % waf_guard)
node.write('\n'.join(lst))
self.env.append_unique(Build.CFG_FILES, [node.abspath()])
if remove:
for key in self.env[DEFKEYS]:
self.undefine(key)
self.env[DEFKEYS] = []
@conf
def get_config_header(self, defines=True, headers=False, define_prefix=''):
lst = []
if self.env.WAF_CONFIG_H_PRELUDE:
lst.append(self.env.WAF_CONFIG_H_PRELUDE)
if headers:
for x in self.env[INCKEYS]:
lst.append('#include <%s>' % x)
if defines:
tbl = {}
for k in self.env.DEFINES:
a, _, b = k.partition('=')
tbl[a] = b
for k in self.env[DEFKEYS]:
caption = self.get_define_comment(k)
if caption:
caption = ' /* %s */' % caption
try:
txt = '#define %s%s %s%s' % (define_prefix, k, tbl[k], caption)
except KeyError:
txt = '/* #undef %s%s */%s' % (define_prefix, k, caption)
lst.append(txt)
return "\n".join(lst)
@conf
def cc_add_flags(conf):
conf.add_os_flags('CPPFLAGS', dup=False)
conf.add_os_flags('CFLAGS', dup=False)
@conf
def cxx_add_flags(conf):
conf.add_os_flags('CPPFLAGS', dup=False)
conf.add_os_flags('CXXFLAGS', dup=False)
@conf
def link_add_flags(conf):
conf.add_os_flags('LINKFLAGS', dup=False)
conf.add_os_flags('LDFLAGS', dup=False)
@conf
def cc_load_tools(conf):
if not conf.env.DEST_OS:
conf.env.DEST_OS = Utils.unversioned_sys_platform()
conf.load('c')
@conf
def cxx_load_tools(conf):
if not conf.env.DEST_OS:
conf.env.DEST_OS = Utils.unversioned_sys_platform()
conf.load('cxx')
@conf
def get_cc_version(conf, cc, gcc=False, icc=False, clang=False):
cmd = cc + ['-dM', '-E', '-']
env = conf.env.env or None
try:
out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env)
except Errors.WafError:
conf.fatal('Could not determine the compiler version %r' % cmd)
if gcc:
if out.find('__INTEL_COMPILER') >= 0:
conf.fatal('The intel compiler pretends to be gcc')
if out.find('__GNUC__') < 0 and out.find('__clang__') < 0:
conf.fatal('Could not determine the compiler type')
if icc and out.find('__INTEL_COMPILER') < 0:
conf.fatal('Not icc/icpc')
if clang and out.find('__clang__') < 0:
conf.fatal('Not clang/clang++')
if not clang and out.find('__clang__') >= 0:
conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure')
k = {}
if icc or gcc or clang:
out = out.splitlines()
for line in out:
lst = shlex.split(line)
if len(lst) > 2:
key = lst[1]
val = lst[2]
k[key] = val
def isD(var):
return var in k
if not conf.env.DEST_OS:
conf.env.DEST_OS = ''
for i in MACRO_TO_DESTOS:
if isD(i):
conf.env.DEST_OS = MACRO_TO_DESTOS[i]
break
else:
if isD('__APPLE__') and isD('__MACH__'):
conf.env.DEST_OS = 'darwin'
elif isD('__unix__'):
conf.env.DEST_OS = 'generic'
if isD('__ELF__'):
conf.env.DEST_BINFMT = 'elf'
elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'):
conf.env.DEST_BINFMT = 'pe'
if not conf.env.IMPLIBDIR:
conf.env.IMPLIBDIR = conf.env.LIBDIR
conf.env.LIBDIR = conf.env.BINDIR
elif isD('__APPLE__'):
conf.env.DEST_BINFMT = 'mac-o'
if not conf.env.DEST_BINFMT:
conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS)
for i in MACRO_TO_DEST_CPU:
if isD(i):
conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i]
break
Logs.debug(
'ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])
)
if icc:
ver = k['__INTEL_COMPILER']
conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1])
else:
if isD('__clang__') and isD('__clang_major__'):
conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__'])
else:
conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0'))
return k
@conf
def get_xlc_version(conf, cc):
cmd = cc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlc %r' % cmd)
for v in (r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env.CC_VERSION = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLC version.')
@conf
def get_suncc_version(conf, cc):
cmd = cc + ['-V']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError as e:
if not (hasattr(e, 'returncode') and hasattr(e, 'stdout') and hasattr(e, 'stderr')):
conf.fatal('Could not find suncc %r' % cmd)
out = e.stdout
err = e.stderr
version = (out or err)
version = version.splitlines()[0]
version_re = re.compile(
r'cc: (studio.*?|\s+)?(sun\s+(c\+\+|c)|(WorkShop\s+Compilers))?\s+(?P<major>\d*)\.(?P<minor>\d*)', re.I
).search
match = version_re(version)
if match:
k = match.groupdict()
conf.env.CC_VERSION = (k['major'], k['minor'])
else:
conf.fatal('Could not determine the suncc version.')
@conf
def add_as_needed(self):
if self.env.DEST_BINFMT == 'elf' and 'gcc' in (self.env.CXX_NAME, self.env.CC_NAME):
self.env.append_unique('LINKFLAGS', '-Wl,--as-needed')
| test_exec |
python | django-compressor__django-compressor | compressor/filters/cssmin/__init__.py | {
"start": 54,
"end": 340
} | class ____(CallbackOutputFilter):
"""
A filter that utilizes Yury Selivanov's Python port of the YUI CSS
compression algorithm: https://pypi.python.org/pypi/csscompressor
"""
callback = "csscompressor.compress"
dependencies = ["csscompressor"]
| CSSCompressorFilter |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1524311,
"end": 1525981
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, TeamAuditEntryData):
"""Audit log entry for a team.change_parent_team event."""
__schema__ = github_schema
__field_names__ = (
"is_ldap_mapped",
"parent_team",
"parent_team_name",
"parent_team_name_was",
"parent_team_resource_path",
"parent_team_url",
"parent_team_was",
"parent_team_was_resource_path",
"parent_team_was_url",
)
is_ldap_mapped = sgqlc.types.Field(Boolean, graphql_name="isLdapMapped")
"""Whether the team was mapped to an LDAP Group."""
parent_team = sgqlc.types.Field(Team, graphql_name="parentTeam")
"""The new parent team."""
parent_team_name = sgqlc.types.Field(String, graphql_name="parentTeamName")
"""The name of the new parent team"""
parent_team_name_was = sgqlc.types.Field(String, graphql_name="parentTeamNameWas")
"""The name of the former parent team"""
parent_team_resource_path = sgqlc.types.Field(URI, graphql_name="parentTeamResourcePath")
"""The HTTP path for the parent team"""
parent_team_url = sgqlc.types.Field(URI, graphql_name="parentTeamUrl")
"""The HTTP URL for the parent team"""
parent_team_was = sgqlc.types.Field(Team, graphql_name="parentTeamWas")
"""The former parent team."""
parent_team_was_resource_path = sgqlc.types.Field(URI, graphql_name="parentTeamWasResourcePath")
"""The HTTP path for the previous parent team"""
parent_team_was_url = sgqlc.types.Field(URI, graphql_name="parentTeamWasUrl")
"""The HTTP URL for the previous parent team"""
| TeamChangeParentTeamAuditEntry |
python | getsentry__sentry | src/sentry/explore/endpoints/serializers.py | {
"start": 657,
"end": 2258
} | class ____(serializers.Serializer):
# visualizes
chartType = serializers.IntegerField(required=False)
yAxes = serializers.ListField(child=serializers.CharField(), required=False)
# group bys
groupBy = serializers.CharField(required=False)
def validate(self, data):
visualize_serializer = VisualizeSerializer(data=data)
group_by_serializer = GroupBySerializer(data=data)
# if one of them is valid, then it's good
if visualize_serializer.is_valid() != group_by_serializer.is_valid():
return data
if visualize_serializer.is_valid() and group_by_serializer.is_valid():
raise ParseError("Ambiguous aggregate field. Must specify groupBy or yAxes, not both.")
# when neither are valid, we need to do some better error handling
visualize_errors = visualize_serializer.errors
group_by_errors = group_by_serializer.errors
visualize_has_not_required_errors = any(
error.code != "required" for error in visualize_errors.get("yAxes", [])
)
group_by_has_not_required_errors = any(
error.code != "required" for error in group_by_errors.get("groupBy", [])
)
if visualize_has_not_required_errors:
visualize_serializer.is_valid(raise_exception=True)
elif group_by_has_not_required_errors:
group_by_serializer.is_valid(raise_exception=True)
raise ValidationError(
{
**visualize_errors,
**group_by_errors,
}
)
| AggregateFieldSerializer |
python | gevent__gevent | src/gevent/backdoor.py | {
"start": 6138,
"end": 7310
} | class ____(object):
# Python 2 likes to test for this before writing to stderr.
softspace = None
encoding = 'utf-8'
__slots__ = (
'sock',
'fobj',
'fileno',
)
def __init__(self, sock, stdin):
self.sock = sock
self.fobj = stdin
# On Python 3, The builtin input() function (used by the
# default InteractiveConsole) calls fileno() on
# sys.stdin. If it's the same as the C stdin's fileno,
# and isatty(fd) (C function call) returns true,
# and all of that is also true for stdout, then input() will use
# PyOS_Readline to get the input.
#
# On Python 2, the sys.stdin object has to extend the file()
# class, and return true from isatty(fileno(sys.stdin.f_fp))
# (where f_fp is a C-level FILE* member) to use PyOS_Readline.
#
# If that doesn't hold, both versions fall back to reading and writing
# using sys.stdout.write() and sys.stdin.readline().
self.fileno = sock.fileno
def __getattr__(self, name):
return getattr(self.fobj, name)
def close(self):
pass
| _BaseFileLike |
python | lepture__mistune | src/mistune/core.py | {
"start": 2648,
"end": 3657
} | class ____:
"""The state to save inline parser's tokens."""
def __init__(self, env: MutableMapping[str, Any]):
self.env = env
self.src = ""
self.tokens: List[Dict[str, Any]] = []
self.in_image = False
self.in_link = False
self.in_emphasis = False
self.in_strong = False
def prepend_token(self, token: Dict[str, Any]) -> None:
"""Insert token before the last token."""
self.tokens.insert(len(self.tokens) - 1, token)
def append_token(self, token: Dict[str, Any]) -> None:
"""Add token to the end of token list."""
self.tokens.append(token)
def copy(self) -> "InlineState":
"""Create a copy of current state."""
state = self.__class__(self.env)
state.in_image = self.in_image
state.in_link = self.in_link
state.in_emphasis = self.in_emphasis
state.in_strong = self.in_strong
return state
ST = TypeVar("ST", InlineState, BlockState)
| InlineState |
python | huggingface__transformers | tests/models/deberta/test_modeling_deberta.py | {
"start": 8578,
"end": 10959
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = False
def setUp(self):
self.model_tester = DebertaModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_deberta_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/deberta-base"
model = DebertaModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
| DebertaModelTest |
python | pappasam__jedi-language-server | jedi_language_server/initialization_options.py | {
"start": 2947,
"end": 4860
} | class ____:
code_action: CodeAction = field(default_factory=CodeAction)
completion: Completion = field(default_factory=Completion)
diagnostics: Diagnostics = field(default_factory=Diagnostics)
hover: Hover = field(default_factory=Hover)
jedi_settings: JediSettings = field(default_factory=JediSettings)
markup_kind_preferred: Optional[MarkupKind] = None
workspace: Workspace = field(default_factory=Workspace)
semantic_tokens: SemanticTokens = field(default_factory=SemanticTokens)
initialization_options_converter = Converter()
WEIRD_NAMES = {
"keyword_": "keyword",
"module_": "module",
"class_": "class",
"instance_": "instance",
"function_": "function",
"param_": "param",
"path_": "path",
"property_": "property",
"statement_ ": "statement",
}
def convert_class_keys(string: str) -> str:
"""Convert from snake_case to camelCase.
Also handles random special cases for keywords.
"""
if string in WEIRD_NAMES:
return WEIRD_NAMES[string]
return "".join(
word.capitalize() if idx > 0 else word
for idx, word in enumerate(string.split("_"))
)
def structure(cls: type) -> Any:
"""Hook to convert names when marshalling initialization_options."""
return make_dict_structure_fn(
cls,
initialization_options_converter,
**{ # type: ignore[arg-type]
a.name: override(rename=convert_class_keys(a.name))
for a in fields(cls)
},
)
initialization_options_converter.register_structure_hook_factory(
is_dataclass, structure
)
initialization_options_converter.register_structure_hook_factory(
lambda x: x == Pattern[str],
lambda _: lambda x, _: re.compile(x),
)
initialization_options_converter.register_unstructure_hook_factory(
lambda x: x == Pattern[str],
lambda _: lambda x: x.pattern,
)
| InitializationOptions |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {
"start": 72033,
"end": 79949
} | class ____(ProphetNetPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.weight": "prophetnet.word_embeddings.weight",
}
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.prophetnet = ProphetNetModel(config)
self.padding_idx = config.pad_token_id
self.disable_ngram_loss = config.disable_ngram_loss
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.prophetnet.word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.Tensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
encoder_outputs: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple, ProphetNetSeq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
ProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for
labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> logits_next_token = outputs.logits # logits to predict next token as usual
>>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... next tokens
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
outputs = self.prophetnet(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
batch_size, sequence_length = (
decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2]
)
predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1)
predict_logits = self.lm_head(predicting_streams)
logits = predict_logits[:, 0]
logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None
# To use .view in loss computation, make sure that logits is contiguous.
if not logits.is_contiguous():
logits = logits.contiguous()
loss = None
if labels is not None:
loss = self._compute_loss(predict_logits, labels)
if not return_dict:
all_logits = tuple(v for v in [logits, logits_ngram] if v is not None)
return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:]
else:
return ProphetNetSeq2SeqLMOutput(
loss=loss,
logits=logits,
logits_ngram=logits_ngram,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states,
decoder_attentions=outputs.decoder_attentions,
decoder_ngram_attentions=outputs.decoder_ngram_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def _compute_loss(self, logits, labels, ignore_index=-100):
expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index)
for i in range(self.config.ngram):
if i > 0 and self.disable_ngram_loss:
break
expend_targets[i, :, :] = labels
logits = logits.transpose(0, 1).contiguous()
lprobs = nn.functional.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
)
loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean")
if self.config.eps > 0.0:
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
non_masked_tokens = expend_targets.ne(ignore_index).view(-1)
smooth_loss = smooth_loss[non_masked_tokens]
smooth_loss = smooth_loss.mean()
eps_i = self.config.eps / lprobs.size(-1)
loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss
return loss
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def get_encoder(self, modality=None):
if modality is None:
return self.prophetnet.encoder
else:
return super().get_encoder(modality=modality)
@auto_docstring(
custom_intro="""
The standalone decoder part of the ProphetNetModel with a lm head on top. The model can be used for causal
"""
)
| ProphetNetForConditionalGeneration |
python | pytorch__pytorch | torch/_inductor/fx_passes/memory_estimator.py | {
"start": 11516,
"end": 17214
} | class ____:
"""
Tracks memory usage for alternative scheduling orders of an FX graph.
This class enables tracking memory usage as nodes are scheduled in a different
order than the original graph.
"""
def __init__(
self,
graph: fx.Graph,
is_releasable: Callable[[fx.Node], bool] | None = None,
device_filter: Callable[[torch.device], bool] | None = None,
):
"""
Initialize memory tracker for alternative scheduling of the given graph.
Args:
graph: FX graph to track memory for under alternative scheduling
is_releaseable: do we consider this input to the graph to release memory
upon final use, or is allocated for the duration of the graph ?
by default, we assume all nodes but those that start with "primals" to be releasable
device_filter: Function to determine which devices to track (default: non-CPU)
"""
self.graph = graph
self.nodes = list(graph.nodes)
self.device_filter = device_filter or (lambda device: device.type != "cpu")
self.scheduled: OrderedSet[fx.Node] = OrderedSet()
# Memory tracking using GraphAliasTracker
self.alias_tracker = GraphAliasTracker(self.nodes)
self.current_live_storages: OrderedSet[StorageKey] = OrderedSet()
self.current_memory_bytes = 0
self.is_releasable = _is_releasable if is_releasable is None else is_releasable
# Initialize live storages with placeholders and get_attr nodes
for node in self.nodes:
if node.op in ("placeholder", "get_attr"):
fresh_allocations = self.alias_tracker.get_fresh_allocations(node)
for storage_key in fresh_allocations:
if self.device_filter(storage_key.device):
self.current_live_storages.add(storage_key)
self.current_memory_bytes += self._get_storage_size(storage_key)
self.peak_memory = self.current_memory_bytes
log.debug(
"Memory tracker initialized with initial memory: %d MB",
self.current_memory_bytes // (1024 * 1024),
)
def schedule_node(self, node: fx.Node) -> None:
"""
Schedule a node and update memory tracking for the new scheduling order.
Args:
node: The node being scheduled (potentially out of original order)
"""
assert node not in self.scheduled, "should not schedule node twice"
self.scheduled.add(node)
self._update_memory_for_node(node)
def get_current_memory_bytes(self) -> int:
"""Get current live memory in bytes under the current scheduling."""
return self.current_memory_bytes
def _get_storage_size(self, storage_key: StorageKey) -> int:
"""Get the size of a storage in bytes, handling symbolic shapes."""
size_bytes = storage_key.storage.nbytes()
return hint_int(
size_bytes, fallback=torch._inductor.config.unbacked_symint_fallback
)
def _get_storages_freed_by_node(self, node: fx.Node) -> OrderedSet[StorageKey]:
"""Get storages that would be freed if we schedule this node."""
freed_storages: OrderedSet[StorageKey] = OrderedSet()
input_storages = self.alias_tracker.get_storage_uses(node)
for storage_key in input_storages:
if not self.device_filter(storage_key.device):
continue
# Invariant: if a node uses a storage, it must be live
assert storage_key in self.current_live_storages, (
"all input storages should be currently allocated"
)
if not self.is_releasable(
self.alias_tracker.storage_to_allocator[storage_key]
):
continue
all_uses = self.alias_tracker.storage_to_uses[storage_key]
# If no more unscheduled uses remain, the storage can be freed
if all(u in self.scheduled for u in all_uses):
freed_storages.add(storage_key)
return freed_storages
def _update_memory_for_node(self, node: fx.Node) -> None:
"""Update memory tracking when a node is scheduled."""
if node.op in ("placeholder", "get_attr", "output"):
return
# Add fresh allocations
fresh_allocations = self.alias_tracker.get_fresh_allocations(node)
alloc_bytes = 0
for storage_key in fresh_allocations:
if (
self.device_filter(storage_key.device)
and storage_key not in self.current_live_storages
):
size = self._get_storage_size(storage_key)
self.current_live_storages.add(storage_key)
self.current_memory_bytes += size
alloc_bytes += size
self.peak_memory = max(self.current_memory_bytes, self.peak_memory)
# Remove storages that are no longer used
storages_to_free = self._get_storages_freed_by_node(node)
freed_bytes = 0
for storage_key in storages_to_free:
if storage_key in self.current_live_storages:
size = self._get_storage_size(storage_key)
self.current_live_storages.remove(storage_key)
self.current_memory_bytes -= size
freed_bytes += size
log.debug(
"Scheduled %s: memory change %d allocs, %d frees, current memory: %d MB",
node.name,
len(fresh_allocations),
len(storages_to_free),
self.current_memory_bytes // (1024 * 1024),
)
| MemoryTracker |
python | Textualize__textual | src/textual/css/_styles_builder.py | {
"start": 2340,
"end": 48798
} | class ____:
"""
The StylesBuilder object takes tokens parsed from the CSS and converts
to the appropriate internal types.
"""
def __init__(self) -> None:
self.styles = Styles()
def __rich_repr__(self) -> rich.repr.Result:
yield "styles", self.styles
def __repr__(self) -> str:
return "StylesBuilder()"
def error(self, name: str, token: Token, message: str | HelpText) -> NoReturn:
raise DeclarationError(name, token, message)
def add_declaration(self, declaration: Declaration) -> None:
if not declaration.name:
return
rule_name = declaration.name.replace("-", "_")
if not declaration.tokens:
self.error(
rule_name,
declaration.token,
f"Missing property value for '{declaration.name}:'",
)
process_method = getattr(self, f"process_{rule_name}", None)
if process_method is None:
suggested_property_name = self._get_suggested_property_name_for_rule(
declaration.name
)
self.error(
declaration.name,
declaration.token,
property_invalid_value_help_text(
declaration.name,
"css",
suggested_property_name=suggested_property_name,
),
)
tokens = declaration.tokens
important = tokens[-1].name == "important"
if important:
tokens = tokens[:-1]
self.styles.important.add(rule_name)
# Check for special token(s)
if tokens[0].name == "token":
value = tokens[0].value
if value == "initial":
self.styles._rules[rule_name] = None
return
try:
process_method(declaration.name, tokens)
except DeclarationError:
raise
except Exception as error:
self.error(declaration.name, declaration.token, str(error))
def _process_enum_multiple(
self, name: str, tokens: list[Token], valid_values: set[str], count: int
) -> tuple[str, ...]:
"""Generic code to process a declaration with two enumerations, like overflow: auto auto"""
if len(tokens) > count or not tokens:
self.error(name, tokens[0], f"expected 1 to {count} tokens here")
results: list[str] = []
append = results.append
for token in tokens:
token_name, value, _, _, location, _ = token
if token_name != "token":
self.error(
name,
token,
f"invalid token {value!r}; expected {friendly_list(valid_values)}",
)
append(value)
short_results = results[:]
while len(results) < count:
results.extend(short_results)
results = results[:count]
return tuple(results)
def _process_enum(
self, name: str, tokens: list[Token], valid_values: set[str]
) -> str:
"""Process a declaration that expects an enum.
Args:
name: Name of declaration.
tokens: Tokens from parser.
valid_values: A set of valid values.
Returns:
True if the value is valid or False if it is invalid (also generates an error)
"""
if len(tokens) != 1:
self.error(
name,
tokens[0],
string_enum_help_text(
name, valid_values=list(valid_values), context="css"
),
)
token = tokens[0]
token_name, value, _, _, location, _ = token
if token_name != "token":
self.error(
name,
token,
string_enum_help_text(
name, valid_values=list(valid_values), context="css"
),
)
if value not in valid_values:
self.error(
name,
token,
string_enum_help_text(
name, valid_values=list(valid_values), context="css"
),
)
return value
def process_display(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
name, value, _, _, location, _ = token
if name == "token":
value = value.lower()
if value in VALID_DISPLAY:
self.styles._rules["display"] = cast(Display, value)
else:
self.error(
name,
token,
string_enum_help_text(
"display", valid_values=list(VALID_DISPLAY), context="css"
),
)
else:
self.error(
name,
token,
string_enum_help_text(
"display", valid_values=list(VALID_DISPLAY), context="css"
),
)
    def _process_scalar(self, name: str, tokens: list[Token]) -> None:
        """Process a declaration holding a single scalar (e.g. ``width: 50%``)."""

        def scalar_error():
            # Report a single consolidated error for any invalid scalar input.
            self.error(
                name, tokens[0], scalar_help_text(property_name=name, context="css")
            )

        if not tokens:
            return
        if len(tokens) == 1:
            try:
                # Store under the "Python-ised" rule name (dashes -> underscores).
                self.styles._rules[name.replace("-", "_")] = Scalar.parse(  # type: ignore
                    tokens[0].value
                )
            except ScalarParseError:
                scalar_error()
        else:
            scalar_error()
def _distribute_importance(self, prefix: str, suffixes: tuple[str, ...]) -> None:
"""Distribute importance amongst all aspects of the given style.
Args:
prefix: The prefix of the style.
suffixes: The suffixes to distribute amongst.
A number of styles can be set with the 'prefix' of the style,
providing the values as a series of parameters; or they can be set
with specific suffixes. Think `border` vs `border-left`, etc. This
method is used to ensure that if the former is set, `!important` is
distributed amongst all the suffixes.
"""
if prefix in self.styles.important:
self.styles.important.remove(prefix)
self.styles.important.update(f"{prefix}_{suffix}" for suffix in suffixes)
def process_box_sizing(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
name, value, _, _, location, _ = token
if name == "token":
value = value.lower()
if value in VALID_BOX_SIZING:
self.styles._rules["box_sizing"] = cast(BoxSizing, value)
else:
self.error(
name,
token,
string_enum_help_text(
"box-sizing",
valid_values=list(VALID_BOX_SIZING),
context="css",
),
)
else:
self.error(
name,
token,
string_enum_help_text(
"box-sizing", valid_values=list(VALID_BOX_SIZING), context="css"
),
)
    # Every size-related rule holds a single scalar; delegate to _process_scalar.
    def process_width(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)

    def process_height(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)

    def process_min_width(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)

    def process_min_height(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)

    def process_max_width(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)

    def process_max_height(self, name: str, tokens: list[Token]) -> None:
        self._process_scalar(name, tokens)
def process_overflow(self, name: str, tokens: list[Token]) -> None:
rules = self.styles._rules
overflow_x, overflow_y = self._process_enum_multiple(
name, tokens, VALID_OVERFLOW, 2
)
rules["overflow_x"] = cast(Overflow, overflow_x)
rules["overflow_y"] = cast(Overflow, overflow_y)
self._distribute_importance("overflow", ("x", "y"))
def process_overflow_x(self, name: str, tokens: list[Token]) -> None:
self.styles._rules["overflow_x"] = cast(
Overflow, self._process_enum(name, tokens, VALID_OVERFLOW)
)
def process_overflow_y(self, name: str, tokens: list[Token]) -> None:
self.styles._rules["overflow_y"] = cast(
Overflow, self._process_enum(name, tokens, VALID_OVERFLOW)
)
def process_visibility(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
name, value, _, _, location, _ = token
if name == "token":
value = value.lower()
if value in VALID_VISIBILITY:
self.styles._rules["visibility"] = cast(Visibility, value)
else:
self.error(
name,
token,
string_enum_help_text(
"visibility",
valid_values=list(VALID_VISIBILITY),
context="css",
),
)
else:
string_enum_help_text(
"visibility", valid_values=list(VALID_VISIBILITY), context="css"
)
def process_text_wrap(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
name, value, _, _, location, _ = token
if name == "token":
value = value.lower()
if value in VALID_TEXT_WRAP:
self.styles._rules["text_wrap"] = cast(TextWrap, value)
else:
self.error(
name,
token,
string_enum_help_text(
"text-wrap",
valid_values=list(VALID_TEXT_WRAP),
context="css",
),
)
else:
string_enum_help_text(
"text-wrap", valid_values=list(VALID_TEXT_WRAP), context="css"
)
def process_text_overflow(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
name, value, _, _, location, _ = token
if name == "token":
value = value.lower()
if value in VALID_TEXT_OVERFLOW:
self.styles._rules["text_overflow"] = cast(TextOverflow, value)
else:
self.error(
name,
token,
string_enum_help_text(
"text-overflow",
valid_values=list(VALID_TEXT_OVERFLOW),
context="css",
),
)
else:
string_enum_help_text(
"text-overflow",
valid_values=list(VALID_TEXT_OVERFLOW),
context="css",
)
def _process_fractional(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
token = tokens[0]
error = False
if len(tokens) != 1:
error = True
else:
token_name = token.name
value = token.value
rule_name = name.replace("-", "_")
if token_name == "scalar" and value.endswith("%"):
try:
text_opacity = percentage_string_to_float(value)
self.styles.set_rule(rule_name, text_opacity)
except ValueError:
error = True
elif token_name == "number":
try:
text_opacity = clamp(float(value), 0, 1)
self.styles.set_rule(rule_name, text_opacity)
except ValueError:
error = True
else:
error = True
if error:
self.error(name, token, fractional_property_help_text(name, context="css"))
process_opacity = _process_fractional
process_text_opacity = _process_fractional
def _process_space(self, name: str, tokens: list[Token]) -> None:
space: list[int] = []
append = space.append
for token in tokens:
token_name, value, _, _, _, _ = token
if token_name == "number":
try:
append(int(value))
except ValueError:
self.error(
name,
token,
spacing_invalid_value_help_text(name, context="css"),
)
else:
self.error(
name, token, spacing_invalid_value_help_text(name, context="css")
)
if len(space) not in (1, 2, 4):
self.error(
name,
tokens[0],
spacing_wrong_number_of_values_help_text(
name, num_values_supplied=len(space), context="css"
),
)
self.styles._rules[name] = Spacing.unpack(cast(SpacingDimensions, tuple(space))) # type: ignore
    def _process_space_partial(self, name: str, tokens: list[Token]) -> None:
        """Process granular margin / padding declarations."""
        if len(tokens) != 1:
            self.error(
                name, tokens[0], spacing_invalid_value_help_text(name, context="css")
            )

        # Maps an edge name on to its index within the 4-tuple of spacing values.
        _EDGE_SPACING_MAP = {"top": 0, "right": 1, "bottom": 2, "left": 3}

        token = tokens[0]
        token_name, value, _, _, _, _ = token
        if token_name == "number":
            space = int(value)
        else:
            # NOTE(review): execution continues past self.error; ``space`` below
            # would be unbound unless self.error always raises — confirm.
            self.error(
                name, token, spacing_invalid_value_help_text(name, context="css")
            )
        # e.g. "margin-top" -> style "margin", edge "top".
        style_name, _, edge = name.replace("-", "_").partition("_")

        # Merge the new edge value into any previously-set spacing for the style.
        current_spacing = cast(
            "tuple[int, int, int, int]",
            self.styles._rules.get(style_name, (0, 0, 0, 0)),
        )

        spacing_list = list(current_spacing)
        spacing_list[_EDGE_SPACING_MAP[edge]] = space

        self.styles._rules[style_name] = Spacing(*spacing_list)  # type: ignore
process_padding = _process_space
process_margin = _process_space
process_margin_top = _process_space_partial
process_margin_right = _process_space_partial
process_margin_bottom = _process_space_partial
process_margin_left = _process_space_partial
process_padding_top = _process_space_partial
process_padding_right = _process_space_partial
process_padding_bottom = _process_space_partial
process_padding_left = _process_space_partial
def _parse_border(self, name: str, tokens: list[Token]) -> BorderValue:
border_type: EdgeType = "solid"
border_color = Color(0, 255, 0)
border_alpha: float | None = None
def border_value_error():
self.error(name, token, border_property_help_text(name, context="css"))
for token in tokens:
token_name, value, _, _, _, _ = token
if token_name == "token":
if value in VALID_BORDER:
border_type = value # type: ignore
else:
try:
border_color = Color.parse(value)
except ColorParseError:
border_value_error()
elif token_name == "color":
try:
border_color = Color.parse(value)
except ColorParseError:
border_value_error()
elif token_name == "scalar":
alpha_scalar = Scalar.parse(token.value)
if alpha_scalar.unit != Unit.PERCENT:
self.error(name, token, "alpha must be given as a percentage.")
border_alpha = alpha_scalar.value / 100.0
else:
border_value_error()
if border_alpha is not None:
border_color = border_color.multiply_alpha(border_alpha)
return normalize_border_value((border_type, border_color))
def _process_border_edge(self, edge: str, name: str, tokens: list[Token]) -> None:
border = self._parse_border(name, tokens)
self.styles._rules[f"border_{edge}"] = border # type: ignore
def process_border(self, name: str, tokens: list[Token]) -> None:
border = self._parse_border(name, tokens)
rules = self.styles._rules
rules["border_top"] = rules["border_right"] = border
rules["border_bottom"] = rules["border_left"] = border
self._distribute_importance("border", ("top", "left", "bottom", "right"))
def process_border_top(self, name: str, tokens: list[Token]) -> None:
self._process_border_edge("top", name, tokens)
def process_border_right(self, name: str, tokens: list[Token]) -> None:
self._process_border_edge("right", name, tokens)
def process_border_bottom(self, name: str, tokens: list[Token]) -> None:
self._process_border_edge("bottom", name, tokens)
def process_border_left(self, name: str, tokens: list[Token]) -> None:
self._process_border_edge("left", name, tokens)
def _process_outline(self, edge: str, name: str, tokens: list[Token]) -> None:
border = self._parse_border(name, tokens)
self.styles._rules[f"outline_{edge}"] = border # type: ignore
def process_outline(self, name: str, tokens: list[Token]) -> None:
border = self._parse_border(name, tokens)
rules = self.styles._rules
rules["outline_top"] = rules["outline_right"] = border
rules["outline_bottom"] = rules["outline_left"] = border
self._distribute_importance("outline", ("top", "left", "bottom", "right"))
def process_outline_top(self, name: str, tokens: list[Token]) -> None:
self._process_outline("top", name, tokens)
def process_outline_right(self, name: str, tokens: list[Token]) -> None:
self._process_outline("right", name, tokens)
def process_outline_bottom(self, name: str, tokens: list[Token]) -> None:
self._process_outline("bottom", name, tokens)
def process_outline_left(self, name: str, tokens: list[Token]) -> None:
self._process_outline("left", name, tokens)
def process_keyline(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) > 3:
self.error(name, tokens[0], keyline_help_text())
keyline_style = "none"
keyline_color = Color.parse("green")
keyline_alpha = 1.0
for token in tokens:
if token.name == "color":
try:
keyline_color = Color.parse(token.value)
except Exception as error:
self.error(
name,
token,
color_property_help_text(
name, context="css", error=error, value=token.value
),
)
elif token.name == "token":
try:
keyline_color = Color.parse(token.value)
except Exception:
keyline_style = token.value
if keyline_style not in VALID_KEYLINE:
self.error(name, token, keyline_help_text())
elif token.name == "scalar":
alpha_scalar = Scalar.parse(token.value)
if alpha_scalar.unit != Unit.PERCENT:
self.error(name, token, "alpha must be given as a percentage.")
keyline_alpha = alpha_scalar.value / 100.0
self.styles._rules["keyline"] = (
keyline_style,
keyline_color.multiply_alpha(keyline_alpha),
)
def process_offset(self, name: str, tokens: list[Token]) -> None:
def offset_error(name: str, token: Token) -> None:
self.error(name, token, offset_property_help_text(context="css"))
if not tokens:
return
if len(tokens) != 2:
offset_error(name, tokens[0])
else:
token1, token2 = tokens
if token1.name not in ("scalar", "number"):
offset_error(name, token1)
if token2.name not in ("scalar", "number"):
offset_error(name, token2)
scalar_x = Scalar.parse(token1.value, Unit.WIDTH)
scalar_y = Scalar.parse(token2.value, Unit.HEIGHT)
self.styles._rules["offset"] = ScalarOffset(scalar_x, scalar_y)
def process_offset_x(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) != 1:
self.error(name, tokens[0], offset_single_axis_help_text(name))
else:
token = tokens[0]
if token.name not in ("scalar", "number"):
self.error(name, token, offset_single_axis_help_text(name))
x = Scalar.parse(token.value, Unit.WIDTH)
y = self.styles.offset.y
self.styles._rules["offset"] = ScalarOffset(x, y)
def process_offset_y(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) != 1:
self.error(name, tokens[0], offset_single_axis_help_text(name))
else:
token = tokens[0]
if token.name not in ("scalar", "number"):
self.error(name, token, offset_single_axis_help_text(name))
y = Scalar.parse(token.value, Unit.HEIGHT)
x = self.styles.offset.x
self.styles._rules["offset"] = ScalarOffset(x, y)
    def process_position(self, name: str, tokens: list[Token]) -> None:
        """Process a ``position`` declaration (e.g. ``relative`` / ``absolute``)."""
        if not tokens:
            return
        if len(tokens) != 1:
            # NOTE(review): reuses the offset-axis help text for the wrong token
            # count — presumably intentional reuse; verify.
            self.error(name, tokens[0], offset_single_axis_help_text(name))
        else:
            token = tokens[0]
            if token.value not in VALID_POSITION:
                self.error(name, tokens[0], position_help_text(name))
            self.styles._rules["position"] = token.value
def process_layout(self, name: str, tokens: list[Token]) -> None:
from textual.layouts.factory import MissingLayout, get_layout
if tokens:
if len(tokens) != 1:
self.error(
name, tokens[0], layout_property_help_text(name, context="css")
)
else:
value = tokens[0].value
layout_name = value
try:
self.styles._rules["layout"] = get_layout(layout_name)
except MissingLayout:
self.error(
name,
tokens[0],
layout_property_help_text(name, context="css"),
)
def process_color(self, name: str, tokens: list[Token]) -> None:
"""Processes a simple color declaration."""
name = name.replace("-", "_")
color: Color | None = None
alpha: float | None = None
self.styles._rules[f"auto_{name}"] = False # type: ignore
for token in tokens:
if (
"background" not in name
and token.name == "token"
and token.value == "auto"
):
self.styles._rules[f"auto_{name}"] = True # type: ignore
elif token.name == "scalar":
alpha_scalar = Scalar.parse(token.value)
if alpha_scalar.unit != Unit.PERCENT:
self.error(name, token, "alpha must be given as a percentage.")
alpha = alpha_scalar.value / 100.0
elif token.name in ("color", "token"):
try:
color = Color.parse(token.value)
except Exception as error:
self.error(
name,
token,
color_property_help_text(
name, context="css", error=error, value=token.value
),
)
else:
self.error(
name,
token,
color_property_help_text(name, context="css", value=token.value),
)
if color is not None or alpha is not None:
if alpha is not None:
color = (color or Color(255, 255, 255)).multiply_alpha(alpha)
self.styles._rules[name] = color # type: ignore
process_tint = process_color
process_background = process_color
process_background_tint = process_color
process_scrollbar_color = process_color
process_scrollbar_color_hover = process_color
process_scrollbar_color_active = process_color
process_scrollbar_corner_color = process_color
process_scrollbar_background = process_color
process_scrollbar_background_hover = process_color
process_scrollbar_background_active = process_color
def process_scrollbar_visibility(self, name: str, tokens: list[Token]) -> None:
"""Process scrollbar visibility rules."""
self.styles._rules["scrollbar_visibility"] = cast(
ScrollbarVisibility,
self._process_enum(name, tokens, VALID_SCROLLBAR_VISIBILITY),
)
process_link_color = process_color
process_link_background = process_color
process_link_color_hover = process_color
process_link_background_hover = process_color
process_border_title_color = process_color
process_border_title_background = process_color
process_border_subtitle_color = process_color
process_border_subtitle_background = process_color
def process_text_style(self, name: str, tokens: list[Token]) -> None:
for token in tokens:
value = token.value
if value not in VALID_STYLE_FLAGS:
self.error(
name,
token,
style_flags_property_help_text(name, value, context="css"),
)
style_definition = " ".join(token.value for token in tokens)
self.styles._rules[name.replace("-", "_")] = style_definition # type: ignore
process_link_style = process_text_style
process_link_style_hover = process_text_style
process_border_title_style = process_text_style
process_border_subtitle_style = process_text_style
def process_text_align(self, name: str, tokens: list[Token]) -> None:
"""Process a text-align declaration"""
if not tokens:
return
if len(tokens) > 1 or tokens[0].value not in VALID_TEXT_ALIGN:
self.error(
name,
tokens[0],
text_align_help_text(),
)
self.styles._rules["text_align"] = tokens[0].value # type: ignore
def process_dock(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) > 1 or tokens[0].value not in VALID_EDGE:
self.error(
name,
tokens[0],
dock_property_help_text(name, context="css"),
)
dock_value = tokens[0].value
self.styles._rules["dock"] = dock_value
def process_split(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) > 1 or tokens[0].value not in VALID_EDGE:
self.error(
name,
tokens[0],
split_property_help_text(name, context="css"),
)
split_value = tokens[0].value
self.styles._rules["split"] = split_value
def process_layer(self, name: str, tokens: list[Token]) -> None:
if len(tokens) > 1:
self.error(name, tokens[1], "unexpected tokens in dock-edge declaration")
self.styles._rules["layer"] = tokens[0].value
def process_layers(self, name: str, tokens: list[Token]) -> None:
layers: list[str] = []
for token in tokens:
if token.name not in {"token", "string"}:
self.error(name, token, f"{token.name} not expected here")
layers.append(token.value)
self.styles._rules["layers"] = tuple(layers)
def process_transition(self, name: str, tokens: list[Token]) -> None:
transitions: dict[str, Transition] = {}
def make_groups() -> Iterable[list[Token]]:
"""Batch tokens into comma-separated groups."""
group: list[Token] = []
for token in tokens:
if token.name == "comma":
if group:
yield group
group = []
else:
group.append(token)
if group:
yield group
valid_duration_token_names = ("duration", "number")
for tokens in make_groups():
css_property = ""
duration = 1.0
easing = "linear"
delay = 0.0
try:
iter_tokens = iter(tokens)
token = next(iter_tokens)
if token.name != "token":
self.error(name, token, "expected property")
css_property = token.value
token = next(iter_tokens)
if token.name not in valid_duration_token_names:
self.error(name, token, "expected duration or number")
try:
duration = _duration_as_seconds(token.value)
except ScalarError as error:
self.error(name, token, str(error))
token = next(iter_tokens)
if token.name != "token":
self.error(name, token, "easing function expected")
if token.value not in EASING:
self.error(
name,
token,
f"expected easing function; found {token.value!r}",
)
easing = token.value
token = next(iter_tokens)
if token.name not in valid_duration_token_names:
self.error(name, token, "expected duration or number")
try:
delay = _duration_as_seconds(token.value)
except ScalarError as error:
self.error(name, token, str(error))
except StopIteration:
pass
transitions[css_property] = Transition(duration, easing, delay)
self.styles._rules["transitions"] = transitions
def process_align(self, name: str, tokens: list[Token]) -> None:
def align_error(name, token):
self.error(name, token, align_help_text())
if len(tokens) != 2:
self.error(name, tokens[0], align_help_text())
token_horizontal = tokens[0]
token_vertical = tokens[1]
if token_horizontal.name != "token":
align_error(name, token_horizontal)
elif token_horizontal.value not in VALID_ALIGN_HORIZONTAL:
align_error(name, token_horizontal)
if token_vertical.name != "token":
align_error(name, token_vertical)
elif token_vertical.value not in VALID_ALIGN_VERTICAL:
align_error(name, token_horizontal)
name = name.replace("-", "_")
self.styles._rules[f"{name}_horizontal"] = token_horizontal.value # type: ignore
self.styles._rules[f"{name}_vertical"] = token_vertical.value # type: ignore
self._distribute_importance(name, ("horizontal", "vertical"))
def process_align_horizontal(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_ALIGN_HORIZONTAL)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_ALIGN_HORIZONTAL, context="css"),
)
else:
self.styles._rules[name.replace("-", "_")] = value # type: ignore
def process_align_vertical(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_ALIGN_VERTICAL)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_ALIGN_VERTICAL, context="css"),
)
else:
self.styles._rules[name.replace("-", "_")] = value # type: ignore
process_content_align = process_align
process_content_align_horizontal = process_align_horizontal
process_content_align_vertical = process_align_vertical
process_border_title_align = process_align_horizontal
process_border_subtitle_align = process_align_horizontal
def process_scrollbar_gutter(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_SCROLLBAR_GUTTER)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_SCROLLBAR_GUTTER, context="css"),
)
else:
self.styles._rules[name.replace("-", "_")] = value # type: ignore
def process_scrollbar_size(self, name: str, tokens: list[Token]) -> None:
def scrollbar_size_error(name: str, token: Token) -> None:
self.error(name, token, scrollbar_size_property_help_text(context="css"))
if not tokens:
return
if len(tokens) != 2:
scrollbar_size_error(name, tokens[0])
else:
token1, token2 = tokens
if token1.name != "number" or not token1.value.isdigit():
scrollbar_size_error(name, token1)
if token2.name != "number" or not token2.value.isdigit():
scrollbar_size_error(name, token2)
horizontal = int(token1.value)
vertical = int(token2.value)
self.styles._rules["scrollbar_size_horizontal"] = horizontal
self.styles._rules["scrollbar_size_vertical"] = vertical
self._distribute_importance("scrollbar_size", ("horizontal", "vertical"))
def process_scrollbar_size_vertical(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) != 1:
self.error(name, tokens[0], scrollbar_size_single_axis_help_text(name))
else:
token = tokens[0]
if token.name != "number" or not token.value.isdigit():
self.error(name, token, scrollbar_size_single_axis_help_text(name))
value = int(token.value)
self.styles._rules["scrollbar_size_vertical"] = value
def process_scrollbar_size_horizontal(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) != 1:
self.error(name, tokens[0], scrollbar_size_single_axis_help_text(name))
else:
token = tokens[0]
if token.name != "number" or not token.value.isdigit():
self.error(name, token, scrollbar_size_single_axis_help_text(name))
value = int(token.value)
self.styles._rules["scrollbar_size_horizontal"] = value
def _process_grid_rows_or_columns(self, name: str, tokens: list[Token]) -> None:
scalars: list[Scalar] = []
percent_unit = Unit.WIDTH if name == "grid-columns" else Unit.HEIGHT
for token in tokens:
if token.name == "number":
scalars.append(Scalar.from_number(float(token.value)))
elif token.name == "scalar":
scalars.append(Scalar.parse(token.value, percent_unit=percent_unit))
elif token.name == "token" and token.value == "auto":
scalars.append(Scalar.parse("auto"))
else:
self.error(
name,
token,
table_rows_or_columns_help_text(name, token.value, context="css"),
)
self.styles._rules[name.replace("-", "_")] = scalars # type: ignore
process_grid_rows = _process_grid_rows_or_columns
process_grid_columns = _process_grid_rows_or_columns
def _process_integer(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) != 1:
self.error(name, tokens[0], integer_help_text(name))
else:
token = tokens[0]
if token.name != "number" or not token.value.isdigit():
self.error(name, token, integer_help_text(name))
value = int(token.value)
if value == 0:
self.error(name, token, integer_help_text(name))
self.styles._rules[name.replace("-", "_")] = value # type: ignore
process_grid_gutter_horizontal = _process_integer
process_grid_gutter_vertical = _process_integer
process_column_span = _process_integer
process_row_span = _process_integer
process_grid_size_columns = _process_integer
process_grid_size_rows = _process_integer
process_line_pad = _process_integer
def process_grid_gutter(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) == 1:
token = tokens[0]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_gutter_horizontal"] = value
self.styles._rules["grid_gutter_vertical"] = value
elif len(tokens) == 2:
token = tokens[0]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_gutter_horizontal"] = value
token = tokens[1]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_gutter_vertical"] = value
else:
self.error(name, tokens[0], "expected two integers here")
def process_grid_size(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
if len(tokens) == 1:
token = tokens[0]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_size_columns"] = value
self.styles._rules["grid_size_rows"] = 0
elif len(tokens) == 2:
token = tokens[0]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_size_columns"] = value
token = tokens[1]
if token.name != "number":
self.error(name, token, integer_help_text(name))
value = max(0, int(token.value))
self.styles._rules["grid_size_rows"] = value
else:
self.error(name, tokens[0], "expected two integers here")
def process_overlay(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_OVERLAY)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_OVERLAY, context="css"),
)
else:
self.styles._rules[name] = value # type: ignore
def process_constrain(self, name: str, tokens: list[Token]) -> None:
if len(tokens) == 1:
try:
value = self._process_enum(name, tokens, VALID_CONSTRAIN)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_CONSTRAIN, context="css"),
)
else:
self.styles._rules["constrain_x"] = value # type: ignore
self.styles._rules["constrain_y"] = value # type: ignore
elif len(tokens) == 2:
constrain_x, constrain_y = self._process_enum_multiple(
name, tokens, VALID_CONSTRAIN, 2
)
self.styles._rules["constrain_x"] = constrain_x # type: ignore
self.styles._rules["constrain_y"] = constrain_y # type: ignore
else:
self.error(name, tokens[0], "one or two values expected here")
def process_constrain_x(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_CONSTRAIN)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_CONSTRAIN, context="css"),
)
else:
self.styles._rules[name] = value # type: ignore
def process_constrain_y(self, name: str, tokens: list[Token]) -> None:
try:
value = self._process_enum(name, tokens, VALID_CONSTRAIN)
except StyleValueError:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_CONSTRAIN, context="css"),
)
else:
self.styles._rules[name] = value # type: ignore
def process_hatch(self, name: str, tokens: list[Token]) -> None:
if not tokens:
return
character: str | None = None
color = TRANSPARENT
opacity = 1.0
if len(tokens) == 1 and tokens[0].value == "none":
self.styles._rules[name] = "none"
return
if len(tokens) not in (2, 3):
self.error(name, tokens[0], "2 or 3 values expected here")
character_token, color_token, *opacity_tokens = tokens
if character_token.name == "token":
if character_token.value not in VALID_HATCH:
self.error(
name,
tokens[0],
string_enum_help_text(name, VALID_HATCH, context="css"),
)
character = HATCHES[character_token.value]
elif character_token.name == "string":
character = character_token.value[1:-1]
if len(character) != 1:
self.error(
name,
character_token,
f"Hatch type requires a string of length 1; got {character_token.value}",
)
if cell_len(character) != 1:
self.error(
name,
character_token,
f"Hatch type requires a string with a *cell length* of 1; got {character_token.value}",
)
if color_token.name in ("color", "token"):
try:
color = Color.parse(color_token.value)
except Exception as error:
self.error(
name,
color_token,
color_property_help_text(
name, context="css", error=error, value=color_token.value
),
)
else:
self.error(
name, color_token, f"Expected a color; found {color_token.value!r}"
)
if opacity_tokens:
opacity_token = opacity_tokens[0]
if opacity_token.name == "scalar":
opacity_scalar = opacity = Scalar.parse(opacity_token.value)
if opacity_scalar.unit != Unit.PERCENT:
self.error(
name,
opacity_token,
"hatch alpha must be given as a percentage.",
)
opacity = clamp(opacity_scalar.value / 100.0, 0, 1.0)
else:
self.error(
name,
opacity_token,
f"expected a percentage here; found {opacity_token.value!r}",
)
self.styles._rules[name] = (character or " ", color.multiply_alpha(opacity))
    def process_expand(self, name: str, tokens: list[Token]) -> None:
        """Process an ``expand`` declaration."""
        if not tokens:
            return
        if len(tokens) != 1:
            # NOTE(review): reuses the offset-axis help text for the wrong token
            # count — presumably intentional reuse; verify.
            self.error(name, tokens[0], offset_single_axis_help_text(name))
        else:
            token = tokens[0]
            if token.value not in VALID_EXPAND:
                self.error(name, tokens[0], expand_help_text(name))
            self.styles._rules["expand"] = token.value
def _get_suggested_property_name_for_rule(self, rule_name: str) -> str | None:
"""
Returns a valid CSS property "Python" name, or None if no close matches could be found.
Args:
rule_name: An invalid "Python-ised" CSS property (i.e. "offst_x" rather than "offst-x")
Returns:
The closest valid "Python-ised" CSS property.
Returns `None` if no close matches could be found.
Example: returns "background" for rule_name "bkgrund", "offset_x" for "ofset_x"
"""
processable_rules_name = [
attr[8:] for attr in dir(self) if attr.startswith("process_")
]
return get_suggestion(rule_name, processable_rules_name)
| StylesBuilder |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E23.py | {
"start": 3046,
"end": 3510
} | class ____[A: object="foo"[::-1], B: object =[[["foo", "bar"]]], C: object= bytes](object, something_dynamic[x::-1]):
pass
# E231
t"{(a,b)}"
# Okay because it's hard to differentiate between the usages of a colon in a t-string
t"{a:=1}"
t"{ {'a':1} }"
t"{a:.3f}"
t"{(a:=1)}"
t"{(lambda x:x)}"
t"normal{t"{a:.3f}"}normal"
#: Okay
snapshot.file_uri[len(t's3://{self.s3_bucket_name}/'):]
#: E231
{len(t's3://{self.s3_bucket_name}/'):1}
| PEP696GoodWithNonEmptyBases |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/dbt_project_manager.py | {
"start": 2301,
"end": 3624
} | class ____(DbtProjectManager):
"""Wraps DbtProjectArgs provided to the DbtProjectComponent. Avoids instantiating the DbtProject object
immediately as this would cause errors in cases where the project_dir has not yet been synced.
"""
args: "DbtProjectArgs"
@property
def defs_state_discriminator(self) -> str:
return Path(self.args.project_dir).stem
def sync(self, state_path: Path) -> None:
# we prepare the project in the original project directory rather than the new one
# so that code that does not have access to the local state path can still access
# the manifest.json file.
project = self.get_project(None)
project.preparer.prepare(project)
shutil.copytree(
self.args.project_dir, self._local_project_dir(state_path), dirs_exist_ok=True
)
def get_project(self, state_path: Optional[Path]) -> "DbtProject":
kwargs = asdict(self.args)
project_dir = self._local_project_dir(state_path) if state_path else self.args.project_dir
return DbtProject(
project_dir=project_dir,
# allow default values on DbtProject to take precedence
**{k: v for k, v in kwargs.items() if v is not None and k != "project_dir"},
)
@dataclass
| DbtProjectArgsManager |
python | ray-project__ray | rllib/algorithms/dreamerv3/torch/models/components/sequence_model.py | {
"start": 4307,
"end": 5203
} | class ____(nn.Module):
"""Analogous to Danijar's JAX GRU unit code."""
def __init__(self, input_size, cell_size):
super().__init__()
self.cell_size = cell_size
self.output_size = 3 * self.cell_size
self.linear = nn.Linear(
input_size + self.cell_size,
self.output_size,
bias=False,
)
dreamerv3_normal_initializer(list(self.linear.parameters()))
self.layer_norm = nn.LayerNorm(self.output_size, eps=0.001)
def forward(self, x, h):
x = torch.cat([h, x], dim=-1)
x = self.linear(x)
x = self.layer_norm(x)
reset, cand, update = torch.split(x, self.cell_size, dim=-1)
reset = torch.sigmoid(reset)
cand = torch.tanh(reset * cand)
update = torch.sigmoid(update - 1)
h = update * cand + (1 - update) * h
return h, h
| DreamerV3GRU |
python | doocs__leetcode | solution/0800-0899/0826.Most Profit Assigning Work/Solution.py | {
"start": 0,
"end": 410
} | class ____:
def maxProfitAssignment(
self, difficulty: List[int], profit: List[int], worker: List[int]
) -> int:
worker.sort()
jobs = sorted(zip(difficulty, profit))
ans = mx = i = 0
for w in worker:
while i < len(jobs) and jobs[i][0] <= w:
mx = max(mx, jobs[i][1])
i += 1
ans += mx
return ans
| Solution |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 82430,
"end": 92003
} | class ____:
def test_singles_tl(self):
assert self.locale._format_timeframe("second", 1) == "isang segundo"
assert self.locale._format_timeframe("minute", 1) == "isang minuto"
assert self.locale._format_timeframe("hour", 1) == "isang oras"
assert self.locale._format_timeframe("day", 1) == "isang araw"
assert self.locale._format_timeframe("week", 1) == "isang linggo"
assert self.locale._format_timeframe("month", 1) == "isang buwan"
assert self.locale._format_timeframe("year", 1) == "isang taon"
def test_meridians_tl(self):
assert self.locale.meridian(7, "A") == "ng umaga"
assert self.locale.meridian(18, "A") == "ng hapon"
assert self.locale.meridian(10, "a") == "nu"
assert self.locale.meridian(22, "a") == "nh"
def test_describe_tl(self):
assert self.locale.describe("second", only_distance=True) == "isang segundo"
assert (
self.locale.describe("second", only_distance=False)
== "isang segundo mula ngayon"
)
assert self.locale.describe("minute", only_distance=True) == "isang minuto"
assert (
self.locale.describe("minute", only_distance=False)
== "isang minuto mula ngayon"
)
assert self.locale.describe("hour", only_distance=True) == "isang oras"
assert (
self.locale.describe("hour", only_distance=False)
== "isang oras mula ngayon"
)
assert self.locale.describe("day", only_distance=True) == "isang araw"
assert (
self.locale.describe("day", only_distance=False) == "isang araw mula ngayon"
)
assert self.locale.describe("week", only_distance=True) == "isang linggo"
assert (
self.locale.describe("week", only_distance=False)
== "isang linggo mula ngayon"
)
assert self.locale.describe("month", only_distance=True) == "isang buwan"
assert (
self.locale.describe("month", only_distance=False)
== "isang buwan mula ngayon"
)
assert self.locale.describe("year", only_distance=True) == "isang taon"
assert (
self.locale.describe("year", only_distance=False)
== "isang taon mula ngayon"
)
def test_relative_tl(self):
# time
assert self.locale._format_relative("ngayon", "now", 0) == "ngayon"
assert (
self.locale._format_relative("1 segundo", "seconds", 1)
== "1 segundo mula ngayon"
)
assert (
self.locale._format_relative("1 minuto", "minutes", 1)
== "1 minuto mula ngayon"
)
assert (
self.locale._format_relative("1 oras", "hours", 1) == "1 oras mula ngayon"
)
assert self.locale._format_relative("1 araw", "days", 1) == "1 araw mula ngayon"
assert (
self.locale._format_relative("1 linggo", "weeks", 1)
== "1 linggo mula ngayon"
)
assert (
self.locale._format_relative("1 buwan", "months", 1)
== "1 buwan mula ngayon"
)
assert (
self.locale._format_relative("1 taon", "years", 1) == "1 taon mula ngayon"
)
assert (
self.locale._format_relative("1 segundo", "seconds", -1)
== "nakaraang 1 segundo"
)
assert (
self.locale._format_relative("1 minuto", "minutes", -1)
== "nakaraang 1 minuto"
)
assert self.locale._format_relative("1 oras", "hours", -1) == "nakaraang 1 oras"
assert self.locale._format_relative("1 araw", "days", -1) == "nakaraang 1 araw"
assert (
self.locale._format_relative("1 linggo", "weeks", -1)
== "nakaraang 1 linggo"
)
assert (
self.locale._format_relative("1 buwan", "months", -1) == "nakaraang 1 buwan"
)
assert self.locale._format_relative("1 taon", "years", -1) == "nakaraang 1 taon"
def test_plurals_tl(self):
# Seconds
assert self.locale._format_timeframe("seconds", 0) == "0 segundo"
assert self.locale._format_timeframe("seconds", 1) == "1 segundo"
assert self.locale._format_timeframe("seconds", 2) == "2 segundo"
assert self.locale._format_timeframe("seconds", 4) == "4 segundo"
assert self.locale._format_timeframe("seconds", 5) == "5 segundo"
assert self.locale._format_timeframe("seconds", 21) == "21 segundo"
assert self.locale._format_timeframe("seconds", 22) == "22 segundo"
assert self.locale._format_timeframe("seconds", 25) == "25 segundo"
# Minutes
assert self.locale._format_timeframe("minutes", 0) == "0 minuto"
assert self.locale._format_timeframe("minutes", 1) == "1 minuto"
assert self.locale._format_timeframe("minutes", 2) == "2 minuto"
assert self.locale._format_timeframe("minutes", 4) == "4 minuto"
assert self.locale._format_timeframe("minutes", 5) == "5 minuto"
assert self.locale._format_timeframe("minutes", 21) == "21 minuto"
assert self.locale._format_timeframe("minutes", 22) == "22 minuto"
assert self.locale._format_timeframe("minutes", 25) == "25 minuto"
# Hours
assert self.locale._format_timeframe("hours", 0) == "0 oras"
assert self.locale._format_timeframe("hours", 1) == "1 oras"
assert self.locale._format_timeframe("hours", 2) == "2 oras"
assert self.locale._format_timeframe("hours", 4) == "4 oras"
assert self.locale._format_timeframe("hours", 5) == "5 oras"
assert self.locale._format_timeframe("hours", 21) == "21 oras"
assert self.locale._format_timeframe("hours", 22) == "22 oras"
assert self.locale._format_timeframe("hours", 25) == "25 oras"
# Days
assert self.locale._format_timeframe("days", 0) == "0 araw"
assert self.locale._format_timeframe("days", 1) == "1 araw"
assert self.locale._format_timeframe("days", 2) == "2 araw"
assert self.locale._format_timeframe("days", 3) == "3 araw"
assert self.locale._format_timeframe("days", 21) == "21 araw"
# Weeks
assert self.locale._format_timeframe("weeks", 0) == "0 linggo"
assert self.locale._format_timeframe("weeks", 1) == "1 linggo"
assert self.locale._format_timeframe("weeks", 2) == "2 linggo"
assert self.locale._format_timeframe("weeks", 4) == "4 linggo"
assert self.locale._format_timeframe("weeks", 5) == "5 linggo"
assert self.locale._format_timeframe("weeks", 21) == "21 linggo"
assert self.locale._format_timeframe("weeks", 22) == "22 linggo"
assert self.locale._format_timeframe("weeks", 25) == "25 linggo"
# Months
assert self.locale._format_timeframe("months", 0) == "0 buwan"
assert self.locale._format_timeframe("months", 1) == "1 buwan"
assert self.locale._format_timeframe("months", 2) == "2 buwan"
assert self.locale._format_timeframe("months", 4) == "4 buwan"
assert self.locale._format_timeframe("months", 5) == "5 buwan"
assert self.locale._format_timeframe("months", 21) == "21 buwan"
assert self.locale._format_timeframe("months", 22) == "22 buwan"
assert self.locale._format_timeframe("months", 25) == "25 buwan"
# Years
assert self.locale._format_timeframe("years", 1) == "1 taon"
assert self.locale._format_timeframe("years", 2) == "2 taon"
assert self.locale._format_timeframe("years", 5) == "5 taon"
def test_multi_describe_tl(self):
describe = self.locale.describe_multi
fulltest = [("years", 5), ("weeks", 1), ("hours", 1), ("minutes", 6)]
assert describe(fulltest) == "5 taon 1 linggo 1 oras 6 minuto mula ngayon"
seconds4000_0days = [("days", 0), ("hours", 1), ("minutes", 6)]
assert describe(seconds4000_0days) == "0 araw 1 oras 6 minuto mula ngayon"
seconds4000 = [("hours", 1), ("minutes", 6)]
assert describe(seconds4000) == "1 oras 6 minuto mula ngayon"
assert describe(seconds4000, only_distance=True) == "1 oras 6 minuto"
seconds3700 = [("hours", 1), ("minutes", 1)]
assert describe(seconds3700) == "1 oras 1 minuto mula ngayon"
seconds300_0hours = [("hours", 0), ("minutes", 5)]
assert describe(seconds300_0hours) == "0 oras 5 minuto mula ngayon"
seconds300 = [("minutes", 5)]
assert describe(seconds300) == "5 minuto mula ngayon"
seconds60 = [("minutes", 1)]
assert describe(seconds60) == "1 minuto mula ngayon"
assert describe(seconds60, only_distance=True) == "1 minuto"
seconds60 = [("seconds", 1)]
assert describe(seconds60) == "1 segundo mula ngayon"
assert describe(seconds60, only_distance=True) == "1 segundo"
def test_ordinal_number_tl(self):
assert self.locale.ordinal_number(0) == "ika-0"
assert self.locale.ordinal_number(1) == "ika-1"
assert self.locale.ordinal_number(2) == "ika-2"
assert self.locale.ordinal_number(3) == "ika-3"
assert self.locale.ordinal_number(10) == "ika-10"
assert self.locale.ordinal_number(23) == "ika-23"
assert self.locale.ordinal_number(100) == "ika-100"
assert self.locale.ordinal_number(103) == "ika-103"
assert self.locale.ordinal_number(114) == "ika-114"
@pytest.mark.usefixtures("lang_locale")
| TestTagalogLocale |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 8499,
"end": 13279
} | class ____(DistributedValues):
"""A map from device to values; acts as the same type as the values."""
def __getattr__(self, name):
# The '_use_resource_variables' and the attrs starts with '_self' are used
# for restoring the saved_model proto, and '_attribute_sentinel' is used for
# Layer tracking. At the point these attrs are queried, the variable has not
# been initialized. Thus it should not query those of the underlying
# components.
if name.startswith("_self_") or name in ("_use_resource_variables",
"_attribute_sentinel",
"_distributed_container"):
return super(DistributedDelegate, self).__getattr__(name)
# This allows copy.copy(DistributedDelegate). When copying an object,
# copy.copy doesn't invoke its __init__ method, instead it makes a new
# empty object, then copies the attributes over. copy.copy looks for
# attributes like "__getstate__" in case the object implements its custom
# copying. Since DistributedDelegate doesn't have those attributes defined,
# __getattr__ will be invoked, which tries to access "_values" attributes,
# but that doesn't exist either because this is an empty object, and again
# __getattr__ is invoked, leading to an infinite recursion.
if name == "_values":
raise AttributeError()
# TODO(priyag): This needs to be made robust against pitfalls from mix use
# __getattr__ and @property. See b/120402273.
return getattr(self._get(), name)
@property
def values(self):
"""Returns the per replica values."""
return self._values
def _get_as_operand(self):
"""Returns the value for operations for the current device.
Some implementations, e.g. `TPUMirroredVariable`, are not able to return the
value type within a replica context. They can, however, return a value that
can be used by the operations below.
"""
return self._get()
# pylint: disable=multiple-statements
def __add__(self, o):
return self._get_as_operand() + o
def __radd__(self, o):
return o + self._get_as_operand()
def __sub__(self, o):
return self._get_as_operand() - o
def __rsub__(self, o):
return o - self._get_as_operand()
def __mul__(self, o):
return self._get_as_operand() * o
def __rmul__(self, o):
return o * self._get_as_operand()
def __truediv__(self, o):
return self._get_as_operand() / o
def __rtruediv__(self, o):
return o / self._get_as_operand()
def __floordiv__(self, o):
return self._get_as_operand() // o
def __rfloordiv__(self, o):
return o // self._get_as_operand()
def __mod__(self, o):
return self._get_as_operand() % o
def __rmod__(self, o):
return o % self._get_as_operand()
def __lt__(self, o):
return self._get_as_operand() < o
def __le__(self, o):
return self._get_as_operand() <= o
def __gt__(self, o):
return self._get_as_operand() > o
def __ge__(self, o):
return self._get_as_operand() >= o
def __and__(self, o):
return self._get_as_operand() & o
def __rand__(self, o):
return o & self._get_as_operand()
def __or__(self, o):
return self._get_as_operand() | o
def __ror__(self, o):
return o | self._get_as_operand()
def __xor__(self, o):
return self._get_as_operand() ^ o
def __rxor__(self, o):
return o ^ self._get_as_operand()
def __getitem__(self, o):
return self._get_as_operand()[o]
def __pow__(self, o, modulo=None):
return pow(self._get_as_operand(), o, modulo)
def __rpow__(self, o):
return pow(o, self._get_as_operand())
def __invert__(self):
return ~self._get_as_operand()
def __neg__(self):
return -self._get_as_operand()
def __abs__(self):
return abs(self._get_as_operand())
def __div__(self, o):
try:
return self._get_as_operand().__div__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rdiv__(self, o):
try:
return self._get_as_operand().__rdiv__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __matmul__(self, o):
try:
return self._get_as_operand().__matmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
def __rmatmul__(self, o):
try:
return self._get_as_operand().__rmatmul__(o)
except AttributeError:
# See https://docs.python.org/3/library/constants.html#NotImplemented
return NotImplemented
# TODO(josh11b): Even more operator overloads.
| DistributedDelegate |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 35017,
"end": 35076
} | class ____(ExprNode):
__slots__ = ("op", "values")
| BoolOp |
python | RaRe-Technologies__gensim | gensim/test/test_poincare.py | {
"start": 10813,
"end": 18541
} | class ____(unittest.TestCase):
def setUp(self):
self.vectors = PoincareKeyedVectors.load_word2vec_format(datapath('poincare_vectors.bin'), binary=True)
def test_most_similar(self):
"""Test most_similar returns expected results."""
expected = [
'canine.n.02',
'hunting_dog.n.01',
'carnivore.n.01',
'placental.n.01',
'mammal.n.01'
]
predicted = [result[0] for result in self.vectors.most_similar('dog.n.01', topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_topn(self):
"""Test most_similar returns correct results when `topn` is specified."""
self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)
self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)
predicted = self.vectors.most_similar('dog.n.01', topn=None)
self.assertEqual(len(predicted), len(self.vectors) - 1)
self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')
def test_most_similar_raises_keyerror(self):
"""Test most_similar raises KeyError when input is out of vocab."""
with self.assertRaises(KeyError):
self.vectors.most_similar('not_in_vocab')
def test_most_similar_restrict_vocab(self):
"""Test most_similar returns handles restrict_vocab correctly."""
expected = set(self.vectors.index_to_key[:5])
predicted = set(result[0] for result in self.vectors.most_similar('dog.n.01', topn=5, restrict_vocab=5))
self.assertEqual(expected, predicted)
def test_most_similar_to_given(self):
"""Test most_similar_to_given returns correct results."""
predicted = self.vectors.most_similar_to_given('dog.n.01', ['carnivore.n.01', 'placental.n.01', 'mammal.n.01'])
self.assertEqual(predicted, 'carnivore.n.01')
def test_most_similar_with_vector_input(self):
"""Test most_similar returns expected results with an input vector instead of an input word."""
expected = [
'dog.n.01',
'canine.n.02',
'hunting_dog.n.01',
'carnivore.n.01',
'placental.n.01',
]
input_vector = self.vectors['dog.n.01']
predicted = [result[0] for result in self.vectors.most_similar([input_vector], topn=5)]
self.assertEqual(expected, predicted)
def test_distance(self):
"""Test that distance returns expected values."""
self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))
self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)
def test_distances(self):
"""Test that distances between one word and multiple other words have expected values."""
distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
distances = self.vectors.distances('dog.n.01')
self.assertEqual(len(distances), len(self.vectors))
self.assertTrue(np.allclose(distances[-1], 10.04756))
def test_distances_with_vector_input(self):
"""Test that distances between input vector and a list of words have expected values."""
input_vector = self.vectors['dog.n.01']
distances = self.vectors.distances(input_vector, ['mammal.n.01', 'dog.n.01'])
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
distances = self.vectors.distances(input_vector)
self.assertEqual(len(distances), len(self.vectors))
self.assertTrue(np.allclose(distances[-1], 10.04756))
def test_poincare_distances_batch(self):
"""Test that poincare_distance_batch returns correct distances."""
vector_1 = self.vectors['dog.n.01']
vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]
distances = self.vectors.vector_distance_batch(vector_1, vectors_2)
self.assertTrue(np.allclose(distances, [4.5278745, 0]))
def test_poincare_distance(self):
"""Test that poincare_distance returns correct distance between two input vectors."""
vector_1 = self.vectors['dog.n.01']
vector_2 = self.vectors['mammal.n.01']
distance = self.vectors.vector_distance(vector_1, vector_2)
self.assertTrue(np.allclose(distance, 4.5278745))
distance = self.vectors.vector_distance(vector_1, vector_1)
self.assertTrue(np.allclose(distance, 0))
def test_closest_child(self):
"""Test closest_child returns expected value and returns None for lowest node in hierarchy."""
self.assertEqual(self.vectors.closest_child('dog.n.01'), 'terrier.n.01')
self.assertEqual(self.vectors.closest_child('harbor_porpoise.n.01'), None)
def test_closest_parent(self):
"""Test closest_parent returns expected value and returns None for highest node in hierarchy."""
self.assertEqual(self.vectors.closest_parent('dog.n.01'), 'canine.n.02')
self.assertEqual(self.vectors.closest_parent('mammal.n.01'), None)
def test_ancestors(self):
"""Test ancestors returns expected list and returns empty list for highest node in hierarchy."""
expected = ['canine.n.02', 'carnivore.n.01', 'placental.n.01', 'mammal.n.01']
self.assertEqual(self.vectors.ancestors('dog.n.01'), expected)
expected = []
self.assertEqual(self.vectors.ancestors('mammal.n.01'), expected)
def test_descendants(self):
"""Test descendants returns expected list and returns empty list for lowest node in hierarchy."""
expected = [
'terrier.n.01', 'sporting_dog.n.01', 'spaniel.n.01', 'water_spaniel.n.01', 'irish_water_spaniel.n.01'
]
self.assertEqual(self.vectors.descendants('dog.n.01'), expected)
self.assertEqual(self.vectors.descendants('dog.n.01', max_depth=3), expected[:3])
def test_similarity(self):
"""Test similarity returns expected value for two nodes, and for identical nodes."""
self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))
self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))
def norm(self):
"""Test norm returns expected value."""
self.assertTrue(np.allclose(self.vectors.norm('dog.n.01'), 0.97757602))
self.assertTrue(np.allclose(self.vectors.norm('mammal.n.01'), 0.03914723))
def test_difference_in_hierarchy(self):
"""Test difference_in_hierarchy returns expected value for two nodes, and for identical nodes."""
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'dog.n.01'), 0))
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('mammal.n.01', 'dog.n.01'), 0.9384287))
self.assertTrue(np.allclose(self.vectors.difference_in_hierarchy('dog.n.01', 'mammal.n.01'), -0.9384287))
def test_closer_than(self):
"""Test closer_than returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.closer_than('dog.n.01', 'dog.n.01'), [])
expected = set(['canine.n.02', 'hunting_dog.n.01'])
self.assertEqual(set(self.vectors.closer_than('dog.n.01', 'carnivore.n.01')), expected)
def test_rank(self):
"""Test rank returns expected value for distinct and identical nodes."""
self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)
self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| TestPoincareKeyedVectors |
python | kamyu104__LeetCode-Solutions | Python/sum-of-values-at-indices-with-k-set-bits.py | {
"start": 807,
"end": 1119
} | class ____(object):
def sumIndicesWithKSetBits(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def popcount(x):
return bin(x)[1:].count('1')
return sum(x for i, x in enumerate(nums) if popcount(i) == k)
| Solution2 |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 57925,
"end": 58421
} | class ____(PrefectFilterBaseModel):
"""Filter by BlockSchema.id"""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of IDs to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.BlockSchema.id.in_(self.any_))
return filters
| BlockSchemaFilterId |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_requests/request_authenticators/api_token_authenticator.py | {
"start": 116,
"end": 516
} | class ____(Authenticator):
def __init__(self, email: str, password: str) -> None:
super().__init__()
self._email = f"{email}/token"
self._password = password
@property
def client_access_token(self) -> str:
api_token = base64.b64encode(f"{self._email}:{self._password}".encode("utf-8"))
return f"Basic {api_token.decode('utf-8')}"
| ApiTokenAuthenticator |
python | allegroai__clearml | clearml/backend_api/services/v2_13/auth.py | {
"start": 10480,
"end": 13611
} | class ____(Response):
"""
Response of auth.get_credentials endpoint.
:param credentials: List of credentials for the user own company, each with an
empty secret field.
:type credentials: Sequence[CredentialKey]
:param additional_credentials: The user credentials for the user tenant
companies, each with an empty secret field.
:type additional_credentials: dict
"""
_service = "auth"
_action = "get_credentials"
_version = "2.13"
_schema = {
"definitions": {
"credential_key": {
"properties": {
"access_key": {"description": "", "type": ["string", "null"]},
"last_used": {
"description": "",
"format": "date-time",
"type": ["string", "null"],
},
"last_used_from": {"description": "", "type": ["string", "null"]},
},
"type": "object",
}
},
"properties": {
"additional_credentials": {
"additionalProperties": True,
"description": "The user credentials for the user tenant companies, each with an empty secret field.",
"type": ["object", "null"],
},
"credentials": {
"description": "List of credentials for the user own company, each with an empty secret field.",
"items": {"$ref": "#/definitions/credential_key"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, credentials: Optional[List[Any]] = None, additional_credentials: Optional[dict] = None, **kwargs: Any
) -> None:
super(GetCredentialsResponse, self).__init__(**kwargs)
self.credentials = credentials
self.additional_credentials = additional_credentials
@schema_property("credentials")
def credentials(self) -> Optional[List[Any]]:
return self._property_credentials
@credentials.setter
def credentials(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_credentials = None
return
self.assert_isinstance(value, "credentials", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [CredentialKey.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "credentials", CredentialKey, is_array=True)
self._property_credentials = value
@schema_property("additional_credentials")
def additional_credentials(self) -> Optional[dict]:
return self._property_additional_credentials
@additional_credentials.setter
def additional_credentials(self, value: Optional[dict]) -> None:
if value is None:
self._property_additional_credentials = None
return
self.assert_isinstance(value, "additional_credentials", (dict,))
self._property_additional_credentials = value
| GetCredentialsResponse |
python | scipy__scipy | scipy/optimize/tests/test_constraints.py | {
"start": 8016,
"end": 9407
} | class ____:
def test_defaults(self):
A = np.eye(4)
lc = LinearConstraint(A)
lc2 = LinearConstraint(A, -np.inf, np.inf)
assert_array_equal(lc.lb, lc2.lb)
assert_array_equal(lc.ub, lc2.ub)
def test_input_validation(self):
A = np.eye(4)
message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
with pytest.raises(ValueError, match=message):
LinearConstraint(A, [1, 2], [1, 2, 3])
message = "Constraint limits must be dense arrays"
with pytest.raises(ValueError, match=message):
LinearConstraint(A, sps.coo_array([1, 2]), [2, 3])
with pytest.raises(ValueError, match=message):
LinearConstraint(A, [1, 2], sps.coo_array([2, 3]))
message = "`keep_feasible` must be a dense array"
with pytest.raises(ValueError, match=message):
keep_feasible = sps.coo_array([True, True])
LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible)
A = np.empty((4, 3, 5))
message = "`A` must have exactly two dimensions."
with pytest.raises(ValueError, match=message):
LinearConstraint(A)
def test_residual(self):
A = np.eye(2)
lc = LinearConstraint(A, -2, 4)
x0 = [-1, 2]
np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
| TestLinearConstraint |
python | joke2k__faker | tests/providers/test_lorem.py | {
"start": 22020,
"end": 24847
} | class ____:
"""Test ```de_AT``` lorem provider"""
word_list = [word.lower() for word in DeAtLoremProvider.word_list]
def test_paragraph(self, faker, num_samples):
num_sentences = 10
for _ in range(num_samples):
paragraph = faker.paragraph(nb_sentences=num_sentences)
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_paragraphs(self, faker, num_samples):
num_paragraphs = 5
for _ in range(num_samples):
paragraphs = faker.paragraphs(nb=num_paragraphs)
for paragraph in paragraphs:
assert isinstance(paragraph, str)
words = paragraph.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentence(self, faker, num_samples):
num_words = 10
for _ in range(num_samples):
sentence = faker.sentence(nb_words=num_words)
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_sentences(self, faker, num_samples):
num_sentences = 5
for _ in range(num_samples):
sentences = faker.sentences(nb=num_sentences)
for sentence in sentences:
assert isinstance(sentence, str)
words = sentence.replace(".", "").split()
assert all(word.lower() in self.word_list for word in words)
def test_text(self, faker, num_samples):
num_chars = 25
for _ in range(num_samples):
text = faker.text(max_nb_chars=num_chars)
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_texts(self, faker, num_samples):
num_texts = 5
num_chars = 25
for _ in range(num_samples):
texts = faker.texts(max_nb_chars=num_chars, nb_texts=num_texts)
for text in texts:
assert isinstance(text, str)
words = re.sub(r"[.\n]+", " ", text).split()
assert all(word.lower() in self.word_list for word in words)
def test_word(self, faker, num_samples):
for _ in range(num_samples):
word = faker.word()
assert isinstance(word, str) and word in DeAtLoremProvider.word_list
def test_words(self, faker, num_samples):
num_words = 5
for _ in range(num_samples):
words = faker.words(num_words)
assert all(isinstance(word, str) and word in DeAtLoremProvider.word_list for word in words)
| TestDeAt |
python | scikit-learn__scikit-learn | sklearn/ensemble/_hist_gradient_boosting/predictor.py | {
"start": 451,
"end": 5122
} | class ____:
"""Tree class used for predictions.
Parameters
----------
nodes : ndarray of PREDICTOR_RECORD_DTYPE
The nodes of the tree.
binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
Array of bitsets for binned categories used in predict_binned when a
split is categorical.
raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
Array of bitsets for raw categories used in predict when a split is
categorical.
"""
def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
self.nodes = nodes
self.binned_left_cat_bitsets = binned_left_cat_bitsets
self.raw_left_cat_bitsets = raw_left_cat_bitsets
def get_n_leaf_nodes(self):
"""Return number of leaves."""
return int(self.nodes["is_leaf"].sum())
def get_max_depth(self):
"""Return maximum depth among all leaves."""
return int(self.nodes["depth"].max())
def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
"""Predict raw values for non-binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
Array of bitsets of known categories, for each categorical feature.
f_idx_map : ndarray of shape (n_features,)
Map from original feature index to the corresponding index in the
known_cat_bitsets array.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
"""
out = np.empty(X.shape[0], dtype=Y_DTYPE)
_predict_from_raw_data(
self.nodes,
X,
self.raw_left_cat_bitsets,
known_cat_bitsets,
f_idx_map,
n_threads,
out,
)
return out
def predict_binned(self, X, missing_values_bin_idx, n_threads):
"""Predict raw values for binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
missing_values_bin_idx : uint8
Index of the bin that is used for missing values. This is the
index of the last bin and is always equal to max_bins (as passed
to the GBDT classes), or equivalently to n_bins - 1.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
"""
out = np.empty(X.shape[0], dtype=Y_DTYPE)
_predict_from_binned_data(
self.nodes,
X,
self.binned_left_cat_bitsets,
missing_values_bin_idx,
n_threads,
out,
)
return out
def compute_partial_dependence(self, grid, target_features, out):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray, shape (n_samples, n_target_features)
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray, shape (n_target_features)
The set of target features for which the partial dependence
should be evaluated.
out : ndarray, shape (n_samples)
The value of the partial dependence function on each grid
point.
"""
_compute_partial_dependence(self.nodes, grid, target_features, out)
def __setstate__(self, state):
try:
super().__setstate__(state)
except AttributeError:
self.__dict__.update(state)
# The dtype of feature_idx is np.intp which is platform dependent. Here, we
# make sure that saving and loading on different bitness systems works without
# errors. For instance, on a 64 bit Python runtime, np.intp = np.int64,
# while on 32 bit np.intp = np.int32.
#
# TODO: consider always using platform agnostic dtypes for fitted
# estimator attributes. For this particular estimator, this would
# mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32
# field. Ideally this should be done consistently throughout
# scikit-learn along with a common test.
if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
| TreePredictor |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_celery_kubernetes_executor.py | {
"start": 900,
"end": 2078
} | class ____:
"""Tests celery kubernetes executor."""
def test_should_create_a_worker_deployment_with_the_celery_executor(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"dags": {"persistence": {"enabled": True}, "gitSync": {"enabled": True}},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.volumes[0].name", docs[0]) == "config"
assert jmespath.search("spec.template.spec.volumes[1].name", docs[0]) == "dags"
def test_should_create_a_worker_deployment_with_the_celery_kubernetes_executor(self):
docs = render_chart(
values={
"executor": "CeleryKubernetesExecutor",
"dags": {"gitSync": {"enabled": True}, "persistence": {"enabled": False}},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.volumes[0].name", docs[0]) == "config"
assert jmespath.search("spec.template.spec.volumes[1].name", docs[0]) == "dags"
| TestCeleryKubernetesExecutor |
python | Lightning-AI__lightning | src/lightning/pytorch/utilities/types.py | {
"start": 4018,
"end": 4143
} | class ____(Protocol):
def __len__(self) -> int:
pass
def __iter__(self) -> Iterator:
pass
| _SizedIterable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 7265,
"end": 7447
} | class ____(NamedTuple):
type: NestedResourceType
name: str
@whitelist_for_serdes(storage_name="ExternalJobRef", old_fields={"is_legacy_pipeline": False})
@record
| NestedResource |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/agent_processor.py | {
"start": 17846,
"end": 20474
} | class ____(AgentProcessor):
"""
An AgentManager is an AgentProcessor that also holds a single trajectory and policy queue.
Note: this leaves room for adding AgentProcessors that publish multiple trajectory queues.
"""
def __init__(
self,
policy: Policy,
behavior_id: str,
stats_reporter: StatsReporter,
max_trajectory_length: int = sys.maxsize,
threaded: bool = True,
):
super().__init__(policy, behavior_id, stats_reporter, max_trajectory_length)
trajectory_queue_len = 20 if threaded else 0
self.trajectory_queue: AgentManagerQueue[Trajectory] = AgentManagerQueue(
self._behavior_id, maxlen=trajectory_queue_len
)
# NOTE: we make policy queues of infinite length to avoid lockups of the trainers.
# In the environment manager, we make sure to empty the policy queue before continuing to produce steps.
self.policy_queue: AgentManagerQueue[Policy] = AgentManagerQueue(
self._behavior_id, maxlen=0
)
self.publish_trajectory_queue(self.trajectory_queue)
def record_environment_stats(
self, env_stats: EnvironmentStats, worker_id: int
) -> None:
"""
Pass stats from the environment to the StatsReporter.
Depending on the StatsAggregationMethod, either StatsReporter.add_stat or StatsReporter.set_stat is used.
The worker_id is used to determine whether StatsReporter.set_stat should be used.
:param env_stats:
:param worker_id:
:return:
"""
for stat_name, value_list in env_stats.items():
for val, agg_type in value_list:
if agg_type == StatsAggregationMethod.AVERAGE:
self._stats_reporter.add_stat(stat_name, val, agg_type)
elif agg_type == StatsAggregationMethod.SUM:
self._stats_reporter.add_stat(stat_name, val, agg_type)
elif agg_type == StatsAggregationMethod.HISTOGRAM:
self._stats_reporter.add_stat(stat_name, val, agg_type)
elif agg_type == StatsAggregationMethod.MOST_RECENT:
# In order to prevent conflicts between multiple environments,
# only stats from the first environment are recorded.
if worker_id == 0:
self._stats_reporter.set_stat(stat_name, val)
else:
raise UnityTrainerException(
f"Unknown StatsAggregationMethod encountered. {agg_type}"
)
| AgentManager |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/exception.py | {
"start": 473,
"end": 591
} | class ____(TrainerError):
"""
Any error related to training with a curriculum.
"""
pass
| CurriculumError |
python | getsentry__responses | responses/__init__.py | {
"start": 17166,
"end": 19776
} | class ____(BaseResponse):
def __init__(
self,
method: str,
url: "_URLPatternType",
body: "_Body" = "",
json: Optional[Any] = None,
status: int = 200,
headers: Optional[Mapping[str, str]] = None,
stream: Optional[bool] = None,
content_type: Union[str, object] = _UNSET,
auto_calculate_content_length: bool = False,
**kwargs: Any,
) -> None:
super().__init__(method, url, **kwargs)
# if we were passed a `json` argument,
# override the body and content_type
if json is not None:
assert not body
body = json_module.dumps(json)
if content_type is _UNSET:
content_type = "application/json"
if content_type is _UNSET:
if isinstance(body, str) and _has_unicode(body):
content_type = "text/plain; charset=utf-8"
else:
content_type = "text/plain"
self.body: "_Body" = body
self.status: int = status
self.headers: Optional[Mapping[str, str]] = headers
if stream is not None:
warn(
"stream argument is deprecated. Use stream parameter in request directly",
DeprecationWarning,
)
self.stream: Optional[bool] = stream
self.content_type: str = content_type # type: ignore[assignment]
self.auto_calculate_content_length: bool = auto_calculate_content_length
def get_response(self, request: "PreparedRequest") -> HTTPResponse:
if self.body and isinstance(self.body, Exception):
setattr(self.body, "request", request)
raise self.body
headers = self.get_headers()
status = self.status
assert not isinstance(self.body, (Response, BaseException))
body = _handle_body(self.body)
if (
self.auto_calculate_content_length
and isinstance(body, BytesIO)
and "Content-Length" not in headers
):
content_length = len(body.getvalue())
headers["Content-Length"] = str(content_length)
return _form_response(body, headers, status, request.method)
def __repr__(self) -> str:
return (
"<Response(url='{url}' status={status} "
"content_type='{content_type}' headers='{headers}')>".format(
url=self.url,
status=self.status,
content_type=self.content_type,
headers=json_module.dumps(self.headers),
)
)
| Response |
python | realpython__materials | mandelbrot-set-python/viewport.py | {
"start": 85,
"end": 596
} | class ____:
image: Image.Image
center: complex
width: float
@property
def height(self):
return self.scale * self.image.height
@property
def offset(self):
return self.center + complex(-self.width, self.height) / 2
@property
def scale(self):
return self.width / self.image.width
def __iter__(self):
for y in range(self.image.height):
for x in range(self.image.width):
yield Pixel(self, x, y)
@dataclass
| Viewport |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/entities.py | {
"start": 424,
"end": 1139
} | class ____:
def __init__(self, **kw):
for key, value in kw.items():
setattr(self, key, value)
def __repr__(self):
if id(self) in _repr_stack:
return object.__repr__(self)
_repr_stack.add(id(self))
try:
return "%s(%s)" % (
(self.__class__.__name__),
", ".join(
[
"%s=%r" % (key, getattr(self, key))
for key in sorted(self.__dict__.keys())
if not key.startswith("_")
]
),
)
finally:
_repr_stack.remove(id(self))
_recursion_stack = set()
| BasicEntity |
python | google__jax | jax/_src/export/serialization_generated.py | {
"start": 8807,
"end": 11006
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Sharding()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSharding(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Sharding
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Sharding
def Kind(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# Sharding
def HloShardingProto(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# Sharding
def HloShardingProtoAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int8Flags, o)
return 0
# Sharding
def HloShardingProtoLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Sharding
def HloShardingProtoIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
def ShardingStart(builder):
builder.StartObject(2)
def ShardingAddKind(builder, kind):
builder.PrependInt8Slot(0, kind, 0)
def ShardingAddHloShardingProto(builder, hloShardingProto):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(hloShardingProto), 0)
def ShardingStartHloShardingProtoVector(builder, numElems):
return builder.StartVector(1, numElems, 1)
def ShardingEnd(builder):
return builder.EndObject()
| Sharding |
python | lepture__authlib | authlib/oauth2/rfc6749/errors.py | {
"start": 2238,
"end": 3478
} | class ____(OAuth2Error):
"""Client authentication failed (e.g., unknown client, no
client authentication included, or unsupported
authentication method). The authorization server MAY
return an HTTP 401 (Unauthorized) status code to indicate
which HTTP authentication schemes are supported. If the
client attempted to authenticate via the "Authorization"
request header field, the authorization server MUST
respond with an HTTP 401 (Unauthorized) status code and
include the "WWW-Authenticate" response header field
matching the authentication scheme used by the client.
https://tools.ietf.org/html/rfc6749#section-5.2
"""
error = "invalid_client"
status_code = 400
def get_headers(self):
headers = super().get_headers()
if self.status_code == 401:
error_description = self.get_error_description()
# safe escape
error_description = error_description.replace('"', "|")
extras = [
f'error="{self.error}"',
f'error_description="{error_description}"',
]
headers.append(("WWW-Authenticate", "Basic " + ", ".join(extras)))
return headers
| InvalidClientError |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/llm_math/base.py | {
"start": 955,
"end": 11394
} | class ____(Chain):
"""Chain that interprets a prompt and executes python code to do math.
!!! note
This class is deprecated. See below for a replacement implementation using
LangGraph. The benefits of this implementation are:
- Uses LLM tool calling features;
- Support for both token-by-token and step-by-step streaming;
- Support for checkpointing and memory of chat history;
- Easier to modify or extend
(e.g., with additional tools, structured responses, etc.)
Install LangGraph with:
```bash
pip install -U langgraph
```
```python
import math
from typing import Annotated, Sequence
from langchain_core.messages import BaseMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt.tool_node import ToolNode
import numexpr
from typing_extensions import TypedDict
@tool
def calculator(expression: str) -> str:
\"\"\"Calculate expression using Python's numexpr library.
Expression should be a single line mathematical expression
that solves the problem.
```
Examples:
"37593 * 67" for "37593 times 67"
"37593**(1/5)" for "37593^(1/5)"
\"\"\"
local_dict = {"pi": math.pi, "e": math.e}
return str(
numexpr.evaluate(
expression.strip(),
global_dict={}, # restrict access to globals
local_dict=local_dict, # add common mathematical functions
)
)
model = ChatOpenAI(model="gpt-4o-mini", temperature=0)
tools = [calculator]
model_with_tools = model.bind_tools(tools, tool_choice="any")
class ChainState(TypedDict):
\"\"\"LangGraph state.\"\"\"
messages: Annotated[Sequence[BaseMessage], add_messages]
async def acall_chain(state: ChainState, config: RunnableConfig):
last_message = state["messages"][-1]
response = await model_with_tools.ainvoke(state["messages"], config)
return {"messages": [response]}
async def acall_model(state: ChainState, config: RunnableConfig):
response = await model.ainvoke(state["messages"], config)
return {"messages": [response]}
graph_builder = StateGraph(ChainState)
graph_builder.add_node("call_tool", acall_chain)
graph_builder.add_node("execute_tool", ToolNode(tools))
graph_builder.add_node("call_model", acall_model)
graph_builder.set_entry_point("call_tool")
graph_builder.add_edge("call_tool", "execute_tool")
graph_builder.add_edge("execute_tool", "call_model")
graph_builder.add_edge("call_model", END)
chain = graph_builder.compile()
```python
example_query = "What is 551368 divided by 82"
events = chain.astream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
async for event in events:
event["messages"][-1].pretty_print()
```
```txt
================================ Human Message =================================
What is 551368 divided by 82
================================== Ai Message ==================================
Tool Calls:
calculator (call_MEiGXuJjJ7wGU4aOT86QuGJS)
Call ID: call_MEiGXuJjJ7wGU4aOT86QuGJS
Args:
expression: 551368 / 82
================================= Tool Message =================================
Name: calculator
6724.0
================================== Ai Message ==================================
551368 divided by 82 equals 6724.
```
Example:
```python
from langchain_classic.chains import LLMMathChain
from langchain_openai import OpenAI
llm_math = LLMMathChain.from_llm(OpenAI())
```
"""
llm_chain: LLMChain
llm: BaseLanguageModel | None = None
"""[Deprecated] LLM wrapper to use."""
prompt: BasePromptTemplate = PROMPT
"""[Deprecated] Prompt to use to translate to python if necessary."""
input_key: str = "question"
output_key: str = "answer"
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def _raise_deprecation(cls, values: dict) -> Any:
try:
import numexpr # noqa: F401
except ImportError as e:
msg = (
"LLMMathChain requires the numexpr package. "
"Please install it with `pip install numexpr`."
)
raise ImportError(msg) from e
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMMathChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method.",
stacklevel=5,
)
if "llm_chain" not in values and values["llm"] is not None:
prompt = values.get("prompt", PROMPT)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key."""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Expect output key."""
return [self.output_key]
def _evaluate_expression(self, expression: str) -> str:
import numexpr
try:
local_dict = {"pi": math.pi, "e": math.e}
output = str(
numexpr.evaluate(
expression.strip(),
global_dict={}, # restrict access to globals
local_dict=local_dict, # add common mathematical functions
),
)
except Exception as e:
msg = (
f'LLMMathChain._evaluate("{expression}") raised error: {e}.'
" Please try again with a valid numerical expression"
)
raise ValueError(msg) from e
# Remove any leading and trailing brackets from the output
return re.sub(r"^\[|\]$", "", output)
def _process_llm_result(
self,
llm_output: str,
run_manager: CallbackManagerForChainRun,
) -> dict[str, str]:
run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text("\nAnswer: ", verbose=self.verbose)
run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
msg = f"unknown format from LLM: {llm_output}"
raise ValueError(msg)
return {self.output_key: answer}
async def _aprocess_llm_result(
self,
llm_output: str,
run_manager: AsyncCallbackManagerForChainRun,
) -> dict[str, str]:
await run_manager.on_text(llm_output, color="green", verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
await run_manager.on_text("\nAnswer: ", verbose=self.verbose)
await run_manager.on_text(output, color="yellow", verbose=self.verbose)
answer = "Answer: " + output
elif llm_output.startswith("Answer:"):
answer = llm_output
elif "Answer:" in llm_output:
answer = "Answer: " + llm_output.split("Answer:")[-1]
else:
msg = f"unknown format from LLM: {llm_output}"
raise ValueError(msg)
return {self.output_key: answer}
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_run_manager.on_text(inputs[self.input_key])
llm_output = self.llm_chain.predict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return self._process_llm_result(llm_output, _run_manager)
async def _acall(
self,
inputs: dict[str, str],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, str]:
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
await _run_manager.on_text(inputs[self.input_key])
llm_output = await self.llm_chain.apredict(
question=inputs[self.input_key],
stop=["```output"],
callbacks=_run_manager.get_child(),
)
return await self._aprocess_llm_result(llm_output, _run_manager)
@property
def _chain_type(self) -> str:
return "llm_math_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: BasePromptTemplate = PROMPT,
**kwargs: Any,
) -> LLMMathChain:
"""Create a LLMMathChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
"""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
| LLMMathChain |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_gtk.py | {
"start": 10917,
"end": 11076
} | class ____(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
_NavigationToolbar2GTK.configure_subplots(self, None)
| ConfigureSubplotsGTK |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 175462,
"end": 177696
} | class ____(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
| InterruptedSendTimeoutTest |
python | weaviate__weaviate-python-client | weaviate/collections/classes/aggregate.py | {
"start": 5422,
"end": 5964
} | class ____(_MetricsNum):
def to_grpc(self) -> aggregate_pb2.AggregateRequest.Aggregation:
return aggregate_pb2.AggregateRequest.Aggregation(
property=self.property_name,
number=aggregate_pb2.AggregateRequest.Aggregation.Number(
count=self.count,
maximum=self.maximum,
mean=self.mean,
median=self.median,
minimum=self.minimum,
mode=self.mode,
sum=self.sum_,
),
)
| _MetricsNumber |
python | PyCQA__pylint | doc/data/messages/s/signature-differs/bad.py | {
"start": 84,
"end": 241
} | class ____(Animal):
def run(self, distance): # [signature-differs]
super(Animal, self).run(distance)
print("Fetched that stick, wuff !")
| Dog |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py | {
"start": 8616,
"end": 8756
} | class ____(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return f"<{self.name}>"
| DictWithRepr |
python | spyder-ide__spyder | spyder/plugins/editor/panels/indentationguides.py | {
"start": 371,
"end": 5352
} | class ____(Panel):
"""Indentation guides to easy identify nested blocks."""
# --- Qt Overrides
# -----------------------------------------------------------------
def __init__(self):
"""Initialize IndentationGuide panel.
i_width(int): identation width in characters.
"""
Panel.__init__(self)
self.color = Qt.darkGray
self.i_width = 4
self.bar_offset = 0
def on_install(self, editor):
"""Manages install setup of the pane."""
super().on_install(editor)
horizontal_scrollbar = editor.horizontalScrollBar()
horizontal_scrollbar.valueChanged.connect(self.update_bar_position)
horizontal_scrollbar.sliderReleased.connect(self.update)
def update_bar_position(self, value):
self.bar_offset = value
def sizeHint(self):
"""Override Qt method."""
return self.size()
def paintEvent(self, event):
"""
Overriden Qt method.
Paint indent guides.
"""
# Set painter
painter = QPainter(self)
color = QColor(self.color)
color.setAlphaF(.5)
painter.setPen(color)
# Compute offset
offset = (self.editor.document().documentMargin() +
self.editor.contentOffset().x())
# Folding info
folding_panel = self.editor.panels.get('FoldingPanel')
folding_regions = folding_panel.folding_regions
leading_whitespaces = self.editor.leading_whitespaces
# Visible block numbers
visible_blocks = self.editor.get_visible_block_numbers()
# Paint lines
for start_line in folding_regions:
end_line = folding_regions[start_line]
line_numbers = (start_line, end_line)
if self.do_paint(visible_blocks, line_numbers):
start_block = self.editor.document().findBlockByNumber(
start_line)
end_block = self.editor.document().findBlockByNumber(
end_line - 1)
content_offset = self.editor.contentOffset()
top = int(self.editor.blockBoundingGeometry(
start_block).translated(content_offset).top())
bottom = int(self.editor.blockBoundingGeometry(
end_block).translated(content_offset).bottom())
total_whitespace = leading_whitespaces.get(
max(start_line - 1, 0))
end_whitespace = leading_whitespaces.get(end_line - 1)
if end_whitespace and end_whitespace != total_whitespace:
font_metrics = self.editor.fontMetrics()
x = int(font_metrics.width(total_whitespace * '9') +
self.bar_offset + offset)
painter.drawLine(x, top, x, bottom)
# --- Other methods
# -----------------------------------------------------------------
def set_enabled(self, state):
"""Toggle edge line visibility."""
self._enabled = state
self.setVisible(state)
# We need to request folding when toggling state so the lines
# are computed when handling the folding response.
self.editor.request_folding()
def update_color(self):
"""Set color using syntax highlighter color for comments."""
self.color = self.editor.highlighter.get_color_name('comment')
def set_indentation_width(self, indentation_width):
"""Set indentation width to be used to draw indent guides."""
self.i_width = indentation_width
def do_paint(self, visible_blocks, line_numbers):
"""
Decide if we need to paint an indent guide according to the
visible region.
"""
# Line numbers for the visible region.
first_visible_line = visible_blocks[0] + 1
last_visible_line = visible_blocks[1] + 1
# Line numbers for the indent guide.
start_line = line_numbers[0]
end_line = line_numbers[1]
# Guide starts before the visible region and ends inside it.
if (start_line < first_visible_line and
(first_visible_line <= end_line <= last_visible_line)):
return True
# Guide starts before the visible region and ends after it.
if start_line <= first_visible_line and end_line >= last_visible_line:
return True
# Guide starts inside the visible region and ends after it.
if ((first_visible_line <= start_line <= last_visible_line) and
end_line > last_visible_line):
return True
# Guide starts and ends inside the visible region.
if ((first_visible_line <= start_line <= last_visible_line) and
(first_visible_line <= end_line <= last_visible_line)):
return True
# If none of those cases are true, we don't need to paint this guide.
return False
| IndentationGuide |
python | google__jax | build/tools/command.py | {
"start": 880,
"end": 1237
} | class ____:
def __init__(self, base_command: str):
self.command = [base_command]
def append(self, parameter: str):
self.command.append(parameter)
return self
def get_command_as_string(self) -> str:
return " ".join(self.command)
def get_command_as_list(self) -> list[str]:
return self.command
@dataclasses.dataclass
| CommandBuilder |
python | django__django | tests/generic_views/views.py | {
"start": 7769,
"end": 7866
} | class ____(generic.ArchiveIndexView):
queryset = Book.objects.all()
| BookArchiveWithoutDateField |
python | walkccc__LeetCode | solutions/2007. Find Original Array From Doubled Array/2007.py | {
"start": 0,
"end": 293
} | class ____:
def findOriginalArray(self, changed: list[int]) -> list[int]:
ans = []
q = collections.deque()
for num in sorted(changed):
if q and num == q[0]:
q.popleft()
else:
q.append(num * 2)
ans.append(num)
return [] if q else ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_privacy_urls.py | {
"start": 19708,
"end": 20271
} | class ____(PrivateUserProfileMixin, TestCase):
def setUp(self):
super().setUp()
self.response_data.update(
{
"/accounts/login/": {"status_code": 302},
# The test user doesn't have a GitHub account, so it's redirected to the home page.
"/accounts/migrate-to-github-app/": {"status_code": 302},
}
)
def login(self):
return self.client.login(username="owner", password="test")
def is_admin(self):
return True
| PrivateUserProfileAdminAccessTest |
python | doocs__leetcode | solution/3300-3399/3345.Smallest Divisible Digit Product I/Solution.py | {
"start": 0,
"end": 258
} | class ____:
def smallestNumber(self, n: int, t: int) -> int:
for i in count(n):
p = 1
x = i
while x:
p *= x % 10
x //= 10
if p % t == 0:
return i
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/agent.py | {
"start": 773,
"end": 1049
} | class ____(BaseEvent):
"""
AgentRunStepEndEvent.
Args:
step_output (Any): Task step output.
"""
step_output: Any
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "AgentRunStepEndEvent"
| AgentRunStepEndEvent |
python | spack__spack | lib/spack/spack/cmd/common/env_utility.py | {
"start": 1362,
"end": 5099
} | class ____:
def __init__(self, context: Context = Context.BUILD):
if context == Context.BUILD:
# TODO: run deps shouldn't be required for build env.
self.direct_deps = dt.BUILD | dt.LINK | dt.RUN
elif context == Context.TEST:
self.direct_deps = dt.BUILD | dt.TEST | dt.LINK | dt.RUN
else:
raise ValueError("context can only be Context.BUILD or Context.TEST")
self.has_uninstalled_deps = False
def accept(self, item):
# The root may be installed or uninstalled.
if item.depth == 0:
return True
# Early exit after we've seen an uninstalled dep.
if self.has_uninstalled_deps:
return False
spec = item.edge.spec
if not spec.external and not spec.installed:
self.has_uninstalled_deps = True
return False
return True
def neighbors(self, item):
# Direct deps: follow build & test edges.
# Transitive deps: follow link / run.
depflag = self.direct_deps if item.depth == 0 else dt.LINK | dt.RUN
return item.edge.spec.edges_to_dependencies(depflag=depflag)
def emulate_env_utility(cmd_name, context: Context, args):
if not args.spec:
tty.die("spack %s requires a spec." % cmd_name)
# Specs may have spaces in them, so if they do, require that the
# caller put a '--' between the spec and the command to be
# executed. If there is no '--', assume that the spec is the
# first argument.
sep = "--"
if sep in args.spec:
s = args.spec.index(sep)
spec = args.spec[:s]
cmd = args.spec[s + 1 :]
else:
spec = args.spec[0]
cmd = args.spec[1:]
if not spec:
tty.die("spack %s requires a spec." % cmd_name)
specs = spack.cmd.parse_specs(spec, concretize=False)
if len(specs) > 1:
tty.die("spack %s only takes one spec." % cmd_name)
spec = specs[0]
spec = spack.cmd.matching_spec_from_env(spec)
# Require that dependencies are installed.
visitor = AreDepsInstalledVisitor(context=context)
# Mass install check needs read transaction.
with spack.store.STORE.db.read_transaction():
traverse.traverse_breadth_first_with_visitor([spec], traverse.CoverNodesVisitor(visitor))
if visitor.has_uninstalled_deps:
raise spack.error.SpackError(
f"Not all dependencies of {spec.name} are installed. "
f"Cannot setup {context} environment:",
spec.tree(
status_fn=spack.spec.Spec.install_status,
hashlen=7,
hashes=True,
# This shows more than necessary, but we cannot dynamically change deptypes
# in Spec.tree(...).
deptypes="all" if context == Context.BUILD else ("build", "test", "link", "run"),
),
)
build_environment.setup_package(spec.package, args.dirty, context)
if args.dump:
# Dump a source-able environment to a text file.
tty.msg("Dumping a source-able environment to {0}".format(args.dump))
dump_environment(args.dump)
if args.pickle:
# Dump a source-able environment to a pickle file.
tty.msg("Pickling a source-able environment to {0}".format(args.pickle))
pickle_environment(args.pickle)
if cmd:
# Execute the command with the new environment
os.execvp(cmd[0], cmd)
elif not bool(args.pickle or args.dump):
# If no command or dump/pickle option then act like the "env" command
# and print out env vars.
for key, val in os.environ.items():
print("%s=%s" % (key, val))
| AreDepsInstalledVisitor |
python | Pylons__pyramid | tests/pkgs/restbugapp/views.py | {
"start": 40,
"end": 166
} | class ____:
def __init__(self, context, request):
self.context = context
self.request = request
| BaseRESTView |
python | huggingface__transformers | src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py | {
"start": 35799,
"end": 43079
} | class ____(Qwen2_5_VLPreTrainedModel):
config: Qwen2_5_VLTextConfig
input_modalities = ("text",)
def __init__(self, config: Qwen2_5_VLTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Qwen2_5_VLDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self._attn_implementation = config._attn_implementation
self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.has_sliding_layers = "sliding_attention" in self.config.layer_types
self.rotary_emb = Qwen2_5_VLRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# torch.jit.trace() doesn't support cache objects in the output
if use_cache and past_key_values is None and not torch.jit.is_tracing():
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
# the hard coded `3` is for temporal, height and width.
if position_ids is None:
position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
elif position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
# NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
# where each dim indicates visual spatial positions for temporal/height/width grids.
# There are two scenarios when FA2-like packed masking might be activated.
# 1. User specifically passed packed `position_ids` and no attention mask.
# In this case we expect the useer to create correct position ids for all 3 grids
# and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
# 2. User runs forward with no attention mask and no position ids. In this case, position ids
# are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
# prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
# text-only positions will cause incorrect mask construction, do not change `prepare_input_for_generation`
if position_ids.ndim == 3 and position_ids.shape[0] == 4:
text_position_ids = position_ids[0]
position_ids = position_ids[1:]
else:
# If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
text_position_ids = None
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": text_position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
# The sliding window alternating layers are not always activated depending on the config
if self.has_sliding_layers:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=text_position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
@auto_docstring
| Qwen2_5_VLTextModel |
python | pytorch__pytorch | torch/_dynamo/external_utils.py | {
"start": 3208,
"end": 4444
} | class ____:
def __init__(
self,
real: torch.autograd.function.BackwardCFunction,
saved_tensors: list[torch.Tensor],
) -> None:
self.real = real
self.saved_tensors = saved_tensors
def __getattr__(self, name: str) -> Any:
if name == "saved_variables":
warnings.warn(
"'saved_variables' is deprecated; use 'saved_tensors'",
DeprecationWarning,
)
return self.saved_tensors
return getattr(self.real, name)
def call_backward(
backward_c_function: torch.autograd.function.BackwardCFunction,
saved_tensors: list[torch.Tensor],
*args: Any,
) -> Union[torch.Tensor, tuple[torch.Tensor, ...]]:
fake = FakeBackwardCFunction(backward_c_function, saved_tensors)
grads = fake._forward_cls.backward(fake, *args) # type: ignore[attr-defined]
if not isinstance(grads, tuple):
grads = (grads,)
return grads
def normalize_as_list(x: Any) -> list[Any]:
if isinstance(x, tuple):
return list(x)
elif isinstance(x, list):
return x
return [x]
def untyped_storage_size(x: torch.Tensor) -> int:
return x.untyped_storage().size()
| FakeBackwardCFunction |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/locators.py | {
"start": 37664,
"end": 41483
} | class ____(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 440.
default_locator = AggregatingLocator(
# JSONLocator(), # don't use as PEP 426 is withdrawn
SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),
scheme='legacy')
locate = default_locator.locate
| AggregatingLocator |
python | PrefectHQ__prefect | src/prefect/utilities/schema_tools/hydration.py | {
"start": 3354,
"end": 3559
} | class ____(HydrationError):
@property
def message(self) -> str:
message = "Invalid jinja"
if self.detail:
message += f": {self.detail}"
return message
| InvalidJinja |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/components/dbt_project/scaffolder.py | {
"start": 413,
"end": 562
} | class ____(BaseModel):
init: bool = Field(default=False)
project_path: Optional[str] = None
git_url: Optional[str] = None
| DbtScaffoldParams |
python | huggingface__transformers | tests/models/marian/test_modeling_marian.py | {
"start": 20129,
"end": 20587
} | class ____(MarianIntegrationTest):
src = "fr"
tgt = "en"
src_text = [
"Donnez moi le micro.",
"Tom et Mary étaient assis à une table.", # Accents
]
expected_text = [
"Give me the microphone.",
"Tom and Mary were sitting at a table.",
]
@slow
def test_batch_generation_fr_en(self):
self._assert_generated_batch_equal_expected()
@require_sentencepiece
@require_tokenizers
| TestMarian_FR_EN |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-directory/source_google_directory/api.py | {
"start": 3851,
"end": 4179
} | class ____(StreamAPI):
def process_response(self, response: Dict) -> Iterator[dict]:
return response["users"]
def list(self, fields: Sequence[str] = None) -> Iterator[dict]:
params = {"customer": "my_customer"}
yield from self.read(partial(self._api_get, resource="users"), params=params)
| UsersAPI |
python | pytorch__pytorch | torch/ao/quantization/pt2e/_affine_quantization.py | {
"start": 27460,
"end": 29917
} | class ____(AffineQuantizedObserverBase):
def forward(self, input: torch.Tensor):
if input.numel() == 0:
return input
input_detached = input.detach()
self.original_dtype = input_detached.dtype
if self.granularity is None:
raise AssertionError("granularity is None")
self.block_size = get_block_size(input_detached.shape, self.granularity)
shape_for_reduction, reduction_dims = _get_reduction_params(
self.block_size, input_detached.size()
)
input_detached = input_detached.view(shape_for_reduction)
min_val = torch.amin(input_detached, dim=reduction_dims, keepdim=False)
max_val = torch.amax(input_detached, dim=reduction_dims, keepdim=False)
if not hasattr(self, "min_val") or not hasattr(self, "max_val"):
self.min_val = min_val
self.max_val = max_val
else:
if self.min_val.shape != min_val.shape:
raise AssertionError(
f"Can't update existing min_val - shape mismatch, self.min_val:{self.min_val.shape} != min_val:{min_val.shape}"
)
if self.max_val.shape != max_val.shape:
raise AssertionError(
f"Can't update existing max_val - shape mismatch, self.max_val {self.max_val.shape} != max_val:{max_val.shape}"
)
min_val = torch.min(self.min_val, min_val)
max_val = torch.max(self.max_val, max_val)
self.min_val.copy_(min_val)
self.max_val.copy_(max_val)
# returning original input
return input
def calculate_qparams(self) -> tuple[torch.Tensor, torch.Tensor]:
if not (hasattr(self, "min_val") and hasattr(self, "max_val")):
raise AssertionError(
"Expecting the observer has min_val and max_val, please run the observer before calling calculate_qparams"
)
return choose_qparams_affine_with_min_max(
self.min_val,
self.max_val,
self.mapping_type,
[], # BlockSize is not needed because the min/max are already reduced
self.target_dtype,
self.quant_min,
self.quant_max,
self.eps,
self.scale_dtype,
self.zero_point_dtype,
self.preserve_zero,
self.zero_point_domain,
)
| AffineQuantizedMinMaxObserver |
python | rushter__MLAlgorithms | mla/neuralnet/constraints.py | {
"start": 121,
"end": 412
} | class ____(object):
def __init__(self, m=2, axis=0):
self.axis = axis
self.m = m
def clip(self, p):
norms = np.sqrt(np.sum(p**2, axis=self.axis))
desired = np.clip(norms, 0, self.m)
p = p * (desired / (EPSILON + norms))
return p
| MaxNorm |
python | dagster-io__dagster | python_modules/libraries/dagster-datahub/dagster_datahub/resources.py | {
"start": 511,
"end": 2993
} | class ____(ConfigurableResource):
connection: str = Field(description="Datahub GMS Server")
token: Optional[str] = Field(default=None, description="Personal Access Token")
connect_timeout_sec: Optional[float] = None
read_timeout_sec: Optional[float] = None
retry_status_codes: Optional[list[int]] = None
retry_methods: Optional[list[str]] = None
retry_max_times: Optional[int] = None
extra_headers: Optional[dict[str, str]] = None
ca_certificate_path: Optional[str] = None
server_telemetry_id: Optional[str] = None # No-op - no longer accepted in DatahubRestEmitter
disable_ssl_verification: bool = False
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def get_emitter(self) -> DatahubRestEmitter:
return DatahubRestEmitter(
gms_server=self.connection,
token=self.token,
connect_timeout_sec=self.connect_timeout_sec,
read_timeout_sec=self.read_timeout_sec,
retry_status_codes=self.retry_status_codes,
retry_methods=self.retry_methods,
retry_max_times=self.retry_max_times,
extra_headers=self.extra_headers,
ca_certificate_path=self.ca_certificate_path,
disable_ssl_verification=self.disable_ssl_verification,
)
@dagster_maintained_resource
@resource(config_schema=DatahubRESTEmitterResource.to_config_schema())
def datahub_rest_emitter(init_context: InitResourceContext) -> DatahubRestEmitter:
emitter = DatahubRestEmitter(
gms_server=init_context.resource_config.get("connection"),
token=init_context.resource_config.get("token"),
connect_timeout_sec=init_context.resource_config.get("connect_timeout_sec"),
read_timeout_sec=init_context.resource_config.get("read_timeout_sec"),
retry_status_codes=init_context.resource_config.get("retry_status_codes"),
retry_methods=init_context.resource_config.get("retry_methods"),
retry_max_times=init_context.resource_config.get("retry_max_times"),
extra_headers=init_context.resource_config.get("extra_headers"),
ca_certificate_path=init_context.resource_config.get("ca_certificate_path"),
disable_ssl_verification=init_context.resource_config.get("disable_ssl_verification"),
)
# Attempt to hit the server to ensure the resource is properly configured
emitter.test_connection()
return emitter
| DatahubRESTEmitterResource |
python | optuna__optuna | optuna/visualization/_pareto_front.py | {
"start": 732,
"end": 16130
} | class ____(NamedTuple):
n_targets: int
target_names: list[str]
best_trials_with_values: list[tuple[FrozenTrial, list[float]]]
non_best_trials_with_values: list[tuple[FrozenTrial, list[float]]]
infeasible_trials_with_values: list[tuple[FrozenTrial, list[float]]]
axis_order: list[int]
include_dominated_trials: bool
has_constraints: bool
def plot_pareto_front(
study: Study,
*,
target_names: list[str] | None = None,
include_dominated_trials: bool = True,
axis_order: list[int] | None = None,
constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
targets: Callable[[FrozenTrial], Sequence[float]] | None = None,
) -> "go.Figure":
"""Plot the Pareto front of a study.
.. seealso::
Please refer to :ref:`multi_objective` for the tutorial of the Pareto front visualization.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted for their objective
values. The number of objectives must be either 2 or 3 when ``targets`` is :obj:`None`.
target_names:
Objective name list used as the axis titles. If :obj:`None` is specified,
"Objective {objective_index}" is used instead. If ``targets`` is specified
for a study that does not contain any completed trial,
``target_name`` must be specified.
include_dominated_trials:
A flag to include all dominated trial's objective values.
axis_order:
A list of indices indicating the axis order. If :obj:`None` is specified,
default order is used. ``axis_order`` and ``targets`` cannot be used at the same time.
.. warning::
Deprecated in v3.0.0. This feature will be removed in the future. The removal of
this feature is currently scheduled for v5.0.0, but this schedule is subject to
change. See https://github.com/optuna/optuna/releases/tag/v3.0.0.
constraints_func:
An optional function that computes the objective constraints. It must take a
:class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must
be a sequence of :obj:`float` s. A value strictly larger than 0 means that a
constraint is violated. A value equal to or smaller than 0 is considered feasible.
This specification is the same as in, for example,
:class:`~optuna.samplers.NSGAIISampler`.
If given, trials are classified into three categories: feasible and best, feasible but
non-best, and infeasible. Categories are shown in different colors. Here, whether a
trial is best (on Pareto front) or not is determined ignoring all infeasible trials.
.. warning::
Deprecated in v4.0.0. This feature will be removed in the future. The removal of
this feature is currently scheduled for v6.0.0, but this schedule is subject to
change. See https://github.com/optuna/optuna/releases/tag/v4.0.0.
targets:
A function that returns targets values to display.
The argument to this function is :class:`~optuna.trial.FrozenTrial`.
``axis_order`` and ``targets`` cannot be used at the same time.
If ``study.n_objectives`` is neither 2 nor 3, ``targets`` must be specified.
.. note::
Added in v3.0.0 as an experimental feature. The interface may change in newer
versions without prior notice.
See https://github.com/optuna/optuna/releases/tag/v3.0.0.
Returns:
A :class:`plotly.graph_objects.Figure` object.
"""
_imports.check()
info = _get_pareto_front_info(
study, target_names, include_dominated_trials, axis_order, constraints_func, targets
)
return _get_pareto_front_plot(info)
def _get_pareto_front_plot(info: _ParetoFrontInfo) -> "go.Figure":
include_dominated_trials = info.include_dominated_trials
has_constraints = info.has_constraints
if not has_constraints:
data = [
_make_scatter_object(
info.n_targets,
info.axis_order,
include_dominated_trials,
info.non_best_trials_with_values,
hovertemplate="%{text}<extra>Trial</extra>",
dominated_trials=True,
),
_make_scatter_object(
info.n_targets,
info.axis_order,
include_dominated_trials,
info.best_trials_with_values,
hovertemplate="%{text}<extra>Best Trial</extra>",
dominated_trials=False,
),
]
else:
data = [
_make_scatter_object(
info.n_targets,
info.axis_order,
include_dominated_trials,
info.infeasible_trials_with_values,
hovertemplate="%{text}<extra>Infeasible Trial</extra>",
infeasible=True,
),
_make_scatter_object(
info.n_targets,
info.axis_order,
include_dominated_trials,
info.non_best_trials_with_values,
hovertemplate="%{text}<extra>Feasible Trial</extra>",
dominated_trials=True,
),
_make_scatter_object(
info.n_targets,
info.axis_order,
include_dominated_trials,
info.best_trials_with_values,
hovertemplate="%{text}<extra>Best Trial</extra>",
dominated_trials=False,
),
]
if info.n_targets == 2:
layout = go.Layout(
title="Pareto-front Plot",
xaxis_title=info.target_names[info.axis_order[0]],
yaxis_title=info.target_names[info.axis_order[1]],
)
else:
layout = go.Layout(
title="Pareto-front Plot",
scene={
"xaxis_title": info.target_names[info.axis_order[0]],
"yaxis_title": info.target_names[info.axis_order[1]],
"zaxis_title": info.target_names[info.axis_order[2]],
},
)
return go.Figure(data=data, layout=layout)
def _get_pareto_front_info(
study: Study,
target_names: list[str] | None = None,
include_dominated_trials: bool = True,
axis_order: list[int] | None = None,
constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
targets: Callable[[FrozenTrial], Sequence[float]] | None = None,
) -> _ParetoFrontInfo:
if axis_order is not None:
msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format(
name="`axis_order`", d_ver="3.0.0", r_ver="5.0.0"
)
optuna_warn(msg, FutureWarning)
if constraints_func is not None:
msg = _deprecated._DEPRECATION_WARNING_TEMPLATE.format(
name="`constraints_func`", d_ver="4.0.0", r_ver="6.0.0"
)
optuna_warn(msg, FutureWarning)
if targets is not None and axis_order is not None:
raise ValueError(
"Using both `targets` and `axis_order` is not supported. "
"Use either `targets` or `axis_order`."
)
feasible_trials = []
infeasible_trials = []
has_constraints = False
for trial in study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)):
if constraints_func is not None:
# NOTE(nabenabe0928): This part is deprecated.
has_constraints = True
if all(map(lambda x: x <= 0.0, constraints_func(trial))):
feasible_trials.append(trial)
else:
infeasible_trials.append(trial)
continue
constraints = trial.system_attrs.get(_CONSTRAINTS_KEY)
has_constraints |= constraints is not None
if constraints is None or all(x <= 0.0 for x in constraints):
feasible_trials.append(trial)
else:
infeasible_trials.append(trial)
best_trials = _get_pareto_front_trials_by_trials(feasible_trials, study.directions)
if include_dominated_trials:
non_best_trials = _get_non_pareto_front_trials(feasible_trials, best_trials)
else:
non_best_trials = []
if len(best_trials) == 0:
what_trial = "completed" if has_constraints else "completed and feasible"
_logger.warning(f"Your study does not have any {what_trial} trials. ")
_targets = targets
if _targets is None:
if len(study.directions) in (2, 3):
_targets = _targets_default
else:
raise ValueError(
"`plot_pareto_front` function only supports 2 or 3 objective"
" studies when using `targets` is `None`. Please use `targets`"
" if your objective studies have more than 3 objectives."
)
def _make_trials_with_values(
trials: list[FrozenTrial],
targets: Callable[[FrozenTrial], Sequence[float]],
) -> list[tuple[FrozenTrial, list[float]]]:
target_values = [targets(trial) for trial in trials]
for v in target_values:
if not isinstance(v, Sequence):
raise ValueError(
"`targets` should return a sequence of target values."
f" your `targets` returns {type(v)}"
)
return [(trial, list(v)) for trial, v in zip(trials, target_values)]
best_trials_with_values = _make_trials_with_values(best_trials, _targets)
non_best_trials_with_values = _make_trials_with_values(non_best_trials, _targets)
infeasible_trials_with_values = _make_trials_with_values(infeasible_trials, _targets)
def _infer_n_targets(
trials_with_values: Sequence[tuple[FrozenTrial, Sequence[float]]],
) -> int | None:
if len(trials_with_values) > 0:
return len(trials_with_values[0][1])
return None
# Check for `non_best_trials_with_values` can be skipped, because if `best_trials_with_values`
# is empty, then `non_best_trials_with_values` will also be empty.
n_targets = _infer_n_targets(best_trials_with_values) or _infer_n_targets(
infeasible_trials_with_values
)
if n_targets is None:
if target_names is not None:
n_targets = len(target_names)
elif targets is None:
n_targets = len(study.directions)
else:
raise ValueError(
"If `targets` is specified for empty studies, `target_names` must be specified."
)
if n_targets not in (2, 3):
raise ValueError(
"`plot_pareto_front` function only supports 2 or 3 targets."
f" you used {n_targets} targets now."
)
if target_names is None:
metric_names = study.metric_names
if metric_names is None:
target_names = [f"Objective {i}" for i in range(n_targets)]
else:
target_names = metric_names
elif len(target_names) != n_targets:
raise ValueError(f"The length of `target_names` is supposed to be {n_targets}.")
if axis_order is None:
axis_order = list(range(n_targets))
else:
if len(axis_order) != n_targets:
raise ValueError(
f"Size of `axis_order` {axis_order}. Expect: {n_targets}, "
f"Actual: {len(axis_order)}."
)
if len(set(axis_order)) != n_targets:
raise ValueError(f"Elements of given `axis_order` {axis_order} are not unique!.")
if max(axis_order) > n_targets - 1:
raise ValueError(
f"Given `axis_order` {axis_order} contains invalid index {max(axis_order)} "
f"higher than {n_targets - 1}."
)
if min(axis_order) < 0:
raise ValueError(
f"Given `axis_order` {axis_order} contains invalid index {min(axis_order)} "
"lower than 0."
)
return _ParetoFrontInfo(
n_targets=n_targets,
target_names=target_names,
best_trials_with_values=best_trials_with_values,
non_best_trials_with_values=non_best_trials_with_values,
infeasible_trials_with_values=infeasible_trials_with_values,
axis_order=axis_order,
include_dominated_trials=include_dominated_trials,
has_constraints=has_constraints,
)
def _targets_default(trial: FrozenTrial) -> Sequence[float]:
return trial.values
def _get_non_pareto_front_trials(
trials: list[FrozenTrial], pareto_trials: list[FrozenTrial]
) -> list[FrozenTrial]:
non_pareto_trials = []
for trial in trials:
if trial not in pareto_trials:
non_pareto_trials.append(trial)
return non_pareto_trials
def _make_scatter_object(
n_targets: int,
axis_order: Sequence[int],
include_dominated_trials: bool,
trials_with_values: Sequence[tuple[FrozenTrial, Sequence[float]]],
hovertemplate: str,
infeasible: bool = False,
dominated_trials: bool = False,
) -> "go.Scatter" | "go.Scatter3d":
trials_with_values = trials_with_values or []
marker = _make_marker(
[trial for trial, _ in trials_with_values],
include_dominated_trials,
dominated_trials=dominated_trials,
infeasible=infeasible,
)
if n_targets == 2:
return go.Scatter(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
elif n_targets == 3:
return go.Scatter3d(
x=[values[axis_order[0]] for _, values in trials_with_values],
y=[values[axis_order[1]] for _, values in trials_with_values],
z=[values[axis_order[2]] for _, values in trials_with_values],
text=[_make_hovertext(trial) for trial, _ in trials_with_values],
mode="markers",
hovertemplate=hovertemplate,
marker=marker,
showlegend=False,
)
else:
assert False, "Must not reach here"
def _make_marker(
trials: Sequence[FrozenTrial],
include_dominated_trials: bool,
dominated_trials: bool = False,
infeasible: bool = False,
) -> dict[str, Any]:
if dominated_trials and not include_dominated_trials:
assert len(trials) == 0
if infeasible:
return {
"color": "#cccccc",
}
elif dominated_trials:
return {
"line": {"width": 0.5, "color": "Grey"},
"color": [t.number for t in trials],
"colorscale": "Blues",
"colorbar": {
"title": "Trial",
},
}
else:
return {
"line": {"width": 0.5, "color": "Grey"},
"color": [t.number for t in trials],
"colorscale": "Reds",
"colorbar": {
"title": "Best Trial",
"x": 1.1 if include_dominated_trials else 1,
"xpad": 40,
},
}
| _ParetoFrontInfo |
python | ray-project__ray | doc/source/serve/doc_code/managing_deployments.py | {
"start": 136,
"end": 402
} | class ____:
pass
# Creates one initial replica.
serve.run(SimpleDeployment.bind())
# Re-deploys, creating an additional replica.
# This could be the SAME Python script, modified and re-run.
@serve.deployment(name="my_deployment", num_replicas=2)
| SimpleDeployment |
python | apache__avro | lang/py/avro/errors.py | {
"start": 4243,
"end": 4387
} | class ____(AvroException):
"""Raised when attempting to generate a fingerprint with an unknown algorithm"""
| UnknownFingerprintAlgorithmException |
python | openai__openai-python | tests/test_transform.py | {
"start": 12262,
"end": 13305
} | class ____(TypedDict):
foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")]
@parametrize
@pytest.mark.asyncio
async def test_iterable_of_dictionaries(use_async: bool) -> None:
assert await transform({"foo": [{"foo_baz": "bar"}]}, TypedDictIterableUnion, use_async) == {
"FOO": [{"fooBaz": "bar"}]
}
assert cast(Any, await transform({"foo": ({"foo_baz": "bar"},)}, TypedDictIterableUnion, use_async)) == {
"FOO": [{"fooBaz": "bar"}]
}
def my_iter() -> Iterable[Baz8]:
yield {"foo_baz": "hello"}
yield {"foo_baz": "world"}
assert await transform({"foo": my_iter()}, TypedDictIterableUnion, use_async) == {
"FOO": [{"fooBaz": "hello"}, {"fooBaz": "world"}]
}
@parametrize
@pytest.mark.asyncio
async def test_dictionary_items(use_async: bool) -> None:
class DictItems(TypedDict):
foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")]
assert await transform({"foo": {"foo_baz": "bar"}}, Dict[str, DictItems], use_async) == {"foo": {"fooBaz": "bar"}}
| Baz8 |
python | django-import-export__django-import-export | tests/core/tests/test_mixins.py | {
"start": 5074,
"end": 12991
} | class ____(AdminTestMixin, TestCase):
"""
Tests for regression where methods in ModelAdmin with
BaseImportMixin / BaseExportMixin do not get called.
see #1315.
"""
request = MagicMock(spec=HttpRequest)
class BaseImportModelAdminTest(mixins.BaseImportMixin):
call_count = 0
def get_resource_classes(self, request, **kwargs):
self.call_count += 1
def get_resource_kwargs(self, request, **kwargs):
self.call_count += 1
class BaseExportModelAdminTest(mixins.BaseExportMixin):
call_count = 0
def get_resource_classes(self, request, **kwargs):
self.call_count += 1
def get_export_resource_kwargs(self, request, **kwargs):
self.call_count += 1
def test_get_import_resource_class_calls_self_get_resource_class(self):
admin = self.BaseImportModelAdminTest()
admin.get_import_resource_classes(self.request)
self.assertEqual(1, admin.call_count)
def test_get_import_resource_kwargs_calls_self_get_resource_kwargs(self):
admin = self.BaseImportModelAdminTest()
admin.get_import_resource_kwargs(self.request)
self.assertEqual(1, admin.call_count)
def test_get_export_resource_class_calls_self_get_resource_class(self):
admin = self.BaseExportModelAdminTest()
admin.get_export_resource_classes(self.request)
self.assertEqual(1, admin.call_count)
def test_get_export_resource_kwargs_calls_self_get_resource_kwargs(self):
admin = self.BaseExportModelAdminTest()
admin.get_export_resource_kwargs(self.request)
self.assertEqual(1, admin.call_count)
class BaseModelResourceClassTest(mixins.BaseImportMixin, mixins.BaseExportMixin):
resource_class = resources.Resource
export_call_count = 0
import_call_count = 0
def get_export_resource_class(self):
self.export_call_count += 1
def get_import_resource_class(self):
self.import_call_count += 1
def test_deprecated_resource_class_raises_warning(self):
"""Test that the mixin throws error if user didn't
migrate to resource_classes"""
admin = self.BaseModelResourceClassTest()
msg = (
"The 'get_export_resource_class()' method has been deprecated. "
"Please implement the new 'get_export_resource_classes()' method in "
"core.tests.test_mixins.MixinModelAdminTest.BaseModelResourceClassTest"
)
with self.assertWarns(DeprecationWarning, msg=msg):
admin.get_export_resource_classes(self.request)
msg = (
"The 'get_import_resource_class()' method has been deprecated. "
"Please implement the new 'get_import_resource_classes()' method in "
"core.tests.test_mixins.MixinModelAdminTest.BaseModelResourceClassTest"
)
with self.assertWarns(DeprecationWarning, msg=msg):
admin.get_import_resource_classes(self.request)
msg = (
"The 'resource_class' field has been deprecated. "
"Please implement the new 'resource_classes' field in "
"core.tests.test_mixins.MixinModelAdminTest.BaseModelResourceClassTest"
)
with self.assertWarns(DeprecationWarning, msg=msg):
self.assertEqual(
admin.get_resource_classes(self.request), [resources.Resource]
)
self.assertEqual(1, admin.export_call_count)
self.assertEqual(1, admin.import_call_count)
class BaseModelGetExportResourceClassTest(mixins.BaseExportMixin):
def get_resource_class(self):
pass
def test_deprecated_get_resource_class_raises_warning(self):
"""Test that the mixin throws error if user
didn't migrate to resource_classes"""
admin = self.BaseModelGetExportResourceClassTest()
msg = (
"The 'get_resource_class()' method has been deprecated. "
"Please implement the new 'get_resource_classes()' method in "
"core.tests.test_mixins.MixinModelAdminTest."
"BaseModelGetExportResourceClassTest"
)
with self.assertWarns(DeprecationWarning, msg=msg):
admin.get_resource_classes(self.request)
class BaseModelAdminFaultyResourceClassesTest(mixins.BaseExportMixin):
resource_classes = resources.Resource
def test_faulty_resource_class_raises_exception(self):
"""Test fallback mechanism to old get_export_resource_class() method"""
admin = self.BaseModelAdminFaultyResourceClassesTest()
with self.assertRaisesRegex(
Exception, r"^The resource_classes field type must be subscriptable"
):
admin.get_export_resource_classes(self.request)
class BaseModelAdminBothResourceTest(mixins.BaseExportMixin):
call_count = 0
resource_class = resources.Resource
resource_classes = [resources.Resource]
def test_both_resource_class_raises_exception(self):
"""Test fallback mechanism to old get_export_resource_class() method"""
admin = self.BaseModelAdminBothResourceTest()
with self.assertRaisesRegex(
Exception, "Only one of 'resource_class' and 'resource_classes' can be set"
):
admin.get_export_resource_classes(self.request)
class BaseModelExportChooseTest(AdminTestMixin, mixins.BaseExportMixin):
resource_classes = [resources.Resource, FooResource]
@mock.patch("import_export.admin.SelectableFieldsExportForm")
def test_choose_export_resource_class(self, form):
"""Test choose_export_resource_class() method"""
admin = self.BaseModelExportChooseTest()
self.assertEqual(
admin.choose_export_resource_class(form, self.request), resources.Resource
)
form.data = {"django-import-export-resource": 1}
self._prepend_form_prefix(form.data)
self.assertEqual(
admin.choose_export_resource_class(form, self.request), FooResource
)
class BaseModelImportChooseTest(mixins.BaseImportMixin):
resource_classes = [resources.Resource, FooResource]
@mock.patch("import_export.admin.ImportForm")
def test_choose_import_resource_class(self, form):
"""Test choose_import_resource_class() method"""
admin = self.BaseModelImportChooseTest()
request = MagicMock(spec=HttpRequest)
self.assertEqual(
admin.choose_import_resource_class(form, request),
resources.Resource,
)
form.data = {"django-import-export-resource": 1}
self._prepend_form_prefix(form.data)
self.assertEqual(admin.choose_import_resource_class(form, request), FooResource)
class BaseModelResourceClassOldTest(mixins.BaseImportMixin, mixins.BaseExportMixin):
def get_resource_class(self):
return FooResource
def test_get_resource_class_old(self):
"""
Test that if only the old get_resource_class() method is defined,
the get_export_resource_classes() and get_import_resource_classes()
still return list of resources.
"""
admin = self.BaseModelResourceClassOldTest()
msg = (
"The 'get_resource_class()' method has been deprecated. "
"Please implement the new 'get_resource_classes()' method in "
"core.tests.test_mixins.MixinModelAdminTest.BaseModelResourceClassOldTest"
)
with self.assertWarns(DeprecationWarning, msg=msg):
self.assertEqual(
admin.get_export_resource_classes(self.request), [FooResource]
)
with self.assertWarns(DeprecationWarning, msg=msg):
self.assertEqual(
admin.get_import_resource_classes(self.request), [FooResource]
)
| MixinModelAdminTest |
python | getsentry__sentry | src/sentry/prevent/migrations/0002_alter_integration_id_not_null.py | {
"start": 203,
"end": 1649
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("prevent", "0001_create_prevent_ai_configuration"),
]
operations = [
migrations.AlterField(
model_name="preventaiconfiguration",
name="integration_id",
field=sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey(
"sentry.Integration", db_index=True, on_delete="CASCADE"
),
),
]
| Migration |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/s3/io_manager.py | {
"start": 2975,
"end": 5545
} | class ____(ConfigurableIOManager):
"""Persistent IO manager using S3 for storage.
Serializes objects via pickling. Suitable for objects storage for distributed executors, so long
as each execution node has network connectivity and credentials for S3 and the backing bucket.
Assigns each op output to a unique filepath containing run ID, step key, and output name.
Assigns each asset to a single filesystem path, at "<base_dir>/<asset_key>". If the asset key
has multiple components, the final component is used as the name of the file, and the preceding
components as parent directories under the base_dir.
Subsequent materializations of an asset will overwrite previous materializations of that asset.
With a base directory of "/my/base/path", an asset with key
`AssetKey(["one", "two", "three"])` would be stored in a file called "three" in a directory
with path "/my/base/path/one/two/".
Example usage:
.. code-block:: python
from dagster import asset, Definitions
from dagster_aws.s3 import S3PickleIOManager, S3Resource
@asset
def asset1():
# create df ...
return df
@asset
def asset2(asset1):
return asset1[:5]
Definitions(
assets=[asset1, asset2],
resources={
"io_manager": S3PickleIOManager(
s3_resource=S3Resource(),
s3_bucket="my-cool-bucket",
s3_prefix="my-cool-prefix",
)
}
)
"""
s3_resource: ResourceDependency[S3Resource]
s3_bucket: str = Field(description="S3 bucket to use for the file manager.")
s3_prefix: str = Field(
default="dagster", description="Prefix to use for the S3 bucket for this file manager."
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
@cached_method
def inner_io_manager(self) -> PickledObjectS3IOManager:
return PickledObjectS3IOManager(
s3_bucket=self.s3_bucket,
s3_session=self.s3_resource.get_client(),
s3_prefix=self.s3_prefix,
)
def load_input(self, context: InputContext) -> Any:
return self.inner_io_manager().load_input(context)
def handle_output(self, context: OutputContext, obj: Any) -> None:
return self.inner_io_manager().handle_output(context, obj)
@deprecated(
breaking_version="2.0",
additional_warn_text="Please use S3PickleIOManager instead.",
)
| S3PickleIOManager |
python | getsentry__sentry | tests/sentry/utils/test_exceptions.py | {
"start": 470,
"end": 552
} | class ____(Exception):
"""Custom exception for testing."""
pass
| CustomError |
python | pytorch__pytorch | test/test_xnnpack_integration.py | {
"start": 45185,
"end": 54728
} | class ____(TestCase):
@staticmethod
def validate_transform_conv1d_to_conv2d(
self, pattern_count_transformed_map, pattern_count_optimized_map, data_shape
):
input_data = torch.normal(1, 20, size=data_shape)
for jit_method in ["script", "trace"]:
module_instance = self
if jit_method == "script":
scripted_model = torch.jit.script(module_instance)
else:
scripted_model = torch.jit.trace(module_instance, input_data)
scripted_model.eval()
ref_result = scripted_model(input_data)
torch._C._jit_pass_transform_conv1d_to_conv2d(scripted_model._c)
optimized_scripted_model = optimize_for_mobile(scripted_model)
buffer = io.BytesIO()
torch.jit.save(scripted_model, buffer)
buffer.seek(0)
deserialized_scripted_model = torch.jit.load(buffer)
for pattern, v in pattern_count_transformed_map.items():
if v == 0:
FileCheck().check(pattern).run(deserialized_scripted_model.graph)
elif v == -1:
FileCheck().check_not(pattern).run(
deserialized_scripted_model.graph
)
else:
FileCheck().check_count(pattern, v, exactly=True).run(
deserialized_scripted_model.graph
)
transformed_result = deserialized_scripted_model(input_data)
torch.testing.assert_close(
ref_result, transformed_result, rtol=1e-2, atol=1e-3
)
optimized_buffer = io.BytesIO()
torch.jit.save(optimized_scripted_model, optimized_buffer)
optimized_buffer.seek(0)
deserialized_optimized_scripted_model = torch.jit.load(optimized_buffer)
for pattern, v in pattern_count_optimized_map.items():
if v == 0:
FileCheck().check(pattern).run(
deserialized_optimized_scripted_model.graph
)
elif v == -1:
FileCheck().check_not(pattern).run(
deserialized_optimized_scripted_model.graph
)
else:
FileCheck().check_count(pattern, v, exactly=True).run(
deserialized_optimized_scripted_model.graph
)
xnnpack_result = deserialized_optimized_scripted_model(input_data)
torch.testing.assert_close(ref_result, xnnpack_result, rtol=1e-2, atol=1e-3)
@unittest.skipIf(IS_FBCODE, "T137513244")
def test_conv1d_basic(self):
batch_size_list = range(1, 3)
input_channels_per_group_list = range(10, 12)
width_list = range(10, 12)
output_channels_per_group_list = range(10, 12)
groups_list = range(1, 3)
kernel_list = range(1, 4)
stride_list = range(1, 3)
padding_list = range(3)
dilation_list = range(1, 3)
for hparams in itertools.product(
batch_size_list,
input_channels_per_group_list,
width_list,
output_channels_per_group_list,
groups_list,
kernel_list,
stride_list,
padding_list,
dilation_list,
):
(
batch_size,
input_channels_per_group,
width,
output_channels_per_group,
groups,
kernel,
stride,
padding,
dilation,
) = hparams
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
conv_weight_shape = (output_channels, input_channels_per_group, kernel)
conv_bias_shape = output_channels
class Conv1D(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(
torch.rand(conv_weight_shape), requires_grad=False
)
self.bias = torch.nn.Parameter(
torch.rand(conv_bias_shape), requires_grad=False
)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
return F.conv1d(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
data_shape = (batch_size, input_channels, width)
pattern_count_transformed_map = {
"Tensor = aten::conv1d": -1,
"Tensor = aten::conv2d": 1,
}
pattern_count_optimized_map = {
"Tensor = aten::conv1d": -1,
"Tensor = aten::conv2d": -1,
"prepacked::conv2d_clamp_prepack": -1,
"prepacked::conv2d_clamp_run": 1,
}
TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d(
Conv1D(),
pattern_count_transformed_map,
pattern_count_optimized_map,
data_shape,
)
# See https://github.com/pytorch/pytorch/issues/46066
@slowTest
def test_conv1d_with_relu_fc(self):
batch_size_list = range(1, 3)
input_channels_per_group_list = range(10, 12)
width_list = range(10, 12)
output_channels_per_group_list = range(10, 12)
groups_list = range(1, 3)
kernel_list = range(1, 4)
stride_list = range(1, 3)
padding_list = range(3)
dilation_list = range(1, 3)
output_features_list = range(1, 3)
for hparams in itertools.product(
batch_size_list,
input_channels_per_group_list,
width_list,
output_channels_per_group_list,
groups_list,
kernel_list,
stride_list,
padding_list,
dilation_list,
output_features_list,
):
(
batch_size,
input_channels_per_group,
width,
output_channels_per_group,
groups,
kernel,
stride,
padding,
dilation,
output_features,
) = hparams
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
conv_weight_shape = (output_channels, input_channels_per_group, kernel)
conv_bias_shape = output_channels
conv_output_width = (
int((width + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1
)
fc_weight_shape = (output_features, output_channels * conv_output_width)
fc_bias_shape = output_features
class Net(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv_weight = torch.nn.Parameter(
torch.rand(conv_weight_shape), requires_grad=False
)
self.conv_bias = torch.nn.Parameter(
torch.rand(conv_bias_shape), requires_grad=False
)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.fc_weight = torch.nn.Parameter(
torch.rand(fc_weight_shape), requires_grad=False
)
self.fc_bias = torch.nn.Parameter(
torch.rand(fc_bias_shape), requires_grad=False
)
def forward(self, x):
x = F.conv1d(
x,
self.conv_weight,
self.conv_bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
x = F.relu(x)
x = x.view(x.size(0), -1)
x = F.linear(x, self.fc_weight, self.fc_bias)
return x
data_shape = (batch_size, input_channels, width)
pattern_count_transformed_map = {
"Tensor = aten::conv1d": -1,
"Tensor = aten::conv2d": 1,
}
pattern_count_optimized_map = {
"Tensor = aten::conv1d": -1,
"Tensor = aten::conv2d": -1,
"prepacked::conv2d_clamp_prepack": -1,
"prepacked::conv2d_clamp_run": 1,
}
TestXNNPACKConv1dTransformPass.validate_transform_conv1d_to_conv2d(
Net(),
pattern_count_transformed_map,
pattern_count_optimized_map,
data_shape,
)
if __name__ == "__main__":
run_tests()
| TestXNNPACKConv1dTransformPass |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_int_literal.py | {
"start": 1599,
"end": 8519
} | class ____(__TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648)
self.assertEqual(0xffffffff, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648)
self.assertEqual(-(0xffffffff), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648)
self.assertEqual(-0xffffffff, -4294967295)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808)
self.assertEqual(0xffffffffffffffff, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615)
def test_oct_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648)
self.assertEqual(0o37777777777, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648)
self.assertEqual(-(0o37777777777), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648)
self.assertEqual(-0o37777777777, -4294967295)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808)
self.assertEqual(0o1777777777777777777777, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648)
self.assertEqual(0b11111111111111111111111111111111, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615)
if __name__ == "__main__":
run_tests()
| TestHexOctBin |
python | pypa__pip | src/pip/_internal/operations/prepare.py | {
"start": 7254,
"end": 28914
} | class ____:
"""Prepares a Requirement"""
def __init__( # noqa: PLR0913 (too many parameters)
self,
*,
build_dir: str,
download_dir: str | None,
src_dir: str,
build_isolation: bool,
build_isolation_installer: BuildEnvironmentInstaller,
check_build_deps: bool,
build_tracker: BuildTracker,
session: PipSession,
progress_bar: BarType,
finder: PackageFinder,
require_hashes: bool,
use_user_site: bool,
lazy_wheel: bool,
verbosity: int,
legacy_resolver: bool,
resume_retries: int,
) -> None:
super().__init__()
self.src_dir = src_dir
self.build_dir = build_dir
self.build_tracker = build_tracker
self._session = session
self._download = Downloader(session, progress_bar, resume_retries)
self.finder = finder
# Where still-packed archives should be written to. If None, they are
# not saved, and are deleted immediately after unpacking.
self.download_dir = download_dir
# Is build isolation allowed?
self.build_isolation = build_isolation
self.build_env_installer = build_isolation_installer
# Should check build dependencies?
self.check_build_deps = check_build_deps
# Should hash-checking be required?
self.require_hashes = require_hashes
# Should install in user site-packages?
self.use_user_site = use_user_site
# Should wheels be downloaded lazily?
self.use_lazy_wheel = lazy_wheel
# How verbose should underlying tooling be?
self.verbosity = verbosity
# Are we using the legacy resolver?
self.legacy_resolver = legacy_resolver
# Memoized downloaded files, as mapping of url: path.
self._downloaded: dict[str, str] = {}
# Previous "header" printed for a link-based InstallRequirement
self._previous_requirement_header = ("", "")
def _log_preparing_link(self, req: InstallRequirement) -> None:
"""Provide context for the requirement being prepared."""
if req.link.is_file and not req.is_wheel_from_cache:
message = "Processing %s"
information = str(display_path(req.link.file_path))
else:
message = "Collecting %s"
information = redact_auth_from_requirement(req.req) if req.req else str(req)
# If we used req.req, inject requirement source if available (this
# would already be included if we used req directly)
if req.req and req.comes_from:
if isinstance(req.comes_from, str):
comes_from: str | None = req.comes_from
else:
comes_from = req.comes_from.from_path()
if comes_from:
information += f" (from {comes_from})"
if (message, information) != self._previous_requirement_header:
self._previous_requirement_header = (message, information)
logger.info(message, information)
if req.is_wheel_from_cache:
with indent_log():
logger.info("Using cached %s", req.link.filename)
def _ensure_link_req_src_dir(
self, req: InstallRequirement, parallel_builds: bool
) -> None:
"""Ensure source_dir of a linked InstallRequirement."""
# Since source_dir is only set for editable requirements.
if req.link.is_wheel:
# We don't need to unpack wheels, so no need for a source
# directory.
return
assert req.source_dir is None
if req.link.is_existing_dir():
# build local directories in-tree
req.source_dir = req.link.file_path
return
# We always delete unpacked sdists after pip runs.
req.ensure_has_source_dir(
self.build_dir,
autodelete=True,
parallel_builds=parallel_builds,
)
req.ensure_pristine_source_checkout()
def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:
# By the time this is called, the requirement's link should have
# been checked so we can tell what kind of requirements req is
# and raise some more informative errors than otherwise.
# (For example, we can raise VcsHashUnsupported for a VCS URL
# rather than HashMissing.)
if not self.require_hashes:
return req.hashes(trust_internet=True)
# We could check these first 2 conditions inside unpack_url
# and save repetition of conditions, but then we would
# report less-useful error messages for unhashable
# requirements, complaining that there's no hash provided.
if req.link.is_vcs:
raise VcsHashUnsupported()
if req.link.is_existing_dir():
raise DirectoryUrlHashUnsupported()
# Unpinned packages are asking for trouble when a new version
# is uploaded. This isn't a security check, but it saves users
# a surprising hash mismatch in the future.
# file:/// URLs aren't pinnable, so don't complain about them
# not being pinned.
if not req.is_direct and not req.is_pinned:
raise HashUnpinned()
# If known-good hashes are missing for this requirement,
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
return req.hashes(trust_internet=False) or MissingHashes()
def _fetch_metadata_only(
self,
req: InstallRequirement,
) -> BaseDistribution | None:
if self.legacy_resolver:
logger.debug(
"Metadata-only fetching is not used in the legacy resolver",
)
return None
if self.require_hashes:
logger.debug(
"Metadata-only fetching is not used as hash checking is required",
)
return None
# Try PEP 658 metadata first, then fall back to lazy wheel if unavailable.
return self._fetch_metadata_using_link_data_attr(
req
) or self._fetch_metadata_using_lazy_wheel(req.link)
def _fetch_metadata_using_link_data_attr(
self,
req: InstallRequirement,
) -> BaseDistribution | None:
"""Fetch metadata from the data-dist-info-metadata attribute, if possible."""
# (1) Get the link to the metadata file, if provided by the backend.
metadata_link = req.link.metadata_link()
if metadata_link is None:
return None
assert req.req is not None
logger.verbose(
"Obtaining dependency information for %s from %s",
req.req,
metadata_link,
)
# (2) Download the contents of the METADATA file, separate from the dist itself.
metadata_file = get_http_url(
metadata_link,
self._download,
hashes=metadata_link.as_hashes(),
)
with open(metadata_file.path, "rb") as f:
metadata_contents = f.read()
# (3) Generate a dist just from those file contents.
metadata_dist = get_metadata_distribution(
metadata_contents,
req.link.filename,
req.req.name,
)
# (4) Ensure the Name: field from the METADATA file matches the name from the
# install requirement.
#
# NB: raw_name will fall back to the name from the install requirement if
# the Name: field is not present, but it's noted in the raw_name docstring
# that that should NEVER happen anyway.
if canonicalize_name(metadata_dist.raw_name) != canonicalize_name(req.req.name):
raise MetadataInconsistent(
req, "Name", req.req.name, metadata_dist.raw_name
)
return metadata_dist
def _fetch_metadata_using_lazy_wheel(
self,
link: Link,
) -> BaseDistribution | None:
"""Fetch metadata using lazy wheel, if possible."""
# --use-feature=fast-deps must be provided.
if not self.use_lazy_wheel:
return None
if link.is_file or not link.is_wheel:
logger.debug(
"Lazy wheel is not used as %r does not point to a remote wheel",
link,
)
return None
wheel = Wheel(link.filename)
name = wheel.name
logger.info(
"Obtaining dependency information from %s %s",
name,
wheel.version,
)
url = link.url.split("#", 1)[0]
try:
return dist_from_wheel_url(name, url, self._session)
except HTTPRangeRequestUnsupported:
logger.debug("%s does not support range requests", url)
return None
def _complete_partial_requirements(
self,
partially_downloaded_reqs: Iterable[InstallRequirement],
parallel_builds: bool = False,
) -> None:
"""Download any requirements which were only fetched by metadata."""
# Download to a temporary directory. These will be copied over as
# needed for downstream 'download', 'wheel', and 'install' commands.
temp_dir = TempDirectory(kind="unpack", globally_managed=True).path
# Map each link to the requirement that owns it. This allows us to set
# `req.local_file_path` on the appropriate requirement after passing
# all the links at once into BatchDownloader.
links_to_fully_download: dict[Link, InstallRequirement] = {}
for req in partially_downloaded_reqs:
assert req.link
links_to_fully_download[req.link] = req
batch_download = self._download.batch(links_to_fully_download.keys(), temp_dir)
for link, (filepath, _) in batch_download:
logger.debug("Downloading link %s to %s", link, filepath)
req = links_to_fully_download[link]
# Record the downloaded file path so wheel reqs can extract a Distribution
# in .get_dist().
req.local_file_path = filepath
# Record that the file is downloaded so we don't do it again in
# _prepare_linked_requirement().
self._downloaded[req.link.url] = filepath
# If this is an sdist, we need to unpack it after downloading, but the
# .source_dir won't be set up until we are in _prepare_linked_requirement().
# Add the downloaded archive to the install requirement to unpack after
# preparing the source dir.
if not req.is_wheel:
req.needs_unpacked_archive(Path(filepath))
# This step is necessary to ensure all lazy wheels are processed
# successfully by the 'download', 'wheel', and 'install' commands.
for req in partially_downloaded_reqs:
self._prepare_linked_requirement(req, parallel_builds)
def prepare_linked_requirement(
self, req: InstallRequirement, parallel_builds: bool = False
) -> BaseDistribution:
"""Prepare a requirement to be obtained from req.link."""
assert req.link
self._log_preparing_link(req)
with indent_log():
# Check if the relevant file is already available
# in the download directory
file_path = None
if self.download_dir is not None and req.link.is_wheel:
hashes = self._get_linked_req_hashes(req)
file_path = _check_download_dir(
req.link,
self.download_dir,
hashes,
# When a locally built wheel has been found in cache, we don't warn
# about re-downloading when the already downloaded wheel hash does
# not match. This is because the hash must be checked against the
# original link, not the cached link. It that case the already
# downloaded file will be removed and re-fetched from cache (which
# implies a hash check against the cache entry's origin.json).
warn_on_hash_mismatch=not req.is_wheel_from_cache,
)
if file_path is not None:
# The file is already available, so mark it as downloaded
self._downloaded[req.link.url] = file_path
else:
# The file is not available, attempt to fetch only metadata
metadata_dist = self._fetch_metadata_only(req)
if metadata_dist is not None:
req.needs_more_preparation = True
req.set_dist(metadata_dist)
# Ensure download_info is available even in dry-run mode
if req.download_info is None:
req.download_info = direct_url_from_link(
req.link, req.source_dir
)
return metadata_dist
# None of the optimizations worked, fully prepare the requirement
return self._prepare_linked_requirement(req, parallel_builds)
def prepare_linked_requirements_more(
self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False
) -> None:
"""Prepare linked requirements more, if needed."""
reqs = [req for req in reqs if req.needs_more_preparation]
for req in reqs:
# Determine if any of these requirements were already downloaded.
if self.download_dir is not None and req.link.is_wheel:
hashes = self._get_linked_req_hashes(req)
file_path = _check_download_dir(req.link, self.download_dir, hashes)
if file_path is not None:
self._downloaded[req.link.url] = file_path
req.needs_more_preparation = False
# Prepare requirements we found were already downloaded for some
# reason. The other downloads will be completed separately.
partially_downloaded_reqs: list[InstallRequirement] = []
for req in reqs:
if req.needs_more_preparation:
partially_downloaded_reqs.append(req)
else:
self._prepare_linked_requirement(req, parallel_builds)
# TODO: separate this part out from RequirementPreparer when the v1
# resolver can be removed!
self._complete_partial_requirements(
partially_downloaded_reqs,
parallel_builds=parallel_builds,
)
    def _prepare_linked_requirement(
        self, req: InstallRequirement, parallel_builds: bool
    ) -> BaseDistribution:
        """Fully prepare a linked requirement: fetch or reuse its artifact,
        unpack it into a source directory, and build its distribution.

        :param req: the requirement to prepare; ``req.link`` must be set.
        :param parallel_builds: passed through to source-dir setup.
        :raises InstallationError: if the artifact cannot be fetched due to
            an HTTP error.
        """
        assert req.link
        link = req.link
        # Hashes the user expects for this requirement (may be empty).
        hashes = self._get_linked_req_hashes(req)
        if hashes and req.is_wheel_from_cache:
            assert req.download_info is not None
            assert link.is_wheel
            assert link.is_file
            # We need to verify hashes, and we have found the requirement in the cache
            # of locally built wheels.
            if (
                isinstance(req.download_info.info, ArchiveInfo)
                and req.download_info.info.hashes
                and hashes.has_one_of(req.download_info.info.hashes)
            ):
                # At this point we know the requirement was built from a hashable source
                # artifact, and we verified that the cache entry's hash of the original
                # artifact matches one of the hashes we expect. We don't verify hashes
                # against the cached wheel, because the wheel is not the original.
                hashes = None
            else:
                logger.warning(
                    "The hashes of the source archive found in cache entry "
                    "don't match, ignoring cached built wheel "
                    "and re-downloading source."
                )
                # Fall back to the original source link so the artifact is
                # re-fetched and hash-checked from scratch.
                req.link = req.cached_wheel_source_link
                link = req.link
        self._ensure_link_req_src_dir(req, parallel_builds)
        # Three cases: an existing local directory needs no artifact file;
        # an undownloaded URL is fetched and unpacked; an already-downloaded
        # file is hash-checked and reused.
        if link.is_existing_dir():
            local_file = None
        elif link.url not in self._downloaded:
            try:
                local_file = unpack_url(
                    link,
                    req.source_dir,
                    self._download,
                    self.verbosity,
                    self.download_dir,
                    hashes,
                )
            except NetworkConnectionError as exc:
                raise InstallationError(
                    f"Could not install requirement {req} because of HTTP "
                    f"error {exc} for URL {link}"
                )
        else:
            file_path = self._downloaded[link.url]
            if hashes:
                hashes.check_against_path(file_path)
            local_file = File(file_path, content_type=None)
        # If download_info is set, we got it from the wheel cache.
        if req.download_info is None:
            # Editables don't go through this function (see
            # prepare_editable_requirement).
            assert not req.editable
            req.download_info = direct_url_from_link(link, req.source_dir)
            # Make sure we have a hash in download_info. If we got it as part of the
            # URL, it will have been verified and we can rely on it. Otherwise we
            # compute it from the downloaded file.
            # FIXME: https://github.com/pypa/pip/issues/11943
            if (
                isinstance(req.download_info.info, ArchiveInfo)
                and not req.download_info.info.hashes
                and local_file
            ):
                hash = hash_file(local_file.path)[0].hexdigest()
                # We populate info.hash for backward compatibility.
                # This will automatically populate info.hashes.
                req.download_info.info.hash = f"sha256={hash}"
        # For use in later processing,
        # preserve the file path on the requirement.
        if local_file:
            req.local_file_path = local_file.path
        dist = _get_prepared_distribution(
            req,
            self.build_tracker,
            self.build_env_installer,
            self.build_isolation,
            self.check_build_deps,
        )
        return dist
def save_linked_requirement(self, req: InstallRequirement) -> None:
assert self.download_dir is not None
assert req.link is not None
link = req.link
if link.is_vcs or (link.is_existing_dir() and req.editable):
# Make a .zip of the source_dir we already created.
req.archive(self.download_dir)
return
if link.is_existing_dir():
logger.debug(
"Not copying link to destination directory "
"since it is a directory: %s",
link,
)
return
if req.local_file_path is None:
# No distribution was downloaded for this requirement.
return
download_location = os.path.join(self.download_dir, link.filename)
if not os.path.exists(download_location):
shutil.copy(req.local_file_path, download_location)
download_path = display_path(download_location)
logger.info("Saved %s", download_path)
def prepare_editable_requirement(
self,
req: InstallRequirement,
) -> BaseDistribution:
"""Prepare an editable requirement."""
assert req.editable, "cannot prepare a non-editable req as editable"
logger.info("Obtaining %s", req)
with indent_log():
if self.require_hashes:
raise InstallationError(
f"The editable requirement {req} cannot be installed when "
"requiring hashes, because there is no single file to "
"hash."
)
req.ensure_has_source_dir(self.src_dir)
req.update_editable()
assert req.source_dir
req.download_info = direct_url_for_editable(req.unpacked_source_directory)
dist = _get_prepared_distribution(
req,
self.build_tracker,
self.build_env_installer,
self.build_isolation,
self.check_build_deps,
)
req.check_if_exists(self.use_user_site)
return dist
def prepare_installed_requirement(
self,
req: InstallRequirement,
skip_reason: str,
) -> BaseDistribution:
"""Prepare an already-installed requirement."""
assert req.satisfied_by, "req should have been satisfied but isn't"
assert skip_reason is not None, (
"did not get skip reason skipped but req.satisfied_by "
f"is set to {req.satisfied_by}"
)
logger.info(
"Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version
)
with indent_log():
if self.require_hashes:
logger.debug(
"Since it is already installed, we are trusting this "
"package without checking its hash. To ensure a "
"completely repeatable environment, install into an "
"empty virtualenv."
)
return InstalledDistribution(req).get_metadata_distribution()
| RequirementPreparer |
python | tensorflow__tensorflow | tensorflow/compiler/tests/gather_test.py | {
"start": 6658,
"end": 8937
} | class ____(test.Benchmark):
"""Microbenchmarks for the gather op."""
def _benchmarkGather(self, name, axis, gather_indices, use_xla_jit):
def BuilderFn():
inputs = variables.Variable(
array_ops.zeros([100, 100, 10, 100, 50], dtype=dtypes.float32),
dtype=dtypes.float32,
name='input')
indices = variables.Variable(
gather_indices, dtype=dtypes.int32, name='indices')
gather_t = array_ops.gather(inputs, indices, axis=axis)
return '%s.axis%d' % (name, axis), [gather_t]
xla_test.Benchmark(self, BuilderFn, use_xla_jit=use_xla_jit, device='cpu')
def _benchmarkSliceGather(self, axis, use_xla_jit):
"""Benchmarks a gather op that's really a dynamic slice."""
self._benchmarkGather('slice_gather', axis, [1], use_xla_jit)
def _benchmarkNontrivialGather(self, axis, use_xla_jit):
self._benchmarkGather('nontrivial_gather', axis, [9, 1, 0, 2] * 4,
use_xla_jit)
def benchmarkSliceGatherAxis0(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=False)
def benchmarkSliceGatherAxis0XLA(self):
self._benchmarkSliceGather(axis=0, use_xla_jit=True)
def benchmarkSliceGatherAxis1(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=False)
def benchmarkSliceGatherAxis1XLA(self):
self._benchmarkSliceGather(axis=1, use_xla_jit=True)
def benchmarkSliceGatherAxis4(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=False)
def benchmarkSliceGatherAxis4XLA(self):
self._benchmarkSliceGather(axis=4, use_xla_jit=True)
def benchmarkNontrivialGatherAxis0(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=False)
def benchmarkNontrivialGatherAxis0XLA(self):
self._benchmarkNontrivialGather(axis=0, use_xla_jit=True)
def benchmarkNontrivialGatherAxis1(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=False)
def benchmarkNontrivialGatherAxis1XLA(self):
self._benchmarkNontrivialGather(axis=1, use_xla_jit=True)
def benchmarkNontrivialGatherAxis4(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=False)
def benchmarkNontrivialGatherAxis4XLA(self):
self._benchmarkNontrivialGather(axis=4, use_xla_jit=True)
if __name__ == '__main__':
test.main()
| GatherBenchmark |
python | Textualize__textual | examples/json_tree.py | {
"start": 202,
"end": 2808
} | class ____(App):
BINDINGS = [
("a", "add", "Add node"),
("c", "clear", "Clear"),
("t", "toggle_root", "Toggle root"),
]
def compose(self) -> ComposeResult:
yield Header()
yield Footer()
yield Tree("Root")
@classmethod
def add_json(cls, node: TreeNode, json_data: object) -> None:
"""Adds JSON data to a node.
Args:
node (TreeNode): A Tree node.
json_data (object): An object decoded from JSON.
"""
from rich.highlighter import ReprHighlighter
highlighter = ReprHighlighter()
def add_node(name: str, node: TreeNode, data: object) -> None:
"""Adds a node to the tree.
Args:
name (str): Name of the node.
node (TreeNode): Parent node.
data (object): Data associated with the node.
"""
if isinstance(data, dict):
node.set_label(Text(f"{{}} {name}"))
for key, value in data.items():
new_node = node.add("")
add_node(key, new_node, value)
elif isinstance(data, list):
node.set_label(Text(f"[] {name}"))
for index, value in enumerate(data):
new_node = node.add("")
add_node(str(index), new_node, value)
else:
node.allow_expand = False
if name:
label = Text.assemble(
Text.from_markup(f"[b]{name}[/b]="), highlighter(repr(data))
)
else:
label = Text(repr(data))
node.set_label(label)
add_node("JSON", node, json_data)
def on_mount(self) -> None:
"""Load some JSON when the app starts."""
file_path = Path(__file__).parent / "food.json"
with open(file_path) as data_file:
self.json_data = json.load(data_file)
def action_add(self) -> None:
"""Add a node to the tree."""
tree = self.query_one(Tree)
json_node = tree.root.add("JSON")
self.add_json(json_node, self.json_data)
tree.root.expand()
def action_clear(self) -> None:
"""Clear the tree (remove all nodes)."""
tree = self.query_one(Tree)
tree.clear()
def action_toggle_root(self) -> None:
"""Toggle the root node."""
tree = self.query_one(Tree)
tree.show_root = not tree.show_root
if __name__ == "__main__":
app = TreeApp()
app.run()
| TreeApp |
python | pydantic__pydantic | pydantic/_internal/_config.py | {
"start": 10239,
"end": 14674
} | class ____:
"""A stack of `ConfigWrapper` instances."""
def __init__(self, config_wrapper: ConfigWrapper):
self._config_wrapper_stack: list[ConfigWrapper] = [config_wrapper]
@property
def tail(self) -> ConfigWrapper:
return self._config_wrapper_stack[-1]
@contextmanager
def push(self, config_wrapper: ConfigWrapper | ConfigDict | None):
if config_wrapper is None:
yield
return
if not isinstance(config_wrapper, ConfigWrapper):
config_wrapper = ConfigWrapper(config_wrapper, check=False)
self._config_wrapper_stack.append(config_wrapper)
try:
yield
finally:
self._config_wrapper_stack.pop()
config_defaults = ConfigDict(
title=None,
str_to_lower=False,
str_to_upper=False,
str_strip_whitespace=False,
str_min_length=0,
str_max_length=None,
# let the model / dataclass decide how to handle it
extra=None,
frozen=False,
populate_by_name=False,
use_enum_values=False,
validate_assignment=False,
arbitrary_types_allowed=False,
from_attributes=False,
loc_by_alias=True,
alias_generator=None,
model_title_generator=None,
field_title_generator=None,
ignored_types=(),
allow_inf_nan=True,
json_schema_extra=None,
strict=False,
revalidate_instances='never',
ser_json_timedelta='iso8601',
ser_json_temporal='iso8601',
val_temporal_unit='infer',
ser_json_bytes='utf8',
val_json_bytes='utf8',
ser_json_inf_nan='null',
validate_default=False,
validate_return=False,
protected_namespaces=('model_validate', 'model_dump'),
hide_input_in_errors=False,
json_encoders=None,
defer_build=False,
schema_generator=None,
plugin_settings=None,
json_schema_serialization_defaults_required=False,
json_schema_mode_override=None,
coerce_numbers_to_str=False,
regex_engine='rust-regex',
validation_error_cause=False,
use_attribute_docstrings=False,
cache_strings=True,
validate_by_alias=True,
validate_by_name=False,
serialize_by_alias=False,
url_preserve_empty_path=False,
)
def prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> ConfigDict:
"""Create a `ConfigDict` instance from an existing dict, a class (e.g. old class-based config) or None.
Args:
config: The input config.
Returns:
A ConfigDict object created from config.
"""
if config is None:
return ConfigDict()
if not isinstance(config, dict):
warnings.warn(DEPRECATION_MESSAGE, PydanticDeprecatedSince20, stacklevel=4)
config = {k: getattr(config, k) for k in dir(config) if not k.startswith('__')}
config_dict = cast(ConfigDict, config)
check_deprecated(config_dict)
return config_dict
config_keys = set(ConfigDict.__annotations__.keys())
V2_REMOVED_KEYS = {
'allow_mutation',
'error_msg_templates',
'fields',
'getter_dict',
'smart_union',
'underscore_attrs_are_private',
'json_loads',
'json_dumps',
'copy_on_model_validation',
'post_init_call',
}
V2_RENAMED_KEYS = {
'allow_population_by_field_name': 'validate_by_name',
'anystr_lower': 'str_to_lower',
'anystr_strip_whitespace': 'str_strip_whitespace',
'anystr_upper': 'str_to_upper',
'keep_untouched': 'ignored_types',
'max_anystr_length': 'str_max_length',
'min_anystr_length': 'str_min_length',
'orm_mode': 'from_attributes',
'schema_extra': 'json_schema_extra',
'validate_all': 'validate_default',
}
def check_deprecated(config_dict: ConfigDict) -> None:
"""Check for deprecated config keys and warn the user.
Args:
config_dict: The input config.
"""
deprecated_removed_keys = V2_REMOVED_KEYS & config_dict.keys()
deprecated_renamed_keys = V2_RENAMED_KEYS.keys() & config_dict.keys()
if deprecated_removed_keys or deprecated_renamed_keys:
renamings = {k: V2_RENAMED_KEYS[k] for k in sorted(deprecated_renamed_keys)}
renamed_bullets = [f'* {k!r} has been renamed to {v!r}' for k, v in renamings.items()]
removed_bullets = [f'* {k!r} has been removed' for k in sorted(deprecated_removed_keys)]
message = '\n'.join(['Valid config keys have changed in V2:'] + renamed_bullets + removed_bullets)
warnings.warn(message, UserWarning)
| ConfigWrapperStack |
python | spyder-ide__spyder | spyder/plugins/layout/layouts.py | {
"start": 1870,
"end": 2698
} | class ____(BaseGridLayoutType):
ID = DefaultLayouts.HorizontalSplitLayout
def __init__(self, parent_plugin):
super().__init__(parent_plugin)
self.add_area(
[Plugins.Editor],
row=0,
column=0,
)
self.add_area(
[
Plugins.IPythonConsole,
Plugins.Explorer,
Plugins.Help,
Plugins.VariableExplorer,
Plugins.Debugger,
Plugins.Profiler,
Plugins.Plots,
Plugins.History,
],
row=0,
column=1,
default=True,
)
self.set_column_stretch(0, 5)
self.set_column_stretch(1, 4)
def get_name(self):
return _("Horizontal split")
| HorizontalSplitLayout |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 91565,
"end": 92068
} | class ____(fixtures.TestBase):
def test_object(self):
eq_(set(util.class_hierarchy(object)), {object})
def test_single(self):
class A:
pass
class B:
pass
eq_(set(util.class_hierarchy(A)), {A, object})
eq_(set(util.class_hierarchy(B)), {B, object})
class C(A, B):
pass
eq_(set(util.class_hierarchy(A)), {A, B, C, object})
eq_(set(util.class_hierarchy(B)), {A, B, C, object})
| TestClassHierarchy |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_mixin.py | {
"start": 145,
"end": 1322
} | class ____:
@staticmethod
def test_subclass():
class Subclass(_NamedMixin):
def __init__(self, name):
self.name = name
instance = Subclass('name')
assert instance.name == 'name'
@pytest.fixture(autouse=True)
def _named_mixin_fixture(self):
class Subclass(_NamedMixin):
def __init__(self, name):
self.name = name
self.Subclass = Subclass
@pytest.mark.parametrize('name', ['a', 'name', 'long_name'])
def test_valid_name_argument(self, name):
instance = self.Subclass(name)
assert instance.name == name
@pytest.mark.parametrize('invalid_name', [0, 0.0, None, False])
def test_invalid_name_argument_not_str(self, invalid_name):
with pytest.raises(TypeError):
_ = self.Subclass(invalid_name)
def test_invalid_name_argument_zero_length_str(self):
with pytest.raises(ValueError):
_ = self.Subclass('')
def test_name_attribute_is_immutable(self):
instance = self.Subclass('name')
with pytest.raises(AttributeError):
instance.name = 'new_name'
| TestNamedMixin |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 15590,
"end": 16369
} | class ____(SimpleFormatNode):
"""
Choose a string if a regex group was not found.
This node represents the expression ${group :- value_if_not_exists}, where
value_if_not_exists is evaluated if $group is not present on the
regex match, otherwise the group value is returned.
"""
KIND = FormatKind.ELSE
def __init__(self, group_number, negative_match):
SimpleFormatNode.__init__(self, group_number)
self.negative_match = negative_match
def transform_regex(self, regex_result):
result = ''
if regex_result.group(self.group_number) is None:
result = self.negative_match.transform_regex(regex_result)
else:
result = regex_result.group(self.group_number)
return result
| ElseNode |
python | django__django | tests/lookup/models.py | {
"start": 1842,
"end": 2035
} | class ____(models.Model):
season = models.ForeignKey(Season, models.CASCADE, related_name="games")
home = models.CharField(max_length=100)
away = models.CharField(max_length=100)
| Game |
python | walkccc__LeetCode | solutions/2001. Number of Pairs of Interchangeable Rectangles/2001-2.py | {
"start": 0,
"end": 390
} | class ____:
def interchangeableRectangles(self, rectangles: list[list[int]]) -> int:
ratioCount = collections.Counter()
def gcd(a: int, b: int) -> int:
return a if b == 0 else gcd(b, a % b)
for width, height in rectangles:
d = gcd(width, height)
ratioCount[(width // d, height // d)] += 1
return sum(c * (c - 1) // 2 for c in ratioCount.values())
| Solution |
python | kennethreitz__tablib | src/tablib/packages/dbfpy/dbfnew.py | {
"start": 2595,
"end": 5254
} | class ____:
"""New .DBF creation helper.
Example Usage:
dbfn = dbf_new()
dbfn.add_field("name",'C',80)
dbfn.add_field("price",'N',10,2)
dbfn.add_field("date",'D',8)
dbfn.write("tst.dbf")
Note:
This module cannot handle Memo-fields,
they are special.
"""
__slots__ = ("fields",)
FieldDefinitionClass = _FieldDefinition
def __init__(self):
self.fields = []
def add_field(self, name, typ, len, dec=0):
"""Add field definition.
Arguments:
name:
field name (str object). field name must not
contain ASCII NULs and it's length shouldn't
exceed 10 characters.
typ:
type of the field. this must be a single character
from the "CNLMDT" set meaning character, numeric,
logical, memo, date and date/time respectively.
len:
length of the field. this argument is used only for
the character and numeric fields. all other fields
have fixed length.
FIXME: use None as a default for this argument?
dec:
decimal precision. used only for the numric fields.
"""
self.fields.append(self.FieldDefinitionClass(name, typ, len, dec))
def write(self, filename):
"""Create empty .DBF file using current structure."""
_dbfh = DbfHeader()
_dbfh.setCurrentDate()
for _fldDef in self.fields:
_fldDef.appendToHeader(_dbfh)
_dbfStream = open(filename, "wb")
_dbfh.write(_dbfStream)
_dbfStream.close()
if __name__ == '__main__':
# create a new DBF-File
dbfn = dbf_new()
dbfn.add_field("name", 'C', 80)
dbfn.add_field("price", 'N', 10, 2)
dbfn.add_field("date", 'D', 8)
dbfn.write("tst.dbf")
# test new dbf
print("*** created tst.dbf: ***")
dbft = Dbf('tst.dbf', readOnly=0)
print(repr(dbft))
# add a record
rec = DbfRecord(dbft)
rec['name'] = 'something'
rec['price'] = 10.5
rec['date'] = (2000, 1, 12)
rec.store()
# add another record
rec = DbfRecord(dbft)
rec['name'] = 'foo and bar'
rec['price'] = 12234
rec['date'] = (1992, 7, 15)
rec.store()
# show the records
print("*** inserted 2 records into tst.dbf: ***")
print(repr(dbft))
for i1 in range(len(dbft)):
rec = dbft[i1]
for fldName in dbft.fieldNames:
print('{}:\t {}'.format(fldName, rec[fldName]))
print()
dbft.close()
# vim: set et sts=4 sw=4 :
| dbf_new |
python | pypa__warehouse | tests/unit/accounts/test_services.py | {
"start": 70489,
"end": 73650
} | class ____:
def test_verify_service(self):
assert verifyClass(IDomainStatusService, services.DomainrDomainStatusService)
def test_successful_domain_status_check(self):
response = pretend.stub(
json=lambda: {
"status": [{"domain": "example.com", "status": "undelegated inactive"}]
},
raise_for_status=lambda: None,
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **kw: response))
svc = services.DomainrDomainStatusService(
session=session, client_id="some_client_id"
)
assert svc.get_domain_status("example.com") == ["undelegated", "inactive"]
assert session.get.calls == [
pretend.call(
"https://api.domainr.com/v2/status",
params={"client_id": "some_client_id", "domain": "example.com"},
timeout=5,
)
]
def test_domainr_exception_returns_empty(self):
class DomainrException(requests.HTTPError):
def __init__(self):
self.response = pretend.stub(status_code=400)
response = pretend.stub(raise_for_status=pretend.raiser(DomainrException))
session = pretend.stub(get=pretend.call_recorder(lambda *a, **kw: response))
svc = services.DomainrDomainStatusService(
session=session, client_id="some_client_id"
)
assert svc.get_domain_status("example.com") is None
assert session.get.calls == [
pretend.call(
"https://api.domainr.com/v2/status",
params={"client_id": "some_client_id", "domain": "example.com"},
timeout=5,
)
]
def test_domainr_response_contains_errors_returns_none(self):
response = pretend.stub(
json=lambda: {
"status": [],
"errors": [
{
"code": 400,
"detail": "unknown zone: ocm",
"message": "Bad request",
}
],
},
raise_for_status=lambda: None,
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **kw: response))
svc = services.DomainrDomainStatusService(
session=session, client_id="some_client_id"
)
assert svc.get_domain_status("example.ocm") is None
assert session.get.calls == [
pretend.call(
"https://api.domainr.com/v2/status",
params={"client_id": "some_client_id", "domain": "example.ocm"},
timeout=5,
)
]
def test_factory(self):
context = pretend.stub()
request = pretend.stub(
http=pretend.stub(),
registry=pretend.stub(
settings={"domain_status.client_id": "some_client_id"}
),
)
svc = services.DomainrDomainStatusService.create_service(context, request)
assert svc._http is request.http
assert svc.client_id == "some_client_id"
| TestDomainrDomainStatusService |
python | mamba-org__mamba | micromamba/tests/test_linking.py | {
"start": 252,
"end": 6199
} | class ____:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
env_name = helpers.random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + helpers.random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestLinking.root_prefix
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestLinking.current_root_prefix
os.environ["CONDA_PREFIX"] = TestLinking.current_prefix
if Path(TestLinking.root_prefix).exists():
helpers.rmtree(TestLinking.root_prefix)
@classmethod
def teardown_method(cls):
if Path(TestLinking.prefix).exists():
helpers.rmtree(TestLinking.prefix)
def test_link(self, existing_cache, test_pkg):
helpers.create(package_to_test, "-n", TestLinking.env_name, "--json", no_dry_run=True)
install_env_dir = helpers.get_env(TestLinking.env_name)
pkg_checker = helpers.PackageChecker(package_to_test, install_env_dir)
linked_file_path = pkg_checker.find_installed(file_in_package_to_test)
assert linked_file_path
assert linked_file_path.exists()
assert not linked_file_path.is_symlink()
linked_file_rel_path = linked_file_path.relative_to(install_env_dir)
cache_file = existing_cache / test_pkg / linked_file_rel_path
assert cache_file.stat().st_dev == linked_file_path.stat().st_dev
assert cache_file.stat().st_ino == linked_file_path.stat().st_ino
def test_copy(self, existing_cache, test_pkg):
helpers.create(
package_to_test,
"-n",
TestLinking.env_name,
"--json",
"--always-copy",
no_dry_run=True,
)
install_env_dir = helpers.get_env(TestLinking.env_name)
pkg_checker = helpers.PackageChecker(package_to_test, install_env_dir)
linked_file_path = pkg_checker.find_installed(file_in_package_to_test)
assert linked_file_path
assert linked_file_path.exists()
assert not linked_file_path.is_symlink()
linked_file_rel_path = linked_file_path.relative_to(install_env_dir)
cache_file = existing_cache / test_pkg / linked_file_rel_path
assert cache_file.stat().st_dev == linked_file_path.stat().st_dev
assert cache_file.stat().st_ino != linked_file_path.stat().st_ino
@pytest.mark.skipif(
platform.system() == "Windows",
reason="Softlinking needs admin privileges on win",
)
def test_always_softlink(self, existing_cache, test_pkg):
helpers.create(
package_to_test,
"-n",
TestLinking.env_name,
"--json",
"--always-softlink",
no_dry_run=True,
)
install_env_dir = helpers.get_env(TestLinking.env_name)
pkg_checker = helpers.PackageChecker(package_to_test, install_env_dir)
linked_file_path = pkg_checker.find_installed(file_in_package_to_test)
assert linked_file_path
assert linked_file_path.exists()
assert linked_file_path.is_symlink()
linked_file_rel_path = linked_file_path.relative_to(install_env_dir)
cache_file = existing_cache / test_pkg / linked_file_rel_path
assert cache_file.stat().st_dev == linked_file_path.stat().st_dev
assert cache_file.stat().st_ino == linked_file_path.stat().st_ino
assert os.readlink(linked_file_path) == str(cache_file)
@pytest.mark.parametrize("allow_softlinks", [True, False])
@pytest.mark.parametrize("always_copy", [True, False])
def test_cross_device(self, allow_softlinks, always_copy, existing_cache, test_pkg):
if platform.system() != "Linux":
pytest.skip("o/s is not linux")
create_args = [package_to_test, "-n", TestLinking.env_name, "--json"]
if allow_softlinks:
create_args.append("--allow-softlinks")
if always_copy:
create_args.append("--always-copy")
helpers.create(*create_args, no_dry_run=True)
same_device = existing_cache.stat().st_dev == Path(TestLinking.prefix).stat().st_dev
is_softlink = not same_device and allow_softlinks and not always_copy
is_hardlink = same_device and not always_copy
install_env_dir = helpers.get_env(TestLinking.env_name)
pkg_checker = helpers.PackageChecker(package_to_test, install_env_dir)
linked_file_path = pkg_checker.find_installed(file_in_package_to_test)
assert linked_file_path
assert linked_file_path.exists()
linked_file_rel_path = linked_file_path.relative_to(install_env_dir)
cache_file = existing_cache / test_pkg / linked_file_rel_path
assert cache_file.stat().st_dev == linked_file_path.stat().st_dev
assert (cache_file.stat().st_ino == linked_file_path.stat().st_ino) == is_hardlink
assert linked_file_path.is_symlink() == is_softlink
def test_unlink_missing_file(self):
helpers.create(package_to_test, "-n", TestLinking.env_name, "--json", no_dry_run=True)
pkg_checker = helpers.PackageChecker(package_to_test, helpers.get_env(TestLinking.env_name))
linked_file_path = pkg_checker.find_installed(file_in_package_to_test)
assert linked_file_path
assert linked_file_path.exists()
assert not linked_file_path.is_symlink()
os.remove(linked_file_path)
helpers.remove(package_to_test, "-n", TestLinking.env_name)
@pytest.mark.skipif(
sys.platform == "darwin" and platform.machine() == "arm64",
reason="Python 3.7 not available",
)
def test_link_missing_scripts_dir(self): # issue 2808
helpers.create("python=3.7", "pypy", "-n", TestLinking.env_name, "--json", no_dry_run=True)
| TestLinking |
python | getsentry__sentry | src/sentry/net/http.py | {
"start": 4419,
"end": 4776
} | class ____(HTTPSConnectionPool):
ConnectionCls = SafeHTTPSConnection
def __init__(self, *args, is_ipaddress_permitted: IsIpAddressPermitted = None, **kwargs):
super().__init__(*args, **kwargs)
self.ConnectionCls = partial(
self.ConnectionCls, is_ipaddress_permitted=is_ipaddress_permitted
)
| SafeHTTPSConnectionPool |
python | run-llama__llama_index | llama-index-integrations/callbacks/llama-index-callbacks-agentops/llama_index/callbacks/agentops/base.py | {
"start": 8660,
"end": 10212
} | class ____(BaseInstrumentationHandler):
@classmethod
def init(
cls,
api_key: Optional[str] = None,
parent_key: Optional[str] = None,
endpoint: Optional[str] = None,
max_wait_time: Optional[int] = None,
max_queue_size: Optional[int] = None,
tags: Optional[List[str]] = None,
instrument_llm_calls=True,
inherited_session_id: Optional[str] = None,
):
client_params: Dict[str, Any] = {
"api_key": api_key,
"parent_key": parent_key,
"endpoint": endpoint,
"max_wait_time": max_wait_time,
"max_queue_size": max_queue_size,
"tags": tags,
"instrument_llm_calls": instrument_llm_calls,
"auto_start_session": True,
"inherited_session_id": inherited_session_id,
"skip_auto_end_session": False,
}
ao_client = AOClient(
**{k: v for k, v in client_params.items() if v is not None}
)
# Create synchronized span and event handler, attach to root dispatcher
dispatcher = instrument.get_dispatcher()
handler_state = AgentOpsHandlerState()
event_handler = AgentOpsEventHandler(
shared_handler_state=handler_state, ao_client=ao_client
)
span_handler = AgentOpsSpanHandler(
shared_handler_state=handler_state, ao_client=ao_client
)
dispatcher.add_event_handler(event_handler)
dispatcher.add_span_handler(span_handler)
| AgentOpsHandler |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_protect05.py | {
"start": 315,
"end": 1240
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("protect05.xlsx")
def test_create_file(self):
"""Test the a simple XlsxWriter file with worksheet protection."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
unlocked = workbook.add_format({"locked": 0, "hidden": 0})
hidden = workbook.add_format({"locked": 0, "hidden": 1})
worksheet.protect()
worksheet.unprotect_range("=A1")
worksheet.unprotect_range("$C$1:$C$3")
worksheet.unprotect_range("G4:I6", "MyRange")
worksheet.unprotect_range("K7")
worksheet.write("A1", 1)
worksheet.write("A2", 2, unlocked)
worksheet.write("A3", 3, hidden)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | torch/_export/db/examples/dynamic_shape_assert.py | {
"start": 41,
"end": 450
} | class ____(torch.nn.Module):
"""
A basic usage of python assertion.
"""
def forward(self, x):
# assertion with error message
assert x.shape[0] > 2, f"{x.shape[0]} is greater than 2"
# assertion without error message
assert x.shape[0] > 1
return x
example_args = (torch.randn(3, 2),)
tags = {"python.assert"}
model = DynamicShapeAssert()
| DynamicShapeAssert |
python | pydata__xarray | xarray/tests/test_dask.py | {
"start": 2084,
"end": 11573
class ____(DaskTestCase):
    """Test suite for dask-backed ``xarray.Variable``.

    Each test builds (via the autouse ``setUp`` fixture) an eager
    numpy-backed Variable and a lazy dask-backed twin over the same values,
    then checks that operations on the lazy variable stay lazy and agree
    with the eager result.
    """

    def assertLazyAndIdentical(self, expected, actual):
        # Lazy-aware comparison using exact identity (values, dims, attrs).
        self.assertLazyAnd(expected, actual, assert_identical)

    def assertLazyAndAllClose(self, expected, actual):
        # Lazy-aware comparison using tolerant float equality.
        self.assertLazyAnd(expected, actual, assert_allclose)

    @pytest.fixture(autouse=True)
    def setUp(self):
        # 4x6 random array, seeded for reproducibility; chunked (2, 2) so
        # every axis has multiple chunks and chunk-boundary bugs surface.
        self.values = np.random.default_rng(0).random((4, 6))
        self.data = da.from_array(self.values, chunks=(2, 2))
        self.eager_var = Variable(("x", "y"), self.values)
        self.lazy_var = Variable(("x", "y"), self.data)

    def test_basics(self):
        # The Variable must wrap the dask array itself, not a copy.
        v = self.lazy_var
        assert self.data is v.data
        assert self.data.chunks == v.chunks
        assert_array_equal(self.values, v)

    def test_copy(self):
        # Both shallow and deep copies must stay lazy and identical.
        self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
        self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))

    def test_chunk(self):
        # Rechunking specs: empty dict (no-op), uniform int, per-dim dicts,
        # and an explicit per-dim tuple of chunk sizes.
        test_cases: list[tuple[int | dict[str, Any], tuple[tuple[int, ...], ...]]] = [
            ({}, ((2, 2), (2, 2, 2))),
            (3, ((3, 1), (3, 3))),
            ({"x": 3, "y": 3}, ((3, 1), (3, 3))),
            ({"x": 3}, ((3, 1), (2, 2, 2))),
            ({"x": (3, 1)}, ((3, 1), (2, 2, 2))),
        ]
        for chunks, expected in test_cases:
            rechunked = self.lazy_var.chunk(chunks)
            assert rechunked.chunks == expected
            self.assertLazyAndIdentical(self.eager_var, rechunked)

            # chunksizes is the dim-name-keyed view of the same information.
            expected_chunksizes = dict(zip(self.lazy_var.dims, expected, strict=True))
            assert rechunked.chunksizes == expected_chunksizes

    def test_indexing(self):
        # Scalar, slice, and fancy (list-of-indices) indexing stay lazy.
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0], v[0])
        self.assertLazyAndIdentical(u[:1], v[:1])
        self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])

    @pytest.mark.parametrize(
        "expected_data, index",
        [
            (da.array([99, 2, 3, 4]), 0),
            (da.array([99, 99, 99, 4]), slice(2, None, -1)),
            (da.array([99, 99, 3, 99]), [0, -1, 1]),
            (da.array([99, 99, 99, 4]), np.arange(3)),
            (da.array([1, 99, 99, 99]), [False, True, True, True]),
            (da.array([1, 99, 99, 99]), np.array([False, True, True, True])),
            (da.array([99, 99, 99, 99]), Variable(("x"), np.array([True] * 4))),
        ],
    )
    def test_setitem_dask_array(self, expected_data, index):
        # Assignment through each index type must not trigger computation
        # (guarded by raise_if_dask_computes).
        arr = Variable(("x"), da.array([1, 2, 3, 4]))
        expected = Variable(("x"), expected_data)
        with raise_if_dask_computes():
            arr[index] = 99
        assert_identical(arr, expected)

    def test_squeeze(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())

    def test_equals(self):
        # equals/identical must not eagerly convert the dask backend.
        v = self.lazy_var
        assert v.equals(v)
        assert isinstance(v.data, da.Array)
        assert v.identical(v)
        assert isinstance(v.data, da.Array)

    def test_transpose(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.T, v.T)

    def test_shift(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
        self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
        # shifting must preserve the chunk structure
        assert v.data.chunks == v.shift(x=1).data.chunks

    def test_roll(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
        # rolling must preserve the chunk structure
        assert v.data.chunks == v.roll(x=1).data.chunks

    def test_unary_op(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(-u, -v)
        self.assertLazyAndIdentical(abs(u), abs(v))
        self.assertLazyAndIdentical(u.round(), v.round())

    def test_binary_op(self):
        # scalar * var, var + var, and broadcasting a row against the full var
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(2 * u, 2 * v)
        self.assertLazyAndIdentical(u + u, v + v)
        self.assertLazyAndIdentical(u[0] + u, v[0] + v)

    def test_binary_op_bitshift(self) -> None:
        # bit shifts only work on ints so we need to generate
        # new eager and lazy vars
        rng = np.random.default_rng(0)
        values = rng.integers(low=-10000, high=10000, size=(4, 6))
        data = da.from_array(values, chunks=(2, 2))
        u = Variable(("x", "y"), values)
        v = Variable(("x", "y"), data)
        self.assertLazyAndIdentical(u << 2, v << 2)
        self.assertLazyAndIdentical(u << 5, v << 5)
        self.assertLazyAndIdentical(u >> 2, v >> 2)
        self.assertLazyAndIdentical(u >> 5, v >> 5)

    def test_repr(self):
        # The repr embeds the dask array's own repr rather than values.
        expected = dedent(
            f"""\
            <xarray.Variable (x: 4, y: 6)> Size: 192B
            {self.lazy_var.data!r}"""
        )
        assert expected == repr(self.lazy_var)

    def test_pickle(self):
        # Test that pickling/unpickling does not convert the dask
        # backend to numpy
        a1 = Variable(["x"], build_dask_array("x"))
        a1.compute()
        assert not a1._in_memory
        # kernel_call_count is a module-level counter bumped by the kernel
        # inside build_dask_array; it must not grow across pickle round-trip.
        assert kernel_call_count == 1
        a2 = pickle.loads(pickle.dumps(a1))
        assert kernel_call_count == 1
        assert_identical(a1, a2)
        assert not a1._in_memory
        assert not a2._in_memory

    def test_reduce(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(u.mean(), v.mean())
        self.assertLazyAndAllClose(u.std(), v.std())
        # argmax/argmin must be fully lazy (no compute while building graph)
        with raise_if_dask_computes():
            actual = v.argmax(dim="x")
        self.assertLazyAndAllClose(u.argmax(dim="x"), actual)
        with raise_if_dask_computes():
            actual = v.argmin(dim="x")
        self.assertLazyAndAllClose(u.argmin(dim="x"), actual)
        self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
        self.assertLazyAndAllClose((u < 1).all("x"), (v < 1).all("x"))
        # dask median only supports reduction along an axis, not全-reduce —
        # NOTE(review): error text asserted here, not the numeric result
        with pytest.raises(NotImplementedError, match=r"only works along an axis"):
            v.median()
        with pytest.raises(NotImplementedError, match=r"only works along an axis"):
            v.median(v.dims)
        with raise_if_dask_computes():
            v.reduce(duck_array_ops.mean)

    def test_missing_values(self):
        values = np.array([0, 1, np.nan, 3])
        data = da.from_array(values, chunks=(2,))

        eager_var = Variable("x", values)
        lazy_var = Variable("x", data)
        self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
        self.assertLazyAndIdentical(Variable("x", range(4)), lazy_var.fillna(2))
        self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())

    def test_concat(self):
        # concat of lazy pieces, scalars, and eager/lazy mixtures; the last
        # case also exercises explicit `positions` reordering.
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], "x"))
        self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], "x"))
        self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], "x"))
        self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], "x"))
        self.assertLazyAndIdentical(
            u[:3], Variable.concat([v[[0, 2]], v[[1]]], "x", positions=[[0, 2], [1]])
        )

    def test_missing_methods(self):
        # Operations unsupported on the dask backend must fail loudly.
        v = self.lazy_var
        with pytest.raises(NotImplementedError, match="dask"):
            v.argsort()
        with pytest.raises(NotImplementedError, match="dask"):
            v[0].item()  # type: ignore[attr-defined]

    def test_univariate_ufunc(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.sin(u), np.sin(v))

    def test_bivariate_ufunc(self):
        # both argument orders, so __array_ufunc__ dispatch is checked twice
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(v, 0))
        self.assertLazyAndAllClose(np.maximum(u, 0), np.maximum(0, v))

    def test_univariate_xufunc(self):
        # xarray.ufuncs wrapper must agree with the numpy ufunc
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.sin(u), xu.sin(v))

    def test_bivariate_xufunc(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
        self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))

    def test_compute(self):
        # dask.compute must return an eager Variable with correct values.
        u = self.eager_var
        v = self.lazy_var

        assert dask.is_dask_collection(v)
        (v2,) = dask.compute(v + 1)
        assert not dask.is_dask_collection(v2)

        assert ((u + 1).data == v2.data).all()

    def test_persist(self):
        # persist returns a new collection with a pruned graph but the same
        # keys; both old and new remain lazy collections.
        u = self.eager_var
        v = self.lazy_var + 1

        (v2,) = dask.persist(v)
        assert v is not v2
        assert len(v2.__dask_graph__()) < len(v.__dask_graph__())  # type: ignore[arg-type]
        assert v2.__dask_keys__() == v.__dask_keys__()
        assert dask.is_dask_collection(v)
        assert dask.is_dask_collection(v2)

        self.assertLazyAndAllClose(u + 1, v)
        self.assertLazyAndAllClose(u + 1, v2)

    @requires_pint
    def test_tokenize_duck_dask_array(self):
        # duck arrays (pint Quantity wrapping dask) must tokenize stably and
        # produce a different token after an arithmetic op.
        import pint

        unit_registry = pint.UnitRegistry()

        q = unit_registry.Quantity(self.data, "meter")
        variable = xr.Variable(("x", "y"), q)

        token = dask.base.tokenize(variable)
        post_op = variable + 5 * unit_registry.meter

        assert dask.base.tokenize(variable) != dask.base.tokenize(post_op)
        # Immutability check
        assert dask.base.tokenize(variable) == token
| TestVariable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.