language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 406506,
"end": 432748
} | class ____(StatNode, ParallelNode):
"""
Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
assignments to variables in this parallel section
parent parent ParallelStatNode or None
is_parallel indicates whether this node is OpenMP parallel
(true for #pragma omp parallel for and
#pragma omp parallel)
is_parallel is true for:
#pragma omp parallel
#pragma omp parallel for
sections, but NOT for
#pragma omp for
We need this to determine the sharing attributes.
privatization_insertion_point a code insertion point used to make temps
private (esp. the "nsteps" temp)
args tuple the arguments passed to the parallel construct
kwargs DictNode the keyword arguments passed to the parallel
construct (replaced by its compile time value)
"""
child_attrs = ['body', 'num_threads', 'threading_condition']
body = None
is_prange = False
is_nested_prange = False
error_label_used = False
num_threads = None
chunksize = None
threading_condition = None
parallel_exc = (
Naming.parallel_exc_type,
Naming.parallel_exc_value,
Naming.parallel_exc_tb,
)
parallel_pos_info = (
Naming.parallel_filename,
Naming.parallel_lineno,
Naming.parallel_clineno,
)
pos_info = (
Naming.filename_cname,
Naming.lineno_cname,
Naming.clineno_cname,
)
# Note that this refers to openmp critical sections, not freethreading
# Python critical sections.
critical_section_counter = 0
def __init__(self, pos, **kwargs):
super().__init__(pos, **kwargs)
# All assignments in this scope
self.assignments = kwargs.get('assignments') or {}
# All seen closure cnames and their temporary cnames
self.seen_closure_vars = set()
# Dict of variables that should be declared (first|last|)private or
# reduction { Entry: op }.
# If op is not None, it's a reduction.
self.privates = {}
# [NameNode]
self.assigned_nodes = []
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
self.threading_condition = None
if self.kwargs:
# Try to find known keyword arguments.
pairs = []
seen = set()
for dictitem in self.kwargs.key_value_pairs:
if dictitem.key.value in seen:
error(self.pos, "Duplicate keyword argument found: %s" % dictitem.key.value)
seen.add(dictitem.key.value)
if dictitem.key.value == 'num_threads':
if not dictitem.value.is_none:
self.num_threads = dictitem.value
elif dictitem.key.value == 'use_threads_if':
if not dictitem.value.is_none:
self.threading_condition = dictitem.value
elif self.is_prange and dictitem.key.value == 'chunksize':
if not dictitem.value.is_none:
self.chunksize = dictitem.value
else:
pairs.append(dictitem)
self.kwargs.key_value_pairs = pairs
try:
self.kwargs = self.kwargs.compile_time_value(env)
except Exception as e:
error(self.kwargs.pos, "Only compile-time values may be "
"supplied as keyword arguments")
else:
self.kwargs = {}
for kw, val in self.kwargs.items():
if kw not in self.valid_keyword_arguments:
error(self.pos, "Invalid keyword argument: %s" % kw)
else:
setattr(self, kw, val)
def analyse_expressions(self, env):
if self.num_threads:
self.num_threads = self.num_threads.analyse_expressions(env)
if self.threading_condition:
if self.is_parallel:
self.threading_condition = self.threading_condition.analyse_expressions(env)
else:
error(self.pos, "'use_threads_if' must de declared in the parent parallel section")
if self.chunksize:
self.chunksize = self.chunksize.analyse_expressions(env)
self.body = self.body.analyse_expressions(env)
self.analyse_sharing_attributes(env)
if self.num_threads is not None:
if self.parent and self.parent.num_threads is not None and not self.parent.is_prange:
error(self.pos, "num_threads already declared in outer section")
elif self.parent and not self.parent.is_prange:
error(self.pos, "num_threads must be declared in the parent parallel section")
elif (self.num_threads.type.is_int and
self.num_threads.is_literal and
self.num_threads.compile_time_value(env) <= 0):
error(self.pos, "argument to num_threads must be greater than 0")
if not self.num_threads.is_simple() or self.num_threads.type.is_pyobject:
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
return self
def analyse_sharing_attributes(self, env):
"""
Analyse the privates for this block and set them in self.privates.
This should be called in a post-order fashion during the
analyse_expressions phase
"""
for entry, (pos, op) in self.assignments.items():
if self.is_prange and not self.is_parallel:
# closely nested prange in a with parallel block, disallow
# assigning to privates in the with parallel block (we
# consider it too implicit and magicky for users)
if entry in self.parent.assignments:
error(pos, "Cannot assign to private of outer parallel block")
continue
if not self.is_prange and op:
# Again possible, but considered to magicky
error(pos, "Reductions not allowed for parallel blocks")
continue
# By default all variables should have the same values as if
# executed sequentially
self.propagate_var_privatization(entry, pos, op)
def propagate_var_privatization(self, entry, pos, op):
"""
Propagate the sharing attributes of a variable. If the privatization is
determined by a parent scope, done propagate further.
If we are a prange, we propagate our sharing attributes outwards to
other pranges. If we are a prange in parallel block and the parallel
block does not determine the variable private, we propagate to the
parent of the parent. Recursion stops at parallel blocks, as they have
no concept of lastprivate or reduction.
So the following cases propagate:
sum is a reduction for all loops:
for i in prange(n):
for j in prange(n):
for k in prange(n):
sum += i * j * k
sum is a reduction for both loops, local_var is private to the
parallel with block:
for i in prange(n):
with parallel:
local_var = ... # private to the parallel
for j in prange(n):
sum += i * j
Nested with parallel blocks are disallowed, because they wouldn't
allow you to propagate lastprivates or reductions:
#pragma omp parallel for lastprivate(i)
for i in prange(n):
sum = 0
#pragma omp parallel private(j, sum)
with parallel:
#pragma omp parallel
with parallel:
#pragma omp for lastprivate(j) reduction(+:sum)
for j in prange(n):
sum += i
# sum and j are well-defined here
# sum and j are undefined here
# sum and j are undefined here
"""
self.privates[entry] = op
if entry.type.is_memoryviewslice:
error(pos, "Memoryview slices can only be shared in parallel sections")
return
if self.is_prange:
if not self.is_parallel and entry not in self.parent.assignments:
# Parent is a parallel with block
parent = self.parent.parent
else:
parent = self.parent
if parent:
parent.propagate_var_privatization(entry, pos, op)
def _allocate_closure_temp(self, code, entry):
"""
Helper function that allocate a temporary for a closure variable that
is assigned to.
"""
if self.parent:
return self.parent._allocate_closure_temp(code, entry)
if entry.cname in self.seen_closure_vars:
return entry.cname
cname = code.funcstate.allocate_temp(entry.type, True)
# Add both the actual cname and the temp cname, as the actual cname
# will be replaced with the temp cname on the entry
self.seen_closure_vars.add(entry.cname)
self.seen_closure_vars.add(cname)
self.modified_entries.append((entry, entry.cname))
code.putln("%s = %s;" % (cname, entry.cname))
entry.cname = cname
def evaluate_before_block(self, code, expr):
c = self.begin_of_parallel_control_block_point_after_decls
# we need to set the owner to ourselves temporarily, as
# allocate_temp may generate a comment in the middle of our pragma
# otherwise when DebugFlags.debug_temp_code_comments is in effect
owner = c.funcstate.owner
c.funcstate.owner = c
expr.generate_evaluation_code(c)
c.funcstate.owner = owner
return expr.result()
def put_num_threads(self, code):
"""
Write self.num_threads if set as the num_threads OpenMP directive
"""
if self.num_threads is not None:
code.put(" num_threads(%s)" % self.evaluate_before_block(code, self.num_threads))
def declare_closure_privates(self, code):
"""
If a variable is in a scope object, we need to allocate a temp and
assign the value from the temp to the variable in the scope object
after the parallel section. This kind of copying should be done only
in the outermost parallel section.
"""
self.modified_entries = []
for entry in sorted(self.assignments):
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
def privatize_temps(self, code, exclude_temps=()):
"""
Make any used temporaries private. Before the relevant code block
code.start_collecting_temps() should have been called.
"""
c = self.privatization_insertion_point
self.privatization_insertion_point = None
if self.is_parallel:
self.temps = temps = code.funcstate.stop_collecting_temps()
privates, firstprivates = [], []
for temp, type in sorted(temps):
if type.is_pyobject or type.is_memoryviewslice:
firstprivates.append(temp)
else:
privates.append(temp)
if privates:
c.put(" private(%s)" % ", ".join(privates))
if firstprivates:
c.put(" firstprivate(%s)" % ", ".join(firstprivates))
if self.breaking_label_used:
shared_vars = [Naming.parallel_why]
if self.error_label_used:
shared_vars.extend(self.parallel_exc)
c.globalstate.use_utility_code(
UtilityCode.load_cached(
"SharedInFreeThreading",
"Synchronization.c"))
c.put(f" __Pyx_shared_in_cpython_freethreading({Naming.parallel_freethreading_mutex})")
c.put(" private(%s, %s, %s)" % self.pos_info)
c.put(" shared(%s)" % ', '.join(shared_vars))
def cleanup_temps(self, code):
# Now clean up any memoryview slice and object temporaries
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
code.put_xdecref_clear(temp, type, have_gil=True)
def setup_parallel_control_flow_block(self, code):
"""
Sets up a block that surrounds the parallel block to determine
how the parallel section was exited. Any kind of return is
trapped (break, continue, return, exceptions). This is the idea:
{
int why = 0;
#pragma omp parallel
{
return # -> goto new_return_label;
goto end_parallel;
new_return_label:
why = 3;
goto end_parallel;
end_parallel:;
#pragma omp flush(why) # we need to flush for every iteration
}
if (why == 3)
goto old_return_label;
}
"""
self.old_loop_labels = code.new_loop_labels()
self.old_error_label = code.new_error_label()
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
self.begin_of_parallel_control_block_point_after_decls = code.insertion_point()
self.undef_builtin_expect_apple_gcc_bug(code)
def begin_parallel_block(self, code):
"""
Each OpenMP thread in a parallel section that contains a with gil block
must have the thread-state initialized. The call to
PyGILState_Release() then deallocates our threadstate. If we wouldn't
do this, each with gil block would allocate and deallocate one, thereby
losing exception information before it can be saved before leaving the
parallel section.
"""
self.begin_of_parallel_block = code.insertion_point()
def end_parallel_block(self, code):
"""
To ensure all OpenMP threads have thread states, we ensure the GIL
in each thread (which creates a thread state if it doesn't exist),
after which we release the GIL.
On exit, reacquire the GIL and release the thread state.
If compiled without OpenMP support (at the C level), then we still have
to acquire the GIL to decref any object temporaries.
"""
begin_code = self.begin_of_parallel_block
self.begin_of_parallel_block = None
if self.error_label_used:
end_code = code
begin_code.putln("#ifdef _OPENMP")
begin_code.put_ensure_gil(declare_gilstate=True)
begin_code.putln("Py_BEGIN_ALLOW_THREADS")
begin_code.putln("#endif /* _OPENMP */")
end_code.putln("#ifdef _OPENMP")
end_code.putln("Py_END_ALLOW_THREADS")
end_code.putln("#else")
end_code.put_safe("{\n")
end_code.put_ensure_gil()
end_code.putln("#endif /* _OPENMP */")
self.cleanup_temps(end_code)
end_code.put_release_ensured_gil()
end_code.putln("#ifndef _OPENMP")
end_code.put_safe("}\n")
end_code.putln("#endif /* _OPENMP */")
def trap_parallel_exit(self, code, should_flush=False):
"""
Trap any kind of return inside a parallel construct. 'should_flush'
indicates whether the variable should be flushed, which is needed by
prange to skip the loop. It also indicates whether we need to register
a continue (we need this for parallel blocks, but not for prange
loops, as it is a direct jump there).
It uses the same mechanism as try/finally:
1 continue
2 break
3 return
4 error
"""
save_lastprivates_label = code.new_label()
dont_return_label = code.new_label()
self.any_label_used = False
self.breaking_label_used = False
self.error_label_used = False
self.parallel_private_temps = []
all_labels = code.get_all_labels()
# Figure this out before starting to generate any code
for label in all_labels:
if code.label_used(label):
self.breaking_label_used = (self.breaking_label_used or
label != code.continue_label)
self.any_label_used = True
if self.any_label_used:
code.put_goto(dont_return_label)
for i, label in enumerate(all_labels):
if not code.label_used(label):
continue
is_continue_label = label == code.continue_label
code.put_label(label)
if not (should_flush and is_continue_label):
if label == code.error_label:
self.error_label_used = True
self.fetch_parallel_exception(code)
code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
code.put_goto(dont_return_label)
if self.any_label_used:
code.put_label(dont_return_label)
if should_flush and self.breaking_label_used:
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
def fetch_parallel_exception(self, code):
"""
As each OpenMP thread may raise an exception, we need to fetch that
exception from the threadstate and save it for after the parallel
section where it can be re-raised in the master thread.
Although it would seem that __pyx_filename, __pyx_lineno and
__pyx_clineno are only assigned to under exception conditions (i.e.,
when we have the GIL), and thus should be allowed to be shared without
any race condition, they are in fact subject to the same race
conditions that they were previously when they were global variables
and functions were allowed to release the GIL:
thread A thread B
acquire
set lineno
release
acquire
set lineno
release
acquire
fetch exception
release
skip the fetch
deallocate threadstate deallocate threadstate
"""
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.put_acquire_freethreading_lock()
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
code.putln(
"if (!%s) {" % Naming.parallel_exc_type)
code.putln("__Pyx_ErrFetchWithState(&%s, &%s, &%s);" % self.parallel_exc)
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.funcstate.uses_error_indicator = True
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_gotref(Naming.parallel_exc_type, py_object_type)
code.putln(
"}")
code.put_release_freethreading_lock()
code.put_release_ensured_gil()
code.end_block()
def restore_parallel_exception(self, code):
"Re-raise a parallel exception"
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.put_acquire_freethreading_lock()
code.put_giveref(Naming.parallel_exc_type, py_object_type)
code.putln("__Pyx_ErrRestoreWithState(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.put_release_freethreading_lock()
code.put_release_ensured_gil()
code.end_block()
def restore_labels(self, code):
"""
Restore all old labels. Call this before the 'else' clause to for
loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
def end_parallel_control_flow_block(
self, code, break_=False, continue_=False, return_=False):
"""
This ends the parallel control flow block and based on how the parallel
section was exited, takes the corresponding action. The break_ and
continue_ parameters indicate whether these should be propagated
outwards:
for i in prange(...):
with cython.parallel.parallel():
continue
Here break should be trapped in the parallel block, and propagated to
the for loop.
"""
c = self.begin_of_parallel_control_block_point
self.begin_of_parallel_control_block_point = None
self.begin_of_parallel_control_block_point_after_decls = None
if self.num_threads is not None:
# FIXME: is it the right place? should not normally produce code.
self.num_threads.generate_disposal_code(code)
self.num_threads.free_temps(code)
if c.is_tracing():
# Disable sys monitoring in parallel blocks. It isn't thread safe in either
# Cython or Python.
c.putln("__Pyx_TurnOffSysMonitoringInParallel")
# Firstly, always prefer errors over returning, continue or break
if self.error_label_used:
c.putln("const char *%s = NULL; int %s = 0, %s = 0;" % self.parallel_pos_info)
c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" % self.parallel_exc)
c.putln("#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING")
c.putln(f"PyMutex {Naming.parallel_freethreading_mutex} = {{0}};")
c.putln("#endif")
code.putln(
"if (%s) {" % Naming.parallel_exc_type)
code.putln("/* This may have been overridden by a continue, "
"break or return in another thread. Prefer the error. */")
code.putln("%s = 4;" % Naming.parallel_why)
code.putln(
"}")
if continue_:
any_label_used = self.any_label_used
else:
any_label_used = self.breaking_label_used
if any_label_used:
# __pyx_parallel_why is used, declare and initialize
c.putln("int %s;" % Naming.parallel_why)
c.putln("%s = 0;" % Naming.parallel_why)
code.putln(
"if (%s) {" % Naming.parallel_why)
for temp_cname, private_cname, temp_type in self.parallel_private_temps:
if temp_type.is_cpp_class:
# utility code was loaded earlier
temp_cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % temp_cname
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
if continue_:
code.put(" case 1: ")
code.put_goto(code.continue_label)
if break_:
code.put(" case 2: ")
code.put_goto(code.break_label)
if return_:
code.put(" case 3: ")
code.put_goto(code.return_label)
if self.error_label_used:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(" case 4:")
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
code.putln("}") # end switch
code.putln(
"}") # end if
code.end_block() # end parallel control flow block
self.redef_builtin_expect_apple_gcc_bug(code)
# FIXME: improve with version number for OS X Lion
buggy_platform_macro_condition = "(defined(__APPLE__) || defined(__OSX__))"
have_expect_condition = "(defined(__GNUC__) && " \
"(__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))"
redef_condition = "(%s && %s)" % (buggy_platform_macro_condition, have_expect_condition)
def undef_builtin_expect_apple_gcc_bug(self, code):
"""
A bug on OS X Lion disallows __builtin_expect macros. This code avoids them
"""
if not self.parent:
code.undef_builtin_expect(self.redef_condition)
def redef_builtin_expect_apple_gcc_bug(self, code):
if not self.parent:
code.redef_builtin_expect(self.redef_condition)
def _parameters_nogil_check(self, env, names, nodes):
for name, node in zip(names, nodes):
if node is not None and node.type.is_pyobject:
error(node.pos, "%s may not be a Python object "
"as we don't have the GIL" % name)
| ParallelStatNode |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 4250,
"end": 7576
} | class ____:
"""Descriptor for getting and setting scalar properties. Scalars are numeric values with a unit, e.g. "50vh"."""
def __init__(
self,
units: set[Unit] | None = None,
percent_unit: Unit = Unit.WIDTH,
allow_auto: bool = True,
) -> None:
self.units: set[Unit] = units or {*UNIT_SYMBOL}
self.percent_unit = percent_unit
self.allow_auto = allow_auto
super().__init__()
def __set_name__(self, owner: StylesBase, name: str) -> None:
self.name = name
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> Scalar | None:
"""Get the scalar property.
Args:
obj: The ``Styles`` object.
objtype: The ``Styles`` class.
Returns:
The Scalar object or ``None`` if it's not set.
"""
return obj.get_rule(self.name) # type: ignore[return-value]
def __set__(
self, obj: StylesBase, value: float | int | Scalar | str | None
) -> None:
"""Set the scalar property.
Args:
obj: The ``Styles`` object.
value: The value to set the scalar property to.
You can directly pass a float or int value, which will be interpreted with
a default unit of Cells. You may also provide a string such as ``"50%"``,
as you might do when writing CSS. If a string with no units is supplied,
Cells will be used as the unit. Alternatively, you can directly supply
a ``Scalar`` object.
Raises:
StyleValueError: If the value is of an invalid type, uses an invalid unit, or
cannot be parsed for any other reason.
"""
_rich_traceback_omit = True
if value is None:
obj.clear_rule(self.name)
obj.refresh(layout=True)
return
if isinstance(value, (int, float)):
new_value = Scalar(float(value), Unit.CELLS, Unit.WIDTH)
elif isinstance(value, Scalar):
new_value = value
elif isinstance(value, str):
try:
new_value = Scalar.parse(value)
except ScalarParseError:
raise StyleValueError(
f"unable to parse scalar from {value!r}",
help_text=scalar_help_text(
property_name=self.name, context="inline"
),
)
else:
raise StyleValueError("expected float, int, Scalar, or None")
if (
new_value is not None
and new_value.unit == Unit.AUTO
and not self.allow_auto
):
raise StyleValueError("'auto' not allowed here")
if new_value is not None and new_value.unit != Unit.AUTO:
if new_value.unit not in self.units:
raise StyleValueError(
f"{self.name} units must be one of {friendly_list(get_symbols(self.units))}"
)
if new_value.is_percent:
new_value = Scalar(
float(new_value.value), self.percent_unit, Unit.WIDTH
)
if obj.set_rule(self.name, new_value):
obj.refresh(layout=True)
| ScalarProperty |
python | tensorflow__tensorflow | tensorflow/python/ops/structured/structured_tensor_test.py | {
"start": 2538,
"end": 3183
} | class ____(extension_type.ExtensionType):
ragged: ragged_tensor.RaggedTensor
@dispatch.dispatch_for_types(array_ops.shape_v2, _PrivateBrokenType)
def shape_v2_broken(
input: _PrivateBrokenType, # pylint: disable=redefined-builtin
out_type: dtypes.DType = None,
name: Optional[str] = None) -> DynamicRaggedShape:
"""Returns a DynamicRaggedShape containing the shape of the input."""
del name
del input
del out_type
return {
"foo": "This is not a shape",
"bar": "But if I put a string here, it becomes a vector"
}
# pylint: disable=g-long-lambda
@test_util.run_all_in_graph_and_eager_modes
| _PrivateBrokenType |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail_baseConfig.py | {
"start": 171,
"end": 788
} | class ____(BaseModel):
x: int
y: str
def method(self) -> None:
pass
class Config:
alias_generator = None
frozen = True
extra = Extra.forbid
def config_method(self) -> None:
...
model = Model(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "Model" [call-arg]
model = Model(x=1)
# MYPY: error: Missing named argument "y" for "Model" [call-arg]
model.y = 'a'
# MYPY: error: Property "y" defined in "Model" is read-only [misc]
Model.from_orm({})
# MYPY: error: "Model" does not have from_attributes=True [pydantic-orm]
| Model |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/data_version.py | {
"start": 1638,
"end": 2606
} | class ____(
NamedTuple(
"_DataVersionsByPartition", [("data_versions_by_partition", Mapping[str, DataVersion])]
)
):
def __new__(
cls,
data_versions_by_partition: Mapping[str, Union[str, DataVersion]],
):
check.dict_param(
data_versions_by_partition,
"data_versions_by_partition",
key_type=str,
value_type=(str, DataVersion),
)
return super().__new__(
cls,
data_versions_by_partition={
partition: DataVersion(version) if isinstance(version, str) else version
for partition, version in data_versions_by_partition.items()
},
)
DEFAULT_DATA_VERSION: Final[DataVersion] = DataVersion("INITIAL")
NULL_DATA_VERSION: Final[DataVersion] = DataVersion("NULL")
UNKNOWN_DATA_VERSION: Final[DataVersion] = DataVersion("UNKNOWN")
NULL_EVENT_POINTER: Final[str] = "NULL"
| DataVersionsByPartition |
python | getsentry__sentry | src/sentry/utils/snuba.py | {
"start": 15463,
"end": 15729
} | class ____(QueryExecutionError):
"""
This query has resulted in needing to check multiple datasets in a way
that is not currently handled, clickhouse errors with data being compressed
by different methods when this happens
"""
| DatasetSelectionError |
python | pypa__warehouse | warehouse/observations/models.py | {
"start": 5431,
"end": 7946
} | class ____:
"""
A mixin for models that can have Observations.
Since Observations require a User to link to as the creator,
any code using `record_observation()` will need to pass a
`request` object that has a `user` attribute.
For Views, when using `@view_config(..., uses_session=True)`,
Usage:
some_model.record_observation(...)
some_model.observations # a list of Observation objects
"""
Observation: typing.ClassVar[type]
@declared_attr
def observations(cls): # noqa: N805
cls.Observation = type(
f"{cls.__name__}Observation",
(Observation, db.Model),
dict(
__tablename__=f"{cls.__name__.lower()}_observations",
__mapper_args__={
"polymorphic_identity": cls.__name__.lower(),
"concrete": True,
},
related_id=mapped_column(
PG_UUID,
ForeignKey(f"{cls.__tablename__}.id"),
comment="The ID of the related model",
nullable=True,
index=True,
),
related=relationship(cls, back_populates="observations"),
related_name=mapped_column(
String,
comment="The name of the related model",
nullable=False,
),
observer_id=mapped_column(
PG_UUID,
ForeignKey("observers.id"),
comment="ID of the Observer who created the Observation",
nullable=False,
),
observer=relationship(Observer),
),
)
return relationship(cls.Observation)
def record_observation(
self,
*,
request: Request,
kind: ObservationKind,
actor: User, # TODO: Expand type as we add more HasObserver models
summary: str,
payload: dict,
):
"""
Record an observation on the related model.
"""
if actor.observer is None:
actor.observer = Observer()
observation = self.Observation(
kind=kind.value[0],
observer=actor.observer,
payload=payload,
related=self,
related_name=repr(self),
summary=summary,
)
request.db.add(observation)
return observation
| HasObservations |
python | google__jax | jax/_src/pallas/mosaic/core.py | {
"start": 7081,
"end": 7782
} | class ____(enum.Enum):
REGULAR = "regular"
DMA = "dma"
BARRIER = "barrier"
def __call__(self, shape: tuple[int, ...]):
dtype: Any
if self == SemaphoreType.DMA:
dtype = DMASemaphore()
elif self == SemaphoreType.BARRIER:
dtype = pallas_core.BarrierSemaphore()
else:
dtype = pallas_core.Semaphore()
return pallas_core.MemoryRef(jax_core.ShapedArray(shape, dtype),
MemorySpace.SEMAPHORE)
def get_array_aval(self) -> pallas_core.ShapedArrayWithMemorySpace:
return self(()).get_array_aval()
def get_ref_aval(self) -> state.AbstractRef:
return self(()).get_ref_aval()
@dataclasses.dataclass(frozen=True)
| SemaphoreType |
python | matplotlib__matplotlib | lib/matplotlib/_docstring.py | {
"start": 2105,
"end": 2576
} | class ____(dict):
def __missing__(self, key):
if not key.endswith(":kwdoc"):
raise KeyError(key)
name = key[:-len(":kwdoc")]
from matplotlib.artist import Artist, kwdoc
try:
cls, = (cls for cls in _api.recursive_subclasses(Artist)
if cls.__name__ == name)
except ValueError as e:
raise KeyError(key) from e
return self.setdefault(key, kwdoc(cls))
| _ArtistKwdocLoader |
python | spyder-ide__spyder | spyder/plugins/run/widgets.py | {
"start": 2232,
"end": 5138
} | class ____(QDialog):
"""Run configuration dialog box, base widget"""
size_change = Signal(QSize)
def __init__(self, parent=None, disable_run_btn=False):
QDialog.__init__(self, parent)
self.setWindowFlags(
self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
# Destroying the C++ object right after closing the dialog box,
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowIcon(ima.icon('run_settings'))
layout = QVBoxLayout()
self.setLayout(layout)
self.disable_run_btn = disable_run_btn
# Style that will be set by children
self._css = qstylizer.style.StyleSheet()
def add_widgets(self, *widgets_or_spacings):
"""Add widgets/spacing to dialog vertical layout"""
layout = self.layout()
for widget_or_spacing in widgets_or_spacings:
if isinstance(widget_or_spacing, int):
layout.addSpacing(widget_or_spacing)
elif isinstance(widget_or_spacing, QLayout):
layout.addLayout(widget_or_spacing)
else:
layout.addWidget(widget_or_spacing)
return layout
def add_button_box(self, stdbtns):
"""Create dialog button box and add it to the dialog layout"""
self.bbox = SpyderDialogButtonBox(stdbtns)
if not self.disable_run_btn:
run_btn = self.bbox.addButton(
_("Run"), QDialogButtonBox.ActionRole)
run_btn.clicked.connect(self.run_btn_clicked)
reset_deafults_btn = self.bbox.addButton(
_('Reset'), QDialogButtonBox.ResetRole)
reset_deafults_btn.clicked.connect(self.reset_btn_clicked)
# Align this button to the text above it
reset_deafults_btn.setStyleSheet("margin-left: 5px")
self.bbox.accepted.connect(self.accept)
self.bbox.rejected.connect(self.reject)
self.layout().addWidget(self.bbox)
def resizeEvent(self, event):
"""
Reimplement Qt method to be able to save the widget's size from the
main application
"""
QDialog.resizeEvent(self, event)
self.size_change.emit(self.size())
def run_btn_clicked(self):
"""Run button was just clicked"""
pass
def reset_btn_clicked(self):
"""Reset button was clicked."""
pass
def ok_btn_clicked(self):
"""Ok button was clicked."""
pass
def setup(self):
"""Setup Run Configuration dialog with filename *fname*"""
raise NotImplementedError
# ---- Dialogs
# -----------------------------------------------------------------------------
| BaseRunConfigDialog |
python | walkccc__LeetCode | solutions/2858. Minimum Edge Reversals So Every Node Is Reachable/2858.py | {
"start": 0,
"end": 904
} | class ____:
def minEdgeReversals(self, n: int, edges: list[list[int]]) -> list[int]:
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append((v, True)) # 1 means (u -> v)
graph[v].append((u, False)) # 0 means (v <- u)
seen = {0}
@functools.lru_cache(None)
def dp(u: int) -> int:
"""
Returns the minimum number of edge reversals so node u can reach every
node in its subtree.
"""
res = 0
for v, isForward in graph[u]:
if v in seen:
continue
seen.add(v)
res += dp(v) + (0 if isForward else 1)
return res
ans = [0] * n
ans[0] = dp(0)
def dfs(u: int) -> None:
for v, isForward in graph[u]:
if v in seen:
continue
seen.add(v)
ans[v] = ans[u] + (1 if isForward else -1)
dfs(v)
seen = {0}
dfs(0)
return ans
| Solution |
python | encode__django-rest-framework | rest_framework/validators.py | {
"start": 12010,
"end": 12604
class ____(BaseUniqueForValidator):
    message = _('This field must be unique for the "{date_field}" date.')

    def filter_queryset(self, attrs, queryset, field_name, date_field_name):
        """Narrow *queryset* to rows whose field value matches and whose
        date field falls on the same calendar day (day/month/year)."""
        value = attrs[self.field]
        date = attrs[self.date_field]

        lookups = {
            field_name: value,
            '%s__day' % date_field_name: date.day,
            '%s__month' % date_field_name: date.month,
            '%s__year' % date_field_name: date.year,
        }
        return qs_filter(queryset, **lookups)
| UniqueForDateValidator |
python | sdispater__pendulum | src/pendulum/locales/locale.py | {
"start": 220,
"end": 2837
class ____:
    """
    Represent a specific locale.
    """

    # Process-wide cache of loaded locales, keyed by normalized locale name.
    _cache: ClassVar[dict[str, Locale]] = {}

    def __init__(self, locale: str, data: Any) -> None:
        self._locale: str = locale
        self._data: Any = data
        # Memoizes successful dotted-key lookups into the locale data.
        self._key_cache: dict[str, str] = {}

    @classmethod
    def load(cls, locale: str | Locale) -> Locale:
        """Return a cached Locale, importing its data module on first use.

        Raises:
            ValueError: if no data directory exists for the locale.
        """
        if isinstance(locale, Locale):
            return locale

        locale = cls.normalize_locale(locale)
        if locale in cls._cache:
            return cls._cache[locale]

        # Checking locale existence
        actual_locale = locale
        locale_path = cast(Path, resources.files(__package__).joinpath(actual_locale))
        # NOTE(review): locale_path is never recomputed inside this loop, so a
        # missing locale raises on the first iteration and the fallback to the
        # base language ("xx_yy" -> "xx") is unreachable -- confirm intent.
        while not locale_path.exists():
            if actual_locale == locale:
                raise ValueError(f"Locale [{locale}] does not exist.")

            actual_locale = actual_locale.split("_")[0]

        m = import_module(f"pendulum.locales.{actual_locale}.locale")

        cls._cache[locale] = cls(locale, m.locale)

        return cls._cache[locale]

    @classmethod
    def normalize_locale(cls, locale: str) -> str:
        """Normalize "xx-YY"/"xx_YY" forms to "xx_yy"; otherwise lowercase."""
        m = re.fullmatch("([a-z]{2})[-_]([a-z]{2})", locale, re.I)
        if m:
            return f"{m.group(1).lower()}_{m.group(2).lower()}"
        else:
            return locale.lower()

    def get(self, key: str, default: Any | None = None) -> Any:
        """Look up a dotted *key* (e.g. "custom.ordinal.<category>") in the
        locale data.

        Successful lookups are memoized; misses return *default* uncached.
        """
        if key in self._key_cache:
            return self._key_cache[key]

        parts = key.split(".")
        try:
            result = self._data[parts[0]]
            for part in parts[1:]:
                result = result[part]
        except KeyError:
            # Bug fix: the default used to be stored in the cache, so a later
            # call with a *different* default got the first caller's value.
            return default

        self._key_cache[key] = result

        return result

    def translation(self, key: str) -> Any:
        """Shortcut for get("translations.<key>")."""
        return self.get(f"translations.{key}")

    def plural(self, number: int) -> str:
        """Return the plural category name for *number* from the locale data."""
        return cast(str, self._data["plural"](number))

    def ordinal(self, number: int) -> str:
        """Return the ordinal category name for *number* from the locale data."""
        return cast(str, self._data["ordinal"](number))

    def ordinalize(self, number: int) -> str:
        """Return *number* with its ordinal suffix, if the locale defines one."""
        ordinal = self.get(f"custom.ordinal.{self.ordinal(number)}")

        if not ordinal:
            return f"{number}"

        return f"{number}{ordinal}"

    def match_translation(self, key: str, value: Any) -> dict[str, str] | None:
        """Reverse-lookup: return the translation key whose value is *value*,
        or None when no translation matches.

        NOTE(review): despite the annotation, the match is the inverted dict
        indexed by *value* (a single key, typically str); the cast only
        silences the type checker -- confirm intended return type.
        """
        translations = self.translation(key)
        if value not in translations.values():
            return None

        return cast(Dict[str, str], {v: k for k, v in translations.items()}[value])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self._locale}')"
| Locale |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-snowflake-cortex/destination_snowflake_cortex/cortex_processor.py | {
"start": 2929,
"end": 3690
class ____(SQLTypeConverter):
    """A class to convert types for Snowflake."""

    @overrides
    def to_sql_type(
        self,
        json_schema_property_def: dict[str, str | dict | list],
    ) -> sqlalchemy.types.TypeEngine:
        """Convert a value to a SQL type.

        Delegates to the parent implementation, then swaps any JSON column
        type for Snowflake's native VARIANT type.
        """
        resolved = super().to_sql_type(json_schema_property_def)
        if not isinstance(resolved, sqlalchemy.types.JSON):
            return resolved

        return VARIANT()

    @staticmethod
    def get_json_type() -> sqlalchemy.types.TypeEngine:
        """Get the type to use for nested JSON data."""
        return VARIANT()
| SnowflakeTypeConverter |
python | doocs__leetcode | solution/0900-0999/0934.Shortest Bridge/Solution.py | {
"start": 0,
"end": 973
} | class ____:
def shortestBridge(self, grid: List[List[int]]) -> int:
def dfs(i, j):
q.append((i, j))
grid[i][j] = 2
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < n and 0 <= y < n and grid[x][y] == 1:
dfs(x, y)
n = len(grid)
dirs = (-1, 0, 1, 0, -1)
q = deque()
i, j = next((i, j) for i in range(n) for j in range(n) if grid[i][j])
dfs(i, j)
ans = 0
while 1:
for _ in range(len(q)):
i, j = q.popleft()
for a, b in pairwise(dirs):
x, y = i + a, j + b
if 0 <= x < n and 0 <= y < n:
if grid[x][y] == 1:
return ans
if grid[x][y] == 0:
grid[x][y] = 2
q.append((x, y))
ans += 1
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/ppo/optimizer_torch.py | {
"start": 818,
"end": 1170
class ____(OnPolicyHyperparamSettings):
    # PPO-specific hyperparameters layered on top of the on-policy defaults.
    # Field semantics follow the trainer's usage -- presumably beta is the
    # entropy-regularization strength and epsilon the surrogate clip range;
    # confirm against the PPO optimizer implementation.
    beta: float = 5.0e-3
    epsilon: float = 0.2
    # GAE lambda used when computing advantages.
    lambd: float = 0.95
    # Number of passes over the experience buffer per update.
    num_epoch: int = 3
    # Whether the policy and critic share a network body.
    shared_critic: bool = False
    # How each hyperparameter decays over the course of training.
    learning_rate_schedule: ScheduleType = ScheduleType.LINEAR
    beta_schedule: ScheduleType = ScheduleType.LINEAR
    epsilon_schedule: ScheduleType = ScheduleType.LINEAR
| PPOSettings |
python | kamyu104__LeetCode-Solutions | Python/powx-n.py | {
"start": 32,
"end": 439
class ____(object):
    def myPow(self, x, n):
        """
        :type x: float
        :type n: int
        :rtype: float
        """
        # Iterative exponentiation by squaring on |n|; invert at the end for
        # negative exponents.
        exponent = -n if n < 0 else n
        acc = 1
        while exponent:
            if exponent & 1:
                acc *= x
            x *= x
            exponent >>= 1
        return 1 / acc if n < 0 else acc
# Time: O(logn)
# Space: O(logn)
# Recursive solution.
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximum-profit-from-trading-stocks-with-discounts.py | {
"start": 83,
"end": 2474
class ____(object):
    def maxProfit(self, n, present, future, hierarchy, budget):
        """
        :type n: int
        :type present: List[int]
        :type future: List[int]
        :type hierarchy: List[List[int]]
        :type budget: int
        :rtype: int

        Tree knapsack over the employee hierarchy, written in Python 2
        (xrange/iteritems). The recursion is unrolled into an explicit stack
        machine with numbered steps:
          step 1: enter node u, initialize its dp tables, schedule steps 2/4
          step 2: iterate u's children, recursing into each
          step 3: knapsack-merge a finished child's tables into u's
          step 4: finalize u by deciding whether u itself buys

        Each node's state is ret = [two dicts budget_spent -> best_profit].
        While children are being merged, index j of ret means "u's own buy
        decision is j" (children of u get the discount iff u buys). After
        step 4, the index is reinterpreted as "u's parent bought" so the
        caller can merge it the same way.
        """
        def iter_dfs():
            ret = []
            stk = [(1, (0, ret))]
            while stk:
                step, args = stk.pop()
                if step == 1:
                    # Enter node u: dp over spent-budget, starting from
                    # "spend 0, earn 0"; finalize (step 4) runs after all
                    # children (step 2 chain) are processed.
                    u, ret = args
                    ret[:] = [collections.defaultdict(int) for _ in xrange(2)]
                    ret[0][0] = ret[1][0] = 0
                    stk.append((4, (u, ret)))
                    stk.append((2, (u, 0, ret)))
                elif step == 2:
                    # Visit child i of u, then reschedule for child i+1.
                    u, i, ret = args
                    if i == len(adj[u]):
                        continue
                    v = adj[u][i]
                    stk.append((2, (u, i+1, ret)))
                    new_ret = []
                    stk.append((3, (new_ret, ret)))
                    stk.append((1, (v, new_ret)))
                elif step == 3:
                    # Knapsack merge: combine u's accumulated table with the
                    # finished child's table, per buy-decision index i.
                    new_ret, ret = args
                    for i in xrange(2):
                        for j1, v1 in ret[i].items():
                            for j2, v2 in new_ret[i].iteritems():
                                if j1+j2 <= budget:
                                    ret[i][j1+j2] = max(ret[i][j1+j2], v1+v2)
                elif step == 4:
                    # Finalize u: for each parent-buy decision i, either skip
                    # buying u (children computed with u not buying, ret[0])
                    # or buy u at the (possibly discounted) cost and add its
                    # resale profit on top of ret[1].
                    u, ret = args
                    new_ret = [collections.defaultdict(int) for _ in xrange(2)]
                    for i in xrange(2):
                        for j, v in ret[0].iteritems():
                            new_ret[i][j] = max(new_ret[i][j], v)
                        cost = present[u]>>i  # halved when the parent bought
                        if cost > budget:
                            continue
                        profit = future[u]-cost
                        for j, v in ret[1].iteritems():
                            if j+cost <= budget:
                                new_ret[i][j+cost] = max(new_ret[i][j+cost], v+profit)
                    ret[:] = new_ret
            # The root (CEO) has no parent, so no discount: take ret[0].
            return max(ret[0].itervalues())

        adj = [[] for _ in xrange(n)]
        for u, v in hierarchy:
            adj[u-1].append(v-1)
        return iter_dfs()
# Time: O(n * b)
# Space: O(n + b)
import collections
# dfs, tree dp
| Solution |
python | getsentry__sentry | tests/sentry/snuba/test_transactions.py | {
"start": 1006,
"end": 108200
} | class ____(SnubaTestCase, TestCase):
    def setUp(self) -> None:
        """Seed one error event and one tagged transaction event, and build
        the SnubaParams shared by most tests."""
        super().setUp()
        self.environment = self.create_environment(self.project, name="prod")
        self.release = self.create_release(self.project, version="first-release")

        self.now = before_now()
        self.one_min_ago = before_now(minutes=1)
        self.two_min_ago = before_now(minutes=2)

        self.event_time = self.one_min_ago
        # error event
        data = load_data("javascript")
        data["timestamp"] = before_now(minutes=10).isoformat()
        self.store_event(data=data, project_id=self.project.id)

        # transaction event
        data = load_data("transaction", timestamp=self.event_time)
        data["transaction"] = "a" * 32
        data["user"] = {"id": "99", "email": "bruce@example.com", "username": "brucew"}
        data["release"] = "first-release"
        data["environment"] = self.environment.name
        data["tags"] = [["key1", "value1"]]
        self.event = self.store_event(data=data, project_id=self.project.id)

        self.snuba_params = SnubaParams(
            organization=self.organization,
            projects=[self.project],
            start=before_now(days=1),
            end=self.now,
        )
    def test_transaction_query(self) -> None:
        """Selecting `transaction` returns only the stored transaction event."""
        result = transactions.query(
            selected_columns=["transaction"],
            query="",
            snuba_params=self.snuba_params,
            referrer="test_transactions_query",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0] == {"transaction": "a" * 32}
    def test_error_event_type_query(self) -> None:
        """`event.type:error` matches nothing in the transactions dataset."""
        results = transactions.query(
            selected_columns=["count()", "any(transaction)", "any(user.id)"],
            query="event.type:error",
            snuba_params=SnubaParams(
                start=before_now(minutes=5),
                end=before_now(seconds=1),
                projects=[self.project],
            ),
            referrer="discover",
            use_aggregate_conditions=True,
        )
        data = results["data"]
        assert data[0]["count"] == 0
    def test_any_function(self) -> None:
        """`any()` returns a representative value per group alongside aggregates."""
        results = transactions.query(
            selected_columns=["count()", "any(transaction)", "any(user.id)"],
            query="event.type:transaction",
            snuba_params=SnubaParams(
                start=before_now(minutes=5),
                end=before_now(seconds=1),
                projects=[self.project],
            ),
            referrer="discover",
            use_aggregate_conditions=True,
        )
        data = results["data"]
        assert len(data) == 1
        assert data[0]["any_transaction"] == "a" * 32
        assert data[0]["any_user_id"] == "99"
        assert data[0]["count"] == 1
    def test_auto_fields_aggregates(self) -> None:
        """With auto_fields, an aggregate-only query adds no extra columns."""
        result = transactions.query(
            selected_columns=["count_unique(user.email)"],
            referrer="discover",
            query="",
            snuba_params=self.snuba_params,
            auto_fields=True,
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["count_unique_user_email"] == 1
    def test_auto_fields_simple_fields(self) -> None:
        """With auto_fields, simple-field queries gain `id` and `project.name`."""
        result = transactions.query(
            selected_columns=["user.email", "release"],
            referrer="discover",
            query="",
            snuba_params=self.snuba_params,
            auto_fields=True,
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["id"] == self.event.event_id
        assert data[0]["user.email"] == "bruce@example.com"
        assert data[0]["release"] == "first-release"
        assert data[0]["project.name"] == self.project.slug

        assert len(result["meta"]["fields"]) == 4
        assert result["meta"]["fields"] == {
            "user.email": "string",
            "release": "string",
            "id": "string",
            "project.name": "string",
        }
    def test_conditional_filter(self) -> None:
        """An OR of project filters returns events from exactly those projects."""
        project2 = self.create_project(organization=self.organization)
        project3 = self.create_project(organization=self.organization)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "b" * 32
        self.event = self.store_event(data=data, project_id=project2.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "c" * 32
        self.event = self.store_event(data=data, project_id=project3.id)

        result = transactions.query(
            selected_columns=["project", "transaction"],
            query=f"project:{self.project.slug} OR project:{project2.slug}",
            snuba_params=SnubaParams(
                start=self.two_min_ago,
                end=self.now,
                projects=[self.project, project2],
            ),
            orderby=["transaction"],
            referrer="discover",
        )

        data = result["data"]
        assert len(data) == 2
        assert data[0]["project"] == self.project.slug
        assert data[1]["project"] == project2.slug
    def test_nested_conditional_filter(self) -> None:
        """A parenthesized OR combined with AND narrows results correctly."""
        project2 = self.create_project(organization=self.organization)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["release"] = "a" * 32
        self.event = self.store_event(data=data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["release"] = "b" * 32
        self.event = self.store_event(data=data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["release"] = "c" * 32
        self.event = self.store_event(data=data, project_id=self.project.id)

        # stored in another project, so the AND project clause must exclude it
        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["release"] = "a" * 32
        self.event = self.store_event(data=data, project_id=project2.id)

        result = transactions.query(
            selected_columns=["release"],
            query="(release:{} OR release:{}) AND project:{}".format(
                "a" * 32, "b" * 32, self.project.slug
            ),
            snuba_params=SnubaParams(
                start=self.two_min_ago,
                end=self.now,
                projects=[self.project, project2],
            ),
            orderby=["release"],
            referrer="discover",
        )

        data = result["data"]
        assert len(data) == 2
        assert data[0]["release"] == "a" * 32
        assert data[1]["release"] == "b" * 32
    def test_environment_condition(self) -> None:
        """The environment filter matches only events in that environment."""
        result = transactions.query(
            selected_columns=["id", "message"],
            query=f"environment:{self.create_environment(self.project).name}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert len(result["data"]) == 0

        result = transactions.query(
            selected_columns=["id", "message"],
            query=f"environment:{self.environment.name}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert len(result["data"]) == 1
        data = result["data"]
        assert data[0]["id"] == self.event.event_id
        assert data[0]["message"] == "a" * 32
    def test_field_alias_with_component(self) -> None:
        """The `user` alias and its `user.email` component both resolve, with types."""
        result = transactions.query(
            selected_columns=["project.id", "user", "user.email"],
            query="",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["project.id"] == self.project.id
        assert data[0]["user"] == "id:99"
        assert data[0]["user.email"] == "bruce@example.com"

        assert len(result["meta"]["fields"]) == 3
        assert result["meta"]["fields"] == {
            "project.id": "integer",
            "user": "string",
            "user.email": "string",
        }
    def test_field_aliasing_in_aggregate_functions_and_groupby(self) -> None:
        """Field aliases resolve inside aggregate arguments and the groupby."""
        result = transactions.query(
            selected_columns=["project.id", "count_unique(user.email)"],
            query="",
            snuba_params=self.snuba_params,
            auto_fields=True,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["project.id"] == self.project.id
        assert data[0]["count_unique_user_email"] == 1
    def test_field_aliasing_in_conditions(self) -> None:
        """Field aliases resolve inside filter conditions."""
        result = transactions.query(
            selected_columns=["project.id", "user.email"],
            query="user.email:bruce@example.com",
            snuba_params=self.snuba_params,
            referrer="discover",
            auto_fields=True,
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["project.id"] == self.project.id
        assert data[0]["user.email"] == "bruce@example.com"
    def test_field_aliasing_in_selected_columns(self) -> None:
        """Field aliases (including timestamp.to_hour) resolve as selected columns."""
        result = transactions.query(
            selected_columns=["project.id", "user", "release", "timestamp.to_hour"],
            query="",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 1
        assert data[0]["project.id"] == self.project.id
        assert data[0]["user"] == "id:99"
        assert data[0]["release"] == "first-release"

        event_hour = self.event_time.replace(minute=0, second=0, microsecond=0)
        assert data[0]["timestamp.to_hour"] == event_hour.isoformat()

        assert len(result["meta"]["fields"]) == 4
        assert result["meta"]["fields"] == {
            "project.id": "integer",
            "user": "string",
            "release": "string",
            "timestamp.to_hour": "date",
        }
    def test_latest_release_condition(self) -> None:
        """`release:latest` resolves to the most recent release."""
        result = transactions.query(
            selected_columns=["id", "message"],
            query="release:latest",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert len(result["data"]) == 1
        data = result["data"]
        assert data[0]["id"] == self.event.event_id
        assert data[0]["message"] == self.event.transaction
        assert "event_id" not in data[0]
    def test_message_filter(self) -> None:
        """Message filters: exact, empty, has/!has, wildcard, and list syntax."""
        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "oh yeah"
        self.event = self.store_event(data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "oh no"
        self.event = self.store_event(data, project_id=self.project.id)

        tests: list[tuple[str, list[str]]] = [
            ('message:"oh no"', ["oh no"]),
            ('message:"oh yeah"', ["oh yeah"]),
            ('message:""', []),
            ("has:message", ["a" * 32, "oh no", "oh yeah"]),
            ("!has:message", []),
            ("message:oh*", ["oh no", "oh yeah"]),
            ('message:"oh *"', ["oh no", "oh yeah"]),
            ('message:["oh meh"]', []),
            ('message:["oh yeah"]', ["oh yeah"]),
            ('message:["oh yeah", "oh no"]', ["oh no", "oh yeah"]),
        ]

        for query, expected in tests:
            result = transactions.query(
                selected_columns=["message"],
                query=query,
                snuba_params=self.snuba_params,
                orderby=["message"],
                referrer="test_discover_query",
            )

            data = result["data"]
            assert len(data) == len(expected)
            assert [item["message"] for item in data] == expected
    def test_release_condition(self) -> None:
        """An explicit release filter matches only events with that release."""
        result = transactions.query(
            selected_columns=["id", "message"],
            query=f"release:{self.create_release(self.project).version}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert len(result["data"]) == 0

        result = transactions.query(
            selected_columns=["id", "message"],
            query=f"release:{self.release.version}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert len(result["data"]) == 1
        data = result["data"]
        assert data[0]["id"] == self.event.event_id
        assert data[0]["message"] == self.event.transaction
        assert "event_id" not in data[0]
    def test_semver_condition(self) -> None:
        """Semver release filters support >, >=, <, and negated equality."""
        release_1 = self.create_release(version="test@1.2.3")
        release_2 = self.create_release(version="test@1.2.4")
        release_3 = self.create_release(version="test@1.2.5")

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        release_1_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        release_1_e_2 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_2.version
        release_2_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_2.version
        release_2_e_2 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_3.version
        release_3_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_3.version
        release_3_e_2 = self.store_event(data, project_id=self.project.id).event_id

        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:>1.2.3",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:>=1.2.3",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_1_e_1,
            release_1_e_2,
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_ALIAS}:<1.2.4",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {release_1_e_1, release_1_e_2}
        result = transactions.query(
            selected_columns=["id"],
            query=f"!{SEMVER_ALIAS}:1.2.3",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        # The negated filter also matches the setUp event (no semver release).
        assert {r["id"] for r in result["data"]} == {
            self.event.event_id,
            release_2_e_1,
            release_2_e_2,
            release_3_e_1,
            release_3_e_2,
        }
    def test_release_stage_condition(self) -> None:
        """Release-stage filters (adopted/replaced/low adoption) match correctly."""
        replaced_release = self.create_release(
            version="replaced_release",
            environments=[self.environment],
            adopted=timezone.now(),
            unadopted=timezone.now(),
        )
        adopted_release = self.create_release(
            version="adopted_release",
            environments=[self.environment],
            adopted=timezone.now(),
        )
        self.create_release(version="not_adopted_release", environments=[self.environment])

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = adopted_release.version
        data["environment"] = self.environment.name
        adopted_release_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = adopted_release.version
        data["environment"] = self.environment.name
        adopted_release_e_2 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = replaced_release.version
        data["environment"] = self.environment.name
        replaced_release_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = replaced_release.version
        data["environment"] = self.environment.name
        replaced_release_e_2 = self.store_event(data, project_id=self.project.id).event_id

        # Release stages are scoped per-environment.
        self.snuba_params.environments = [self.environment]

        result = transactions.query(
            selected_columns=["id"],
            query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
        }

        result = transactions.query(
            selected_columns=["id"],
            query=f"!{RELEASE_STAGE_ALIAS}:{ReleaseStages.LOW_ADOPTION.value}",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
            replaced_release_e_1,
            replaced_release_e_2,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.ADOPTED.value}, {ReleaseStages.REPLACED.value}]",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            adopted_release_e_1,
            adopted_release_e_2,
            replaced_release_e_1,
            replaced_release_e_2,
        }
    def test_semver_package_condition(self) -> None:
        """The semver package alias filters releases by package name."""
        release_1 = self.create_release(version="test@1.2.3")
        release_2 = self.create_release(version="test2@1.2.4")

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        data["environment"] = self.environment.name
        release_1_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        data["environment"] = self.environment.name
        release_1_e_2 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_2.version
        data["environment"] = self.environment.name
        release_2_e_1 = self.store_event(data, project_id=self.project.id).event_id

        result = transactions.query(
            selected_columns=["id"],
            referrer="discover",
            query=f"{SEMVER_PACKAGE_ALIAS}:test",
            snuba_params=self.snuba_params,
        )
        assert {r["id"] for r in result["data"]} == {
            release_1_e_1,
            release_1_e_2,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_PACKAGE_ALIAS}:test2",
            referrer="discover",
            snuba_params=self.snuba_params,
        )
        assert {r["id"] for r in result["data"]} == {
            release_2_e_1,
        }
    def test_semver_build_condition(self) -> None:
        """The semver build alias filters by build number, including ranges."""
        release_1 = self.create_release(version="test@1.2.3+123")
        release_2 = self.create_release(version="test2@1.2.4+124")

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        data["environment"] = self.environment.name
        release_1_e_1 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_1.version
        data["environment"] = self.environment.name
        release_1_e_2 = self.store_event(data, project_id=self.project.id).event_id

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["release"] = release_2.version
        data["environment"] = self.environment.name
        release_2_e_1 = self.store_event(data, project_id=self.project.id).event_id

        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_BUILD_ALIAS}:123",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_1_e_1,
            release_1_e_2,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_BUILD_ALIAS}:124",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {
            release_2_e_1,
        }
        result = transactions.query(
            selected_columns=["id"],
            query=f"{SEMVER_BUILD_ALIAS}:>=123",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        assert {r["id"] for r in result["data"]} == {release_1_e_1, release_1_e_2, release_2_e_1}
    def test_message_orderby(self) -> None:
        """Ordering by message works ascending and descending."""
        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "oh yeah"
        self.event = self.store_event(data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "oh no"
        self.event = self.store_event(data, project_id=self.project.id)

        tests = [
            ("message", ["a" * 32, "oh no", "oh yeah"]),
            (
                "-message",
                [
                    "oh yeah",
                    "oh no",
                    "a" * 32,
                ],
            ),
        ]

        for orderby, expected in tests:
            result = transactions.query(
                selected_columns=["message"],
                query="",
                snuba_params=self.snuba_params,
                orderby=[orderby],
                referrer="test_discover_query",
            )

            data = result["data"]
            assert len(data) == 3
            assert [item["message"] for item in data] == expected
    def test_missing_project(self) -> None:
        """Events from a deleted project are dropped from the results."""
        projects = []
        other_project = None
        for project_name in ["a" * 32, "z" * 32, "m" * 32]:
            other_project = self.create_project(organization=self.organization, slug=project_name)
            projects.append(other_project)
            data = load_data("transaction", timestamp=before_now(seconds=3))
            data["transaction"] = "ohh no"
            self.event = self.store_event(data, project_id=other_project.id)

        self.snuba_params.projects = projects

        # delete the last project so it's missing
        if other_project is not None:
            other_project.delete()

        result = transactions.query(
            selected_columns=["message", "project"],
            query="",
            snuba_params=self.snuba_params,
            orderby=["project"],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 2
        assert [item["project"] for item in data] == ["a" * 32, "z" * 32]
    def test_offsets(self) -> None:
        """limit/offset paginate the ordered result set."""
        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "hello1"
        self.event = self.store_event(data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(seconds=3))
        data["transaction"] = "hello2"
        self.event = self.store_event(data, project_id=self.project.id)

        result = transactions.query(
            selected_columns=["message"],
            query="",
            snuba_params=self.snuba_params,
            orderby=["message"],
            limit=1,
            offset=2,
            referrer="discover",
        )

        data = result["data"]
        assert len(data) == 1
        # because we're ording by `message`, and offset by 2, the message should be `hello2`
        # order would be a * 32, hello1, hello2
        assert data[0]["message"] == "hello2"
    def test_orderby_field_alias(self) -> None:
        """Ordering by the transaction.status alias sorts by its numeric code."""
        events = (
            ("a" * 32, "ok", False),
            ("b" * 32, "already_exists", True),
            ("c" * 32, "aborted", None),
        )
        for event in events:
            data = load_data("transaction", timestamp=before_now(minutes=10))
            data["event_id"] = event[0]
            data["transaction"] = event[0]
            data["contexts"]["trace"]["status"] = event[1]
            self.store_event(data=data, project_id=self.project.id)

        # NOTE(review): each orderby case is listed twice -- the duplicates
        # look unintentional.
        queries = [
            ("transaction.status", [0, 6, 10]),
            ("transaction.status", [0, 6, 10]),
            ("-transaction.status", [10, 6, 0]),
            ("-transaction.status", [10, 6, 0]),
        ]

        for orderby, expected in queries:
            result = transactions.query(
                selected_columns=["transaction", "transaction.status"],
                query="",
                orderby=[orderby],
                snuba_params=SnubaParams(
                    start=before_now(minutes=12),
                    end=before_now(minutes=8),
                    projects=[self.project],
                    organization=self.organization,
                ),
                referrer="discover",
            )

            data = result["data"]
            assert [x["transaction.status"] for x in data] == expected
    def test_transaction_status(self) -> None:
        """transaction.status is returned as its numeric code."""
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "/test_transaction/success"
        data["contexts"]["trace"]["status"] = "ok"
        self.store_event(data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "/test_transaction/aborted"
        data["contexts"]["trace"]["status"] = "aborted"
        self.store_event(data, project_id=self.project.id)

        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "/test_transaction/already_exists"
        data["contexts"]["trace"]["status"] = "already_exists"
        self.store_event(data, project_id=self.project.id)

        result = transactions.query(
            selected_columns=["transaction.status"],
            query="",
            snuba_params=self.snuba_params,
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == 4
        assert {
            data[0]["transaction.status"],
            data[1]["transaction.status"],
            data[2]["transaction.status"],
            data[3]["transaction.status"],
        } == {0, 10, 6}
    def test_project_in_condition_with_or(self) -> None:
        """A project filter OR'd with event.type still restricts to that project."""
        project2 = self.create_project(organization=self.organization)
        event_data = load_data("transaction", timestamp=before_now(seconds=3))
        self.store_event(data=event_data, project_id=project2.id)
        expected = sorted([self.project.slug])

        result = transactions.query(
            selected_columns=["project"],
            query=f"project:{self.project.slug} or event.type:transaction",
            snuba_params=SnubaParams(
                start=self.two_min_ago,
                end=self.now,
                projects=[self.project, project2],
                organization=self.organization,
            ),
            orderby=["project"],
            referrer="discover",
        )
        data = result["data"]
        assert len(data) == len(expected)
        assert [item["project"] for item in data] == expected
    def test_project_mapping(self) -> None:
        """Project ids are mapped back to their slugs in results."""
        other_project = self.create_project(organization=self.organization)
        self.snuba_params.projects = [other_project]
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["transaction"] = "hello"
        self.store_event(data, project_id=other_project.id)

        result = transactions.query(
            selected_columns=["project", "message"],
            query="",
            snuba_params=self.snuba_params,
            orderby=["project"],
            referrer="discover",
        )

        data = result["data"]
        assert len(data) == 1
        assert data[0]["project"] == other_project.slug
    def test_sorting_and_reverse_sorting_project_name(self) -> None:
        """Ordering by project slug works in both directions."""
        projects = []
        for project_name in ["a" * 32, "z" * 32, "m" * 32]:
            other_project = self.create_project(organization=self.organization, slug=project_name)
            projects.append(other_project)
            data = load_data("transaction", timestamp=before_now(minutes=1))
            self.store_event(data, project_id=other_project.id)

        self.snuba_params.projects = projects

        result = transactions.query(
            selected_columns=["project", "message"],
            query="",
            snuba_params=self.snuba_params,
            orderby=["-project"],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 3
        assert [item["project"] for item in data] == ["z" * 32, "m" * 32, "a" * 32]

        result = transactions.query(
            selected_columns=["project", "message"],
            query="",
            snuba_params=self.snuba_params,
            orderby=["project"],
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 3
        assert [item["project"] for item in data] == ["a" * 32, "m" * 32, "z" * 32]
    def test_tags_colliding_with_fields(self) -> None:
        """A tag named like a field (`id`) is reachable via tags[...] syntax."""
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["tags"] = [["id", "new"]]
        event = self.store_event(data, project_id=self.project.id)

        tests = [
            ("id", "", sorted([self.event.event_id, event.event_id])),
            ("id", f"id:{event.event_id}", [event.event_id]),
            ("tags[id]", "", ["", "new"]),
            ("tags[id]", "tags[id]:new", ["new"]),
        ]

        for column, query, expected in tests:
            result = transactions.query(
                selected_columns=[column],
                query=query,
                snuba_params=self.snuba_params,
                orderby=[column],
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == len(expected), (query, expected)
            assert [item[column] for item in data] == expected
    def test_tags_orderby(self) -> None:
        """Ordering works for both bare tag names and tags[...] syntax."""
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["tags"] = [["key1", "value2"]]
        self.store_event(data, project_id=self.project.id)

        tests = [
            ("key1", "key1", ["value1", "value2"]),
            ("key1", "-key1", ["value2", "value1"]),
            ("tags[key1]", "tags[key1]", ["value1", "value2"]),
            ("tags[key1]", "-tags[key1]", ["value2", "value1"]),
        ]

        for column, orderby, expected in tests:
            result = transactions.query(
                selected_columns=[column],
                query="",
                snuba_params=self.snuba_params,
                orderby=[orderby],
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == len(expected)
            assert [item[column] for item in data] == expected
    def test_tags_filter(self) -> None:
        """Tag filters behave identically for bare keys and tags[...] syntax.

        Covers presence (has/!has), exact match, empty string, wildcard and
        list ("in") filters against a tag that has two distinct values.
        """
        data = load_data("transaction", timestamp=before_now(minutes=1))
        data["tags"] = [["key1", "value2"]]
        self.store_event(data, project_id=self.project.id)
        # (selected column, search query, expected values ordered by column)
        tests: list[tuple[str, str, list[str]]] = [
            ("key1", "", ["value1", "value2"]),
            ("key1", "has:key1", ["value1", "value2"]),
            ("key1", "!has:key1", []),
            ("key1", "key1:value1", ["value1"]),
            ("key1", "key1:value2", ["value2"]),
            ("key1", 'key1:""', []),
            ("key1", "key1:value*", ["value1", "value2"]),
            ("key1", 'key1:["value1"]', ["value1"]),
            ("key1", 'key1:["value1", "value2"]', ["value1", "value2"]),
            ("tags[key1]", "", ["value1", "value2"]),
            # has does not work with tags[...] syntax
            # ("tags[key1]", 'has:"tags[key1]"', ["value1", "value2"]),
            # ("tags[key1]", '!has:"tags[key1]"', []),
            ("tags[key1]", "tags[key1]:value1", ["value1"]),
            ("tags[key1]", "tags[key1]:value2", ["value2"]),
            ("tags[key1]", 'tags[key1]:""', []),
            ("tags[key1]", "tags[key1]:value*", ["value1", "value2"]),
            ("tags[key1]", 'tags[key1]:["value1"]', ["value1"]),
            ("tags[key1]", 'tags[key1]:["value1", "value2"]', ["value1", "value2"]),
        ]
        for column, query, expected in tests:
            result = transactions.query(
                selected_columns=[column],
                query=query,
                snuba_params=self.snuba_params,
                orderby=[column],
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == len(expected), (column, query, expected)
            assert [item[column] for item in data] == expected
    def test_team_key_transactions(self) -> None:
        """team_key_transaction evaluates to 1/0 per row, driven by the
        TeamKeyTransaction rows of the teams passed in SnubaParams, and is
        filterable via has/!has and boolean syntax."""
        team1 = self.create_team(organization=self.organization, name="Team A")
        self.project.add_team(team1)
        team2 = self.create_team(organization=self.organization, name="Team B")
        self.project.add_team(team2)
        # One plain transaction plus one key transaction per team.
        txns = ["/blah_transaction/"]
        key_txns = [
            (team1, "/foo_transaction/"),
            (team2, "/zoo_transaction/"),
        ]
        for transaction in txns:
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=(5)),
            )
            data["transaction"] = transaction
            self.store_event(data, project_id=self.project.id)
        for team, transaction in key_txns:
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=(5)),
            )
            data["transaction"] = transaction
            self.store_event(data, project_id=self.project.id)
            TeamKeyTransaction.objects.create(
                organization=self.organization,
                transaction=transaction,
                project_team=ProjectTeam.objects.get(project=self.project, team=team),
            )
        # (search query, expected (transaction, team_key_transaction) pairs)
        queries = [
            ("", [("/blah_transaction/", 0), ("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
            ("has:team_key_transaction", [("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
            ("!has:team_key_transaction", [("/blah_transaction/", 0)]),
            ("team_key_transaction:true", [("/foo_transaction/", 1), ("/zoo_transaction/", 1)]),
            ("team_key_transaction:false", [("/blah_transaction/", 0)]),
        ]
        for query, expected_results in queries:
            result = transactions.query(
                selected_columns=["transaction", "team_key_transaction"],
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[self.project],
                    organization=self.organization,
                    teams=[team1, team2],
                ),
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == len(expected_results)
            assert [
                (x["transaction"], x["team_key_transaction"])
                for x in sorted(data, key=lambda k: k["transaction"])
            ] == expected_results
def test_timestamp_rounding_fields(self) -> None:
result = transactions.query(
selected_columns=["timestamp.to_hour", "timestamp.to_day"],
query="",
snuba_params=self.snuba_params,
referrer="test_discover_query",
)
data = result["data"]
assert len(data) == 1
hour = self.event_time.replace(minute=0, second=0, microsecond=0)
day = hour.replace(hour=0)
assert [item["timestamp.to_hour"] for item in data] == [hour.isoformat()]
assert [item["timestamp.to_day"] for item in data] == [day.isoformat()]
    def test_timestamp_rounding_filters(self) -> None:
        """timestamp.to_hour / timestamp.to_day accept datetime comparison filters."""
        one_day_ago = before_now(days=1)
        two_day_ago = before_now(days=2)
        three_day_ago = before_now(days=3)
        # Widen the query window so the two-day-old event is in range.
        self.snuba_params.start = three_day_ago
        data = load_data("transaction", timestamp=two_day_ago)
        self.store_event(data, project_id=self.project.id)
        result = transactions.query(
            selected_columns=["timestamp.to_hour", "timestamp.to_day"],
            query=f"timestamp.to_hour:<{one_day_ago.isoformat()} timestamp.to_day:<{one_day_ago.isoformat()}",
            snuba_params=self.snuba_params,
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 1
        hour = two_day_ago.replace(minute=0, second=0, microsecond=0)
        day = hour.replace(hour=0)
        assert [item["timestamp.to_hour"] for item in data] == [hour.isoformat()]
        assert [item["timestamp.to_day"] for item in data] == [day.isoformat()]
    def test_user_display(self) -> None:
        """user.display resolves to email, username, id, or ip — one event per case
        (the email case comes from the class-level fixture event)."""
        # `user.display` should give `username`
        data = load_data("transaction", timestamp=self.event_time)
        data["transaction"] = "a" * 32
        data["user"] = {"username": "brucew", "id": "1234", "ip": "127.0.0.1"}
        self.event = self.store_event(data=data, project_id=self.project.id)
        # `user.display` should give `id`
        data = load_data("transaction", timestamp=self.event_time)
        data["transaction"] = "a" * 32
        data["user"] = {"id": "1234", "ip": "127.0.0.1"}
        self.event = self.store_event(data=data, project_id=self.project.id)
        # `user.display` should give `ip`
        data = load_data("transaction", timestamp=self.event_time)
        data["transaction"] = "a" * 32
        data["user"] = {"ip_address": "127.0.0.1"}
        self.event = self.store_event(data=data, project_id=self.project.id)
        result = transactions.query(
            selected_columns=["user.display"],
            query="",
            snuba_params=self.snuba_params,
            referrer="test_discover_query",
        )
        data = result["data"]
        assert len(data) == 4
        assert {item["user.display"] for item in data} == {
            "bruce@example.com",
            "brucew",
            "1234",
            "127.0.0.1",
        }
def test_user_display_filter(self) -> None:
# `user.display` should give `username`
data = load_data("transaction", timestamp=self.event_time)
data["transaction"] = "a" * 32
data["user"] = {"username": "brucew", "ip": "127.0.0.1"}
self.event = self.store_event(data=data, project_id=self.project.id)
result = transactions.query(
selected_columns=["user.display"],
query="has:user.display user.display:bruce@example.com",
snuba_params=self.snuba_params,
referrer="test_discover_query",
)
data = result["data"]
assert len(data) == 1
assert [item["user.display"] for item in data] == ["bruce@example.com"]
def test_using_project_and_project_name(self) -> None:
projects = []
for project_name in ["a" * 32, "z" * 32, "m" * 32]:
other_project = self.create_project(organization=self.organization, slug=project_name)
projects.append(other_project)
data = load_data("transaction", timestamp=self.event_time)
self.store_event(data=data, project_id=other_project.id)
self.snuba_params.projects = projects
result = transactions.query(
selected_columns=["project.name", "message", "project"],
query="",
snuba_params=self.snuba_params,
orderby=["project.name"],
referrer="test_discover_query",
)
data = result["data"]
assert len(data) == 3
assert [item["project.name"] for item in data] == [
"a" * 32,
"m" * 32,
"z" * 32,
]
    @pytest.mark.xfail(reason="Started failing on ClickHouse 21.8")
    def test_snql_wip_project_threshold_config(self) -> None:
        """project_threshold_config resolves, per row: the project-level
        threshold, any per-transaction override, and the ["duration", 300]
        default once a project's threshold rows are deleted."""
        ProjectTransactionThreshold.objects.create(
            project=self.project,
            organization=self.project.organization,
            threshold=100,
            metric=TransactionMetric.DURATION.value,
        )
        project2 = self.create_project()
        ProjectTransactionThreshold.objects.create(
            project=project2,
            organization=project2.organization,
            threshold=600,
            metric=TransactionMetric.LCP.value,
        )
        events = [
            ("a" * 10, 300),
            ("b" * 10, 300),
            ("c" * 10, 3000),
            ("d" * 10, 3000),
        ]
        for idx, event in enumerate(events):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=(3 + idx)),
                start_timestamp=before_now(minutes=(3 + idx), milliseconds=event[1]),
            )
            data["event_id"] = f"{idx}" * 32
            data["transaction"] = event[0]
            self.store_event(data, project_id=self.project.id)
            # Every other transaction gets a per-transaction override (1000ms).
            if idx % 2:
                ProjectTransactionThresholdOverride.objects.create(
                    transaction=event[0],
                    project=self.project,
                    organization=self.organization,
                    threshold=1000,
                    metric=TransactionMetric.DURATION.value,
                )
        data = load_data(
            "transaction", timestamp=before_now(minutes=3), start_timestamp=before_now(minutes=4)
        )
        data["transaction"] = "e" * 10
        self.store_event(data, project_id=project2.id)
        expected_transaction = ["a" * 10, "b" * 10, "c" * 10, "d" * 10, "e" * 10]
        expected_project_threshold_config = [
            ["duration", 100],
            ["duration", 1000],
            ["duration", 100],
            ["duration", 1000],
            ["lcp", 600],
        ]
        result = transactions.query(
            selected_columns=["project", "transaction", "project_threshold_config"],
            query="",
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[self.project, project2],
                organization=self.organization,
            ),
            referrer="test_discover_query",
        )
        assert len(result["data"]) == 5
        sorted_data = sorted(result["data"], key=lambda k: k["transaction"])
        assert [row["transaction"] for row in sorted_data] == expected_transaction
        assert [row["project_threshold_config"][0] for row in sorted_data] == [
            r[0] for r in expected_project_threshold_config
        ]
        assert [row["project_threshold_config"][1] for row in sorted_data] == [
            r[1] for r in expected_project_threshold_config
        ]
        # With project2's threshold row removed, its events fall back to the default.
        ProjectTransactionThreshold.objects.filter(
            project=project2,
            organization=project2.organization,
        ).delete()
        expected_transaction = ["e" * 10]
        expected_project_threshold_config = [["duration", 300]]
        result = transactions.query(
            selected_columns=["project", "transaction", "project_threshold_config"],
            query="",
            snuba_params=SnubaParams(
                start=before_now(minutes=10),
                end=before_now(minutes=2),
                projects=[project2],
                organization=self.organization,
            ),
            referrer="test_discover_query",
        )
        assert len(result["data"]) == 1
        sorted_data = sorted(result["data"], key=lambda k: k["transaction"])
        assert [row["transaction"] for row in sorted_data] == expected_transaction
        assert [row["project_threshold_config"][0] for row in sorted_data] == [
            r[0] for r in expected_project_threshold_config
        ]
        assert [row["project_threshold_config"][1] for row in sorted_data] == [
            r[1] for r in expected_project_threshold_config
        ]
    def test_to_other_function(self) -> None:
        """to_other(release, value[, other_label, match_label]) buckets rows by
        whether release matches, using default "this"/"that" labels or the two
        custom labels, and sanitizes the arguments into the result alias."""
        project = self.create_project()
        for i in range(3):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = f"/to_other/{i}"
            data["release"] = "aaaa"
            self.store_event(data, project_id=project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/to_other/y"
        data["release"] = "yyyy"
        self.store_event(data, project_id=project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/to_other/z"
        data["release"] = "zzzz"
        self.store_event(data, project_id=project.id)
        columns1 = ["transaction", 'to_other(release,"aaaa")']
        columns2 = ["transaction", 'to_other(release,"aaaa",old,new)']
        # (selected columns, query, expected bucket labels per row, result alias)
        test_cases = [
            (columns1, "", ["this", "this", "this", "that", "that"], "to_other_release__aaaa"),
            (columns2, "", ["new", "new", "new", "old", "old"], "to_other_release__aaaa__old_new"),
        ]
        for cols, query, expected, alias in test_cases:
            result = transactions.query(
                selected_columns=cols,
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == len(expected)
            assert [x[alias] for x in data] == expected
    def test_count_if_function(self) -> None:
        """count_if(column, operator, value) with equals/notEquals and
        less/lessOrEquals operators; expected counts show the string operators
        compare releases lexicographically ("aaaa" < "bbbb")."""
        for i in range(3):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["release"] = "aaaa"
            self.store_event(data, project_id=self.project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["release"] = "bbbb"
        self.store_event(data, project_id=self.project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["release"] = "cccc"
        self.store_event(data, project_id=self.project.id)
        columns1 = ["count()", "count_if(release,equals,aaaa)", "count_if(release,notEquals,aaaa)"]
        columns2 = ["count()", "count_if(release,less,bbbb)", "count_if(release,lessOrEquals,bbbb)"]
        # NOTE: count() is 5, not 6 — the class fixture contributes one event.
        test_cases = [
            (
                columns1,
                "",
                {
                    "count": 5,
                    "count_if_release_equals_aaaa": 3,
                    "count_if_release_notEquals_aaaa": 2,
                },
            ),
            (
                columns2,
                "",
                {
                    "count": 5,
                    "count_if_release_less_bbbb": 3,
                    "count_if_release_lessOrEquals_bbbb": 4,
                },
            ),
        ]
        for cols, query, expected in test_cases:
            result = transactions.query(
                selected_columns=cols,
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[self.project],
                ),
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == 1
            assert data[0] == expected
    def test_count_if_function_with_unicode(self) -> None:
        """count_if() arguments containing non-ASCII text are accepted and
        sanitized into the result alias as `_uXXXX` escape sequences."""
        unicode_phrase1 = "\u716e\u6211\u66f4\u591a\u7684\u98df\u7269\uff0c\u6211\u9913\u4e86"
        unicode_phrase2 = "\u53cd\u6b63\u611b\u60c5\u4e0d\u5c31\u90a3\u6837"
        for i in range(3):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["release"] = unicode_phrase1
            self.store_event(data, project_id=self.project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["release"] = unicode_phrase2
        self.store_event(data, project_id=self.project.id)
        columns1 = [
            "count()",
            f"count_if(release,equals,{unicode_phrase1})",
            f"count_if(release,notEquals,{unicode_phrase1})",
        ]
        test_cases = [
            (
                columns1,
                "",
                {
                    "count": 4,
                    "count_if_release_equals__u716e_u6211_u66f4_u591a_u7684_u98df_u7269_uff0c_u6211_u9913_u4e86": 3,
                    "count_if_release_notEquals__u716e_u6211_u66f4_u591a_u7684_u98df_u7269_uff0c_u6211_u9913_u4e86": 1,
                },
            ),
        ]
        for cols, query, expected in test_cases:
            result = transactions.query(
                selected_columns=cols,
                query=query,
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[self.project],
                ),
                referrer="test_discover_query",
            )
            data = result["data"]
            assert len(data) == 1
            assert data[0] == expected
    def test_failure_count_function(self) -> None:
        """failure_count() tallies non-success trace statuses per transaction and
        only filters results when use_aggregate_conditions is enabled."""
        project = self.create_project()
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/success"
        self.store_event(data, project_id=project.id)
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/unknown"
        data["contexts"]["trace"]["status"] = "unknown_error"
        self.store_event(data, project_id=project.id)
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = f"/failure_count/{i}"
            data["contexts"]["trace"]["status"] = "unauthenticated"
            self.store_event(data, project_id=project.id)
        # Second failing event for /failure_count/0 so its count is 2.
        data = load_data("transaction", timestamp=before_now(minutes=5))
        data["transaction"] = "/failure_count/0"
        data["contexts"]["trace"]["status"] = "unauthenticated"
        self.store_event(data, project_id=project.id)
        # (search query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 8, True),
            ("failure_count():>0", 7, True),
            ("failure_count():>0", 8, False),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = transactions.query(
                selected_columns=["transaction", "failure_count()"],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            assert data[0]["failure_count"] == 2
            assert data[1]["failure_count"] == 1
    def test_apdex_function(self) -> None:
        """apdex(X) uses a fixed millisecond threshold, while apdex() reads the
        per-project threshold plus the per-transaction LCP override; project2
        ("zorp" events) exercises the defaults."""
        project = self.create_project()
        ProjectTransactionThreshold.objects.create(
            project=project,
            organization=project.organization,
            threshold=400,
            metric=TransactionMetric.DURATION.value,
        )
        ProjectTransactionThresholdOverride.objects.create(
            project=project,
            transaction="/apdex/ace",
            organization=project.organization,
            threshold=400,
            metric=TransactionMetric.LCP.value,
        )
        project2 = self.create_project()
        # (transaction suffix, duration in ms)
        events = [
            ("ace", 400),
            ("ace", 400),
            ("one", 400),
            ("one", 400),
            ("two", 3000),
            ("two", 3000),
            ("three", 300),
            ("three", 3000),
            ("zorp", 300),
            ("zorp", 3000),
        ]
        for idx, event in enumerate(events):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=(5 + idx)),
                start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
            )
            data["measurements"]["lcp"]["value"] = 3000
            data["event_id"] = f"{idx}" * 32
            data["transaction"] = f"/apdex/{event[0]}"
            data["user"] = {"email": f"{idx}@example.com"}
            if event[0] == "zorp":
                self.store_event(data, project_id=project2.id)  # No custom thresholds for project2
            else:
                self.store_event(data, project_id=project.id)
        # (query, expected apdex per transaction sorted by name, columns, alias)
        queries = [
            ("", [0.5, 0.5, 0.25, 0.0, 0.25], ["apdex(100)"], "apdex_100"),
            ("", [0.0, 1.0, 0.5, 0.0, 0.5], ["apdex()"], "apdex"),
            ("apdex(100):<0.5", [0.25, 0.0, 0.25], ["apdex(100)"], "apdex_100"),
            ("apdex():>0", [1.0, 0.5, 0.5], ["apdex()"], "apdex"),
        ]
        for query, expected_apdex, col, alias in queries:
            result = transactions.query(
                selected_columns=["transaction"] + col,
                query=query,
                orderby=["transaction"],
                referrer="discover",
                snuba_params=SnubaParams(
                    start=before_now(minutes=30),
                    end=before_now(minutes=2),
                    projects=[project, project2],
                    organization=self.organization,
                ),
                use_aggregate_conditions=True,
            )
            data = result["data"]
            assert len(data) == len(expected_apdex)
            assert [
                x[alias] for x in sorted(data, key=lambda k: k["transaction"])
            ] == expected_apdex
    def test_count_miserable_function(self) -> None:
        """count_miserable(user[, X]) counts distinct users whose experience
        exceeds the miserable threshold — fixed (X) or from per-project /
        per-transaction threshold configuration."""
        project = self.create_project()
        ProjectTransactionThreshold.objects.create(
            project=project,
            organization=project.organization,
            threshold=400,
            metric=TransactionMetric.DURATION.value,
        )
        ProjectTransactionThresholdOverride.objects.create(
            project=project,
            transaction="/count_miserable/ace",
            organization=project.organization,
            threshold=400,
            metric=TransactionMetric.LCP.value,
        )
        project2 = self.create_project()
        # (transaction suffix, duration in ms); each event has a unique user.
        events = [
            ("ace", 400),
            ("ace", 400),
            ("one", 400),
            ("one", 400),
            ("two", 3000),
            ("two", 3000),
            ("three", 300),
            ("three", 3000),
            ("zorp", 300),
            ("zorp", 3000),
        ]
        for idx, event in enumerate(events):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=(5 + idx)),
                start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
            )
            data["measurements"]["lcp"]["value"] = 3000
            data["event_id"] = f"{idx}" * 32
            data["transaction"] = f"/count_miserable/{event[0]}"
            data["user"] = {"email": f"{idx}@example.com"}
            if event[0] == "zorp":
                self.store_event(data, project_id=project2.id)  # No custom thresholds for project2
            else:
                self.store_event(data, project_id=project.id)
        # (query, expected counts per transaction sorted by name, columns, alias)
        queries = [
            (
                "",
                [0, 0, 1, 2, 1],
                ["count_miserable(user,100)"],
                "count_miserable_user_100",
            ),
            ("", [2, 0, 1, 2, 1], ["count_miserable(user)"], "count_miserable_user"),
            (
                "count_miserable(user,100):<2",
                [0, 0, 1, 1],
                ["count_miserable(user,100)"],
                "count_miserable_user_100",
            ),
            (
                "count_miserable(user):>0",
                [2, 1, 2, 1],
                ["count_miserable(user)"],
                "count_miserable_user",
            ),
        ]
        for query, expected_count_miserable, col, alias in queries:
            result = transactions.query(
                selected_columns=["transaction"] + col,
                query=query,
                orderby=["transaction"],
                referrer="discover",
                snuba_params=SnubaParams(
                    start=before_now(minutes=30),
                    end=before_now(minutes=2),
                    projects=[project, project2],
                    organization=self.organization,
                ),
                use_aggregate_conditions=True,
            )
            data = result["data"]
            assert len(data) == len(expected_count_miserable)
            assert [
                x[alias] for x in sorted(data, key=lambda k: k["transaction"])
            ] == expected_count_miserable
def test_user_misery_function(self) -> None:
project = self.create_project()
ProjectTransactionThreshold.objects.create(
project=project,
organization=project.organization,
threshold=400,
metric=TransactionMetric.DURATION.value,
)
ProjectTransactionThresholdOverride.objects.create(
project=project,
transaction="/user_misery/ace",
organization=project.organization,
threshold=400,
metric=TransactionMetric.LCP.value,
)
project2 = self.create_project()
events = [
("ace", 400),
("ace", 400),
("one", 400),
("one", 400),
("two", 3000),
("two", 3000),
("three", 300),
("three", 3000),
("zorp", 300),
("zorp", 3000),
]
for idx, event in enumerate(events):
data = load_data(
"transaction",
timestamp=before_now(minutes=(5 + idx)),
start_timestamp=before_now(minutes=(5 + idx), milliseconds=event[1]),
)
data["measurements"]["lcp"]["value"] = 3000
data["event_id"] = f"{idx}" * 32
data["transaction"] = f"/user_misery/{event[0]}"
data["user"] = {"email": f"{idx}@example.com"}
if event[0] == "zorp":
self.store_event(data, project_id=project2.id) # No custom thresholds for project2
else:
self.store_event(data, project_id=project.id)
queries = [
(
"",
[0.0492, 0.0492, 0.0575, 0.0659, 0.0575],
["user_misery(100)"],
"user_misery_100",
),
("", [0.0659, 0.0492, 0.0575, 0.0659, 0.0575], ["user_misery()"], "user_misery"),
(
"user_misery(100):<0.06",
[0.0492, 0.0492, 0.0575, 0.0575],
["user_misery(100)"],
"user_misery_100",
),
(
"user_misery():>0.05",
[0.0659, 0.0575, 0.0659, 0.0575],
["user_misery()"],
"user_misery",
),
]
similar = lambda a, b: abs(a - b) < 0.001
for query, expected_user_misery, col, alias in queries:
result = transactions.query(
selected_columns=["transaction"] + col,
referrer="discover",
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=30),
end=before_now(minutes=2),
projects=[project, project2],
organization=self.organization,
),
use_aggregate_conditions=True,
)
data = result["data"]
assert len(data) == len(expected_user_misery)
for i, misery in enumerate(sorted(data, key=lambda k: k["transaction"])):
assert similar(misery[alias], expected_user_misery[i])
def test_count(self) -> None:
project = self.create_project()
for i in range(6):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/count/6"
self.store_event(data, project_id=project.id)
for i in range(8):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/count/8"
self.store_event(data, project_id=project.id)
queries = [
("", 2, (6, 8), True),
("count():>6", 2, (6, 8), False),
("count():>6", 1, (8,), True),
]
for query, expected_length, expected_counts, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=["transaction", "count()"],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=10),
end=before_now(minutes=2),
projects=[project],
),
referrer="discover",
use_aggregate_conditions=use_aggregate_conditions,
)
data = result["data"]
assert len(data) == expected_length
for index, count in enumerate(data):
assert count["count"] == expected_counts[index]
    def test_compare_numeric_aggregate_function(self) -> None:
        """compare_numeric_aggregate(alias, op, value) returns 1/0 by comparing
        an aggregate alias computed in the same query against a constant."""
        project = self.create_project()
        for i in range(6):
            data = load_data(
                "transaction",
                timestamp=before_now(minutes=3),
                start_timestamp=before_now(minutes=4 + i),
            )
            data["transaction"] = "/percentile"
            self.store_event(data, project_id=project.id)
        # (selected columns, search query)
        fields = [
            (
                [
                    "transaction",
                    "p50(measurements.lcp)",
                    "compare_numeric_aggregate(p50_measurements_lcp,greater,2000)",
                ],
                "",
            ),
            (
                [
                    "transaction",
                    "p50(measurements.lcp)",
                    "compare_numeric_aggregate(p50_measurements_lcp,less,2000)",
                ],
                "",
            ),
        ]
        # (result alias, expected 1/0 value) — parallel to `fields`.
        expected_results = [
            ("compare_numeric_aggregate_p50_measurements_lcp_greater_2000", 1),
            ("compare_numeric_aggregate_p50_measurements_lcp_less_2000", 0),
        ]
        for i, test_case in enumerate(fields):
            selected, query = test_case
            result = transactions.query(
                referrer="discover",
                selected_columns=selected,
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=True,
            )
            alias, expected_value = expected_results[i]
            data = result["data"]
            assert data[0][alias] == expected_value
    def test_last_seen(self) -> None:
        """last_seen() returns the newest matching event's timestamp and supports
        datetime comparison filters when aggregate conditions are enabled."""
        project = self.create_project()
        expected_timestamp = before_now(minutes=3)
        string_condition_timestamp = before_now(minutes=4).strftime("%Y-%m-%dT%H:%M:%S+00:00")
        data = load_data("transaction", timestamp=expected_timestamp)
        data["transaction"] = "/last_seen"
        self.store_event(data, project_id=project.id)
        # Older events for the same transaction must not win.
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=i + 4))
            data["transaction"] = "/last_seen"
            self.store_event(data, project_id=project.id)
        # (search query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 1, True),
            (f"last_seen():>{string_condition_timestamp}", 1, True),
            ("last_seen():>0", 1, False),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = transactions.query(
                selected_columns=["transaction", "last_seen()"],
                query=query,
                referrer="discover",
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
            )
            data = result["data"]
            assert len(data) == expected_length
            assert data[0]["last_seen"] == expected_timestamp.strftime("%Y-%m-%dT%H:%M:%S+00:00")
def test_latest_event(self) -> None:
project = self.create_project()
expected_timestamp = before_now(minutes=3)
data = load_data("transaction", timestamp=expected_timestamp)
data["transaction"] = "/latest_event"
stored_event = self.store_event(data, project_id=project.id)
for i in range(6):
data = load_data("transaction", timestamp=before_now(minutes=i + 4))
data["transaction"] = "/latest_event"
self.store_event(data, project_id=project.id)
result = transactions.query(
selected_columns=["transaction", "latest_event()"],
query="",
orderby=["transaction"],
referrer="discover",
snuba_params=SnubaParams(
start=before_now(minutes=10),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=False,
)
data = result["data"]
assert len(data) == 1
assert data[0]["latest_event"] == stored_event.event_id
    def test_failure_rate(self) -> None:
        """failure_rate() is the failed fraction per transaction (6/10 and 3/10
        here); the aggregate filter only applies when use_aggregate_conditions
        is enabled."""
        project = self.create_project()
        # "/failure_rate/over": 6 failures out of 10 events.
        for i in range(6):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = "/failure_rate/over"
            data["contexts"]["trace"]["status"] = "unauthenticated"
            self.store_event(data, project_id=project.id)
        for i in range(4):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = "/failure_rate/over"
            self.store_event(data, project_id=project.id)
        # "/failure_rate/under": 3 failures out of 10 events.
        for i in range(7):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = "/failure_rate/under"
            self.store_event(data, project_id=project.id)
        for i in range(3):
            data = load_data("transaction", timestamp=before_now(minutes=5))
            data["transaction"] = "/failure_rate/under"
            data["contexts"]["trace"]["status"] = "unauthenticated"
            self.store_event(data, project_id=project.id)
        # (search query, expected row count, use_aggregate_conditions)
        queries = [
            ("", 2, True),
            ("failure_rate():>0.5", 1, True),
            ("failure_rate():>0.5", 2, False),
        ]
        for query, expected_length, use_aggregate_conditions in queries:
            result = transactions.query(
                selected_columns=["transaction", "failure_rate()"],
                query=query,
                orderby=["transaction"],
                snuba_params=SnubaParams(
                    start=before_now(minutes=10),
                    end=before_now(minutes=2),
                    projects=[project],
                ),
                use_aggregate_conditions=use_aggregate_conditions,
                referrer="discover",
            )
            data = result["data"]
            assert len(data) == expected_length
            assert data[0]["failure_rate"] == 0.6
            if expected_length > 1:
                assert data[1]["failure_rate"] == 0.3
def _create_percentile_events(self, project):
for i in range(6):
start = before_now(minutes=3)
end = start - timedelta(minutes=1 + i)
data = load_data(
"transaction",
timestamp=start,
start_timestamp=end,
)
data["transaction"] = "/p50"
self.store_event(data, project_id=project.id)
def test_percentile(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("percentile(transaction.duration, 0.7):>0", 1, False),
("percentile(transaction.duration, 0.7):>500000", 0, True),
("percentile(transaction.duration, 0.7):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
referrer="discover",
selected_columns=[
"transaction",
"percentile(transaction.duration, 0.7)",
"percentile(transaction.duration, 0.5)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=10),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["percentile_transaction_duration_0_7"] == 270000
assert data[0]["percentile_transaction_duration_0_5"] == 210000
def test_p50(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("p50(transaction.duration):>0", 1, False),
("p50(transaction.duration):>500000", 0, True),
("p50(transaction.duration):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
referrer="discover",
selected_columns=[
"transaction",
"p50(transaction.duration)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p50_transaction_duration"] == 210000
def test_p75(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("p75(transaction.duration):>0", 1, False),
("p75(transaction.duration):>500000", 0, True),
("p75(transaction.duration):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"p75(transaction.duration)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p75_transaction_duration"] == 285000
def test_p95(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("p95(transaction.duration):>0", 1, False),
("p95(transaction.duration):>500000", 0, True),
("p95(transaction.duration):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"p95(transaction.duration)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p95_transaction_duration"] == 345000
def test_p99(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("p99(transaction.duration):>0", 1, False),
("p99(transaction.duration):>500000", 0, True),
("p99(transaction.duration):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"p99(transaction.duration)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p99_transaction_duration"] == 357000
def test_p100(self) -> None:
project = self.create_project()
self._create_percentile_events(project)
queries = [
("", 1, True),
("p100(transaction.duration):>0", 1, False),
("p100(transaction.duration):>500000", 0, True),
("p100(transaction.duration):>100000", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"p100(transaction.duration)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p100_transaction_duration"] == 360000
def test_p100_with_measurement(self) -> None:
project = self.create_project()
for i in range(6):
data = load_data(
"transaction",
timestamp=before_now(minutes=3),
start_timestamp=before_now(minutes=4 + i),
)
data["transaction"] = "/p100"
data["measurements"]["frames_total"] = {"value": 100 * i}
data["measurements"]["frames_slow"] = {"value": 50 * i}
self.store_event(data, project_id=project.id)
queries = [
("", 1, True),
("p100(measurements.frames_slow_rate):>0", 1, False),
("p100(measurements.frames_slow_rate):>0.6", 0, True),
("p100(measurements.frames_slow_rate):>0.4", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"p100(measurements.frames_slow_rate)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=20),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["p100_measurements_frames_slow_rate"] == 0.5
def test_count_unique(self) -> None:
for idx in range(3):
data = load_data(
"transaction",
timestamp=before_now(minutes=3),
)
data["user"] = {"email": f"{idx}@example.com"}
data["tags"] = {"foo": "bar" if idx < 1 else "baz"}
self.store_event(data, project_id=self.project.id)
result = transactions.query(
selected_columns=["count_unique(user.display)", "count_unique(foo)"],
query="",
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[self.project],
),
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["count_unique_user_display"] == 3
assert data[0]["count_unique_foo"] == 2
def test_min_max(self) -> None:
"""Testing both min and max since they're so similar"""
for idx in range(3):
start = before_now(minutes=3)
end = start - timedelta(minutes=1 + idx)
data = load_data(
"transaction",
timestamp=start,
start_timestamp=end,
)
self.store_event(data, project_id=self.project.id)
result = transactions.query(
selected_columns=[
"min(transaction.duration)",
"max(transaction.duration)",
],
query="",
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[self.project],
),
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["min_transaction_duration"] == 60000
assert data[0]["max_transaction_duration"] == 180000
def test_stats_functions(self) -> None:
for idx in range(3):
start = before_now(minutes=3)
end = start - timedelta(minutes=1 + idx)
data = load_data(
"transaction",
timestamp=start,
start_timestamp=end,
)
self.store_event(data, project_id=self.project.id)
queries = [
("var(transaction.duration)", "var_transaction_duration", 3600000000),
("stddev(transaction.duration)", "stddev_transaction_duration", 60000),
# This is a nonsense cov&corr column, but gives us a consistent result for tests
(
"cov(transaction.duration,transaction.duration)",
"cov_transaction_duration_transaction_duration",
3600000000,
),
(
"corr(transaction.duration,transaction.duration)",
"corr_transaction_duration_transaction_duration",
1,
),
]
for column, alias, expected in queries:
result = transactions.query(
selected_columns=[column],
query="",
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[self.project],
),
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1, column
assert data[0][alias] == expected, column
def test_count_at_least(self) -> None:
end = before_now(minutes=3)
start_one_minute = end - timedelta(minutes=1)
start_two_minute = end - timedelta(minutes=2)
for idx in range(3):
data = load_data(
"transaction",
timestamp=end,
start_timestamp=start_one_minute if idx < 1 else start_two_minute,
)
self.store_event(data, project_id=self.project.id)
result = transactions.query(
selected_columns=[
"count_at_least(transaction.duration,60000)",
"count_at_least(transaction.duration,120000)",
],
query="",
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[self.project],
),
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["count_at_least_transaction_duration_60000"] == 3
assert data[0]["count_at_least_transaction_duration_120000"] == 2
def test_eps(self) -> None:
project = self.create_project()
for _ in range(6):
data = load_data(
"transaction",
timestamp=before_now(minutes=3),
)
data["transaction"] = "/eps"
self.store_event(data, project_id=project.id)
queries = [
("", 1, True),
("eps():>1", 0, True),
("eps():>1", 1, False),
("eps(10):>0.5", 1, True),
("tps():>1", 0, True),
("tps():>1", 1, False),
("tps(10):>0.5", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"eps()",
"eps(10)",
"eps(60)",
"tps()",
"tps(10)",
"tps(60)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["eps"] == 0.05
assert data[0]["eps_10"] == 0.6
assert data[0]["eps_60"] == 0.1
assert data[0]["tps"] == 0.05
assert data[0]["tps_10"] == 0.6
assert data[0]["tps_60"] == 0.1
def test_epm(self) -> None:
project = self.create_project()
for _ in range(6):
data = load_data(
"transaction",
timestamp=before_now(minutes=3),
)
data["transaction"] = "/epm"
self.store_event(data, project_id=project.id)
queries = [
("", 1, True),
("epm():>3", 0, True),
("epm():>3", 1, False),
("epm(10):>3", 1, True),
("tpm():>3", 0, True),
("tpm():>3", 1, False),
("tpm(10):>3", 1, True),
]
for query, expected_length, use_aggregate_conditions in queries:
result = transactions.query(
selected_columns=[
"transaction",
"epm()",
"epm(10)",
"epm(60)",
"tpm()",
"tpm(10)",
"tpm(60)",
],
query=query,
orderby=["transaction"],
snuba_params=SnubaParams(
start=before_now(minutes=4),
end=before_now(minutes=2),
projects=[project],
),
use_aggregate_conditions=use_aggregate_conditions,
referrer="discover",
)
data = result["data"]
assert len(data) == expected_length
if expected_length > 0:
assert data[0]["epm"] == 3
assert data[0]["epm_10"] == 36.0
assert data[0]["epm_60"] == 6
assert data[0]["tpm"] == 3
assert data[0]["tpm_10"] == 36.0
assert data[0]["tpm_60"] == 6
def test_transaction_status_filter(self) -> None:
data = load_data("transaction", timestamp=before_now(minutes=1))
data["transaction"] = "/test_transaction/success"
data["contexts"]["trace"]["status"] = "ok"
self.store_event(data, project_id=self.project.id)
self.store_event(data, project_id=self.project.id)
data = load_data("transaction", timestamp=before_now(minutes=1))
data["transaction"] = "/test_transaction/already_exists"
data["contexts"]["trace"]["status"] = "already_exists"
self.store_event(data, project_id=self.project.id)
def run_query(query, expected_statuses, message):
result = transactions.query(
selected_columns=["transaction.status"],
query=query,
snuba_params=self.snuba_params,
referrer="discover",
)
data = result["data"]
assert len(data) == len(expected_statuses), message
assert sorted(item["transaction.status"] for item in data) == sorted(
expected_statuses
), message
run_query("has:transaction.status transaction.status:ok", [0, 0, 0], "status 'ok'")
run_query(
"has:transaction.status transaction.status:[ok,already_exists]",
[0, 0, 0, 6],
"status 'ok' or 'already_exists'",
)
run_query("has:transaction.status !transaction.status:ok", [6], "status not 'ok'")
run_query(
"has:transaction.status !transaction.status:already_exists",
[0, 0, 0],
"status not 'already_exists'",
)
run_query(
"has:transaction.status !transaction.status:[ok,already_exists]",
[],
"status not 'ok' and not 'already_exists'",
)
run_query("!has:transaction.status", [], "status nonexistant")
def test_orderby_aggregate_function(self) -> None:
project = self.create_project()
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_count/success"
self.store_event(data, project_id=project.id)
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_count/unknown"
data["contexts"]["trace"]["status"] = "unknown_error"
self.store_event(data, project_id=project.id)
for i in range(6):
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = f"/failure_count/{i}"
data["contexts"]["trace"]["status"] = "unauthenticated"
self.store_event(data, project_id=project.id)
data = load_data("transaction", timestamp=before_now(minutes=5))
data["transaction"] = "/failure_count/0"
data["contexts"]["trace"]["status"] = "unauthenticated"
self.store_event(data, project_id=project.id)
orderbys = [
("failure_count()", [0, 1, 1, 1, 1, 1, 1, 2]),
("failure_count()", [0, 1, 1, 1, 1, 1, 1, 2]),
("-failure_count()", [2, 1, 1, 1, 1, 1, 1, 0]),
("-failure_count()", [2, 1, 1, 1, 1, 1, 1, 0]),
("failure_count", [0, 1, 1, 1, 1, 1, 1, 2]),
("-failure_count", [2, 1, 1, 1, 1, 1, 1, 0]),
]
for orderby, expected in orderbys:
result = transactions.query(
selected_columns=["transaction", "failure_count()"],
query="",
orderby=[orderby],
snuba_params=SnubaParams(
start=before_now(minutes=10),
end=before_now(minutes=2),
projects=[project],
),
referrer="discover",
)
data = result["data"]
assert [x["failure_count"] for x in data] == expected
@pytest.mark.skip("setting snuba config is too slow")
def test_spans_op_array_field(self) -> None:
trace_context = {
"parent_span_id": "8988cec7cc0779c1",
"type": "trace",
"op": "http.server",
"trace_id": "a7d67cf796774551a95be6543cacd459",
"span_id": "babaae0d4b7512d9",
"status": "ok",
"hash": "a" * 16,
"exclusive_time": 1.2345,
}
data = load_data(
"transaction", timestamp=before_now(minutes=10), trace_context=trace_context, spans=[]
)
self.store_event(data=data, project_id=self.project.id)
queries = [
("has:spans_op", 1),
("!has:spans_op", 0),
]
for query, expected_len in queries:
result = discover.query(
selected_columns=["spans_op"],
query=query,
snuba_params=SnubaParams(
start=before_now(minutes=12),
end=before_now(minutes=8),
projects=[self.project],
organization=self.organization,
),
referrer="discover",
)
data = result["data"]
assert len(data) == expected_len
def test_reflective_types(self) -> None:
results = transactions.query(
selected_columns=[
"p50(measurements.lcp)",
"p50(measurements.foo)",
"p50(spans.foo)",
],
query="event.type:transaction",
snuba_params=self.snuba_params,
use_aggregate_conditions=True,
referrer="discover",
)
assert results["meta"]["fields"] == {
"p50_measurements_lcp": "duration",
"p50_measurements_foo": "number",
"p50_spans_foo": "duration",
}
def test_measurements(self) -> None:
event_data = load_data("transaction", timestamp=before_now(seconds=3))
self.store_event(data=event_data, project_id=self.project.id)
results = transactions.query(
selected_columns=[
"measurements.fp",
"measurements.fcp",
"measurements.lcp",
"measurements.fid",
"measurements.cls",
"measurements.does_not_exist",
],
query="event.type:transaction !transaction:{}".format("a" * 32),
snuba_params=self.snuba_params,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
assert data[0]["measurements.fp"] == event_data["measurements"]["fp"]["value"]
assert data[0]["measurements.fcp"] == event_data["measurements"]["fcp"]["value"]
assert data[0]["measurements.lcp"] == event_data["measurements"]["lcp"]["value"]
assert data[0]["measurements.fid"] == event_data["measurements"]["fid"]["value"]
assert data[0]["measurements.cls"] == event_data["measurements"]["cls"]["value"]
assert data[0]["measurements.does_not_exist"] is None
def test_conditions_with_special_columns(self) -> None:
for val in ["b", "c", "d"]:
data = load_data("transaction")
data["timestamp"] = self.one_min_ago.isoformat()
data["transaction"] = val * 32
data["logentry"] = {"formatted": val * 32}
data["tags"] = {"sub_customer.is-Enterprise-42": val * 32}
self.store_event(data=data, project_id=self.project.id)
result = transactions.query(
selected_columns=["title", "message"],
query="event.type:transaction (title:{} OR message:{})".format("b" * 32, "c" * 32),
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["title"],
referrer="discover",
)
data = result["data"]
assert len(data) == 2
assert data[0]["title"] == "b" * 32
assert data[1]["title"] == "c" * 32
result = transactions.query(
selected_columns=["title", "sub_customer.is-Enterprise-42"],
query="event.type:transaction (title:{} AND sub_customer.is-Enterprise-42:{})".format(
"b" * 32, "b" * 32
),
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["title"],
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["title"] == "b" * 32
assert data[0]["sub_customer.is-Enterprise-42"] == "b" * 32
def test_conditions_with_aggregates(self) -> None:
events = [("a", 2), ("b", 3), ("c", 4)]
for ev in events:
val = ev[0] * 32
for i in range(ev[1]):
data = load_data("transaction")
data["timestamp"] = self.one_min_ago.isoformat()
data["transaction"] = f"{val}-{i}"
data["logentry"] = {"formatted": val}
data["tags"] = {"trek": val}
self.store_event(data=data, project_id=self.project.id)
result = transactions.query(
selected_columns=["trek", "count()"],
query="event.type:transaction (trek:{} OR trek:{}) AND count():>2".format(
"a" * 32, "b" * 32
),
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["trek"],
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["trek"] == "b" * 32
assert data[0]["count"] == 3
def test_conditions_with_nested_aggregates(self) -> None:
events = [("a", 2), ("b", 3), ("c", 4)]
for ev in events:
val = ev[0] * 32
for i in range(ev[1]):
data = load_data("transaction")
data["timestamp"] = self.one_min_ago.isoformat()
data["transaction"] = f"{val}-{i}"
data["logentry"] = {"formatted": val}
data["tags"] = {"trek": val}
self.store_event(data=data, project_id=self.project.id)
result = transactions.query(
selected_columns=["trek", "count()"],
query="(event.type:transaction AND (trek:{} AND (transaction:*{}* AND count():>2)))".format(
"b" * 32, "b" * 32
),
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["trek"],
use_aggregate_conditions=True,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["trek"] == "b" * 32
assert data[0]["count"] == 3
with pytest.raises(InvalidSearchQuery) as err:
transactions.query(
selected_columns=["trek", "transaction"],
query="(event.type:transaction AND (trek:{} AND (transaction:*{}* AND count():>2)))".format(
"b" * 32, "b" * 32
),
referrer="discover",
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["trek"],
use_aggregate_conditions=True,
)
assert "used in a condition but is not a selected column" in str(err)
def test_conditions_with_timestamps(self) -> None:
events = [("b", 1), ("c", 2), ("d", 3)]
for t, ev in enumerate(events):
val = ev[0] * 32
for i in range(ev[1]):
data = load_data("transaction", timestamp=self.now - timedelta(seconds=3 * t + 1))
data["transaction"] = f"{val}"
self.store_event(data=data, project_id=self.project.id)
results = transactions.query(
selected_columns=["transaction", "count()"],
query="event.type:transaction AND (timestamp:<{} OR timestamp:>{})".format(
(self.now - timedelta(seconds=5)).isoformat(),
(self.now - timedelta(seconds=3)).isoformat(),
),
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
orderby=["transaction"],
use_aggregate_conditions=True,
referrer="discover",
)
data = results["data"]
assert len(data) == 3
assert data[0]["transaction"] == "a" * 32
assert data[0]["count"] == 1
assert data[1]["transaction"] == "b" * 32
assert data[1]["count"] == 1
assert data[2]["transaction"] == "d" * 32
assert data[2]["count"] == 3
def test_timestamp_rollup_filter(self) -> None:
event_hour = self.event_time.replace(minute=0, second=0)
result = transactions.query(
selected_columns=["project.id", "user", "release"],
query="timestamp.to_hour:" + event_hour.isoformat(),
snuba_params=self.snuba_params,
referrer="discover",
)
data = result["data"]
assert len(data) == 1
assert data[0]["project.id"] == self.project.id
assert data[0]["user"] == "id:99"
assert data[0]["release"] == "first-release"
assert len(result["meta"]["fields"]) == 3
assert result["meta"]["fields"] == {
"project.id": "integer",
"user": "string",
"release": "string",
}
def test_count_with_or(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=3))
data["transaction"] = "a" * 32
self.store_event(data=data, project_id=self.project.id)
results = transactions.query(
selected_columns=["transaction", "count()"],
query="event.type:transaction AND (count():<1 OR count():>0)",
snuba_params=self.snuba_params,
orderby=["transaction"],
use_aggregate_conditions=True,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
assert data[0]["transaction"] == "a" * 32
assert data[0]["count"] == 2
def test_array_join(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=90))
data["transaction"] = "foo"
data["measurements"] = {
"fp": {"value": 1000},
"fcp": {"value": 1000},
"lcp": {"value": 1000},
}
self.store_event(data=data, project_id=self.project.id)
results = transactions.query(
selected_columns=["array_join(measurements_key)"],
query="transaction:foo",
snuba_params=self.snuba_params,
functions_acl=["array_join"],
referrer="discover",
)
assert {"fcp", "fp", "lcp"} == {
row["array_join_measurements_key"] for row in results["data"]
}
def test_access_to_private_functions(self) -> None:
# using private functions directly without access should error
with pytest.raises(InvalidSearchQuery, match="array_join: no access to private function"):
transactions.query(
selected_columns=["array_join(tags.key)"],
query="",
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
referrer="discover",
)
# using private functions in an aggregation without access should error
with pytest.raises(InvalidSearchQuery, match="histogram: no access to private function"):
for array_column in ARRAY_COLUMNS:
transactions.query(
selected_columns=[f"histogram({array_column}_value, 1,0,1)"],
query=f"histogram({array_column}_value, 1,0,1):>0",
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
use_aggregate_conditions=True,
referrer="discover",
)
# using private functions in an aggregation without access should error
# with auto aggregation on
with pytest.raises(InvalidSearchQuery, match="histogram: no access to private function"):
for array_column in ARRAY_COLUMNS:
transactions.query(
selected_columns=["count()"],
query=f"histogram({array_column}_value, 1,0,1):>0",
snuba_params=SnubaParams(
start=self.two_min_ago,
end=self.now,
projects=[self.project],
),
referrer="discover",
auto_aggregations=True,
use_aggregate_conditions=True,
)
def test_sum_array_combinator(self) -> None:
data = load_data("transaction", timestamp=before_now(seconds=3))
data["measurements"] = {
"fp": {"value": 1000},
"fcp": {"value": 1000},
"lcp": {"value": 1000},
}
self.store_event(data=data, project_id=self.project.id)
results = transactions.query(
selected_columns=["sumArray(measurements_value)"],
query="!transaction:{}".format("a" * 32),
snuba_params=self.snuba_params,
# make sure to opt in to gain access to the function
functions_acl=["sumArray"],
referrer="discover",
# -Array combinator is only supported in SnQL
)
assert results["data"][0]["sumArray_measurements_value"] == 3000.0
def test_span_op_breakdowns(self) -> None:
event_data = load_data("transaction", timestamp=before_now(seconds=3))
self.store_event(data=event_data, project_id=self.project.id)
results = transactions.query(
selected_columns=[
"spans.http",
"spans.db",
"spans.resource",
"spans.browser",
"spans.total.time",
"spans.does_not_exist",
],
query="event.type:transaction !transaction:{}".format("a" * 32),
snuba_params=self.snuba_params,
referrer="discover",
)
data = results["data"]
assert len(data) == 1
span_ops = event_data["breakdowns"]["span_ops"]
assert data[0]["spans.http"] == span_ops["ops.http"]["value"]
assert data[0]["spans.db"] == span_ops["ops.db"]["value"]
assert data[0]["spans.resource"] == span_ops["ops.resource"]["value"]
assert data[0]["spans.browser"] == span_ops["ops.browser"]["value"]
assert data[0]["spans.total.time"] == span_ops["total.time"]["value"]
assert data[0]["spans.does_not_exist"] is None
| TransactionQueryIntegrationTest |
python | mlflow__mlflow | mlflow/pyfunc/scoring_server/client.py | {
"start": 1124,
"end": 3190
} | class ____(BaseScoringServerClient):
def __init__(self, host, port):
self.url_prefix = f"http://{host}:{port}"
def ping(self):
ping_status = requests.get(url=self.url_prefix + "/ping")
if ping_status.status_code != 200:
raise Exception(f"ping failed (error code {ping_status.status_code})")
def get_version(self):
resp_status = requests.get(url=self.url_prefix + "/version")
if resp_status.status_code != 200:
raise Exception(f"version failed (error code {resp_status.status_code})")
return resp_status.text
def wait_server_ready(self, timeout=30, scoring_server_proc=None):
begin_time = time.time()
while True:
time.sleep(0.3)
try:
self.ping()
return
except Exception:
pass
if time.time() - begin_time > timeout:
break
if scoring_server_proc is not None:
return_code = scoring_server_proc.poll()
if return_code is not None:
raise RuntimeError(f"Server process already exit with returncode {return_code}")
raise RuntimeError("Wait scoring server ready timeout.")
def invoke(self, data, params: dict[str, Any] | None = None):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
:py:class:`PredictionsResponse <mlflow.deployments.PredictionsResponse>` result.
"""
response = requests.post(
url=self.url_prefix + "/invocations",
data=dump_input_data(data, params=params),
headers={"Content-Type": scoring_server.CONTENT_TYPE_JSON},
)
if response.status_code != 200:
raise Exception(
f"Invocation failed (error code {response.status_code}, response: {response.text})"
)
return PredictionsResponse.from_json(response.text)
| ScoringServerClient |
python | pytorch__pytorch | test/dynamo/test_einops.py | {
"start": 4250,
"end": 5628
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x_abc, suffix=""):
a, b, c = x_abc.shape
def suf(pattern):
parts = pattern.split()
return " ".join([p if p[-1] not in "acd" else p + suffix for p in parts])
# patterns look a bit strange because names a, c, d will be modified on every run
# by suf function
x_abcd = repeat(x_abc, suf("a b c -> a b c 4"))
x_abc = reduce(x_abcd, suf("a b c d -> a b c"), "min")
x_abdc, ps = pack([x_abc] * (2 + len(suffix)), suf("a b * c"))
x_array = unpack(rearrange(x_abdc, suf("a b d c -> (a b ) 1 c d")), ps, "ab one1 c *")
x1 = x_array[0] + len(x_array)
x1 = rearrange(x1, suf("(a b ) 1 c -> a b c"), b=b)
addition = einsum(x_abc, x_abcd, suf("a b c , a b c d -> d"))[0]
return x1 + addition
compiled_fn = torch.compile(TorchModuleWithOperations(), fullgraph=True)
x = torch.arange(2 * 3 * 5).view(2, 3, 5)
y = compiled_fn(x)
# Should not recompile!
with torch.compiler.set_stance("fail_on_recompile"):
z = compiled_fn(x)
"""
subprocess.check_output([sys.executable, "-c", script])
instantiate_parametrized_tests(
TestEinops,
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TorchModuleWithOperations |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/utils.py | {
"start": 4100,
"end": 4284
} | class ____(enum.Enum):
rest = "rest"
graphql = "graphql"
@classmethod
def api_types(cls) -> List:
return [api_type.value for api_type in ApiTypeEnum]
| ApiTypeEnum |
python | getsentry__sentry | tests/sentry/issues/test_status_change.py | {
"start": 630,
"end": 3064
} | class ____(TestCase):
def test_ignore_until_escalating(self) -> None:
assert (
infer_substatus(
new_status=GroupStatus.IGNORED,
new_substatus=None,
status_details={"untilEscalating": True},
group_list=[],
)
== GroupSubStatus.UNTIL_ESCALATING
)
def test_ignore_condition_met(self) -> None:
for condition in IGNORED_CONDITION_FIELDS:
assert (
infer_substatus(
new_status=GroupStatus.IGNORED,
new_substatus=None,
status_details={condition: 50},
group_list=[],
)
== GroupSubStatus.UNTIL_CONDITION_MET
)
def test_ignore_forever(self) -> None:
assert (
infer_substatus(
new_status=GroupStatus.IGNORED,
new_substatus=None,
status_details={"status": "ignored"},
group_list=[],
)
== GroupSubStatus.FOREVER
)
def test_unresolve_new_group(self) -> None:
assert (
infer_substatus(
new_status=GroupStatus.UNRESOLVED,
new_substatus=None,
status_details={},
group_list=[self.create_group(status=GroupStatus.IGNORED)],
)
== GroupSubStatus.NEW
)
def test_unresolve_ongoing_group(self) -> None:
assert (
infer_substatus(
new_status=GroupStatus.UNRESOLVED,
new_substatus=None,
status_details={},
group_list=[
self.create_group(first_seen=datetime.now(timezone.utc) - timedelta(days=10))
],
)
== GroupSubStatus.ONGOING
)
def test_unresolve_regressed_group(self) -> None:
assert (
infer_substatus(
new_status=GroupStatus.UNRESOLVED,
new_substatus=None,
status_details={},
group_list=[
self.create_group(
status=GroupStatus.RESOLVED,
first_seen=datetime.now(timezone.utc) - timedelta(days=10),
)
],
)
== GroupSubStatus.REGRESSED
)
| InferSubstatusTest |
python | celery__celery | t/unit/security/test_serialization.py | {
"start": 443,
"end": 2520
} | class ____(SecurityCase):
def _get_s(self, key, cert, certs, serializer="json"):
store = CertStore()
for c in certs:
store.add_cert(Certificate(c))
return SecureSerializer(
PrivateKey(key), Certificate(cert), store, serializer=serializer
)
@pytest.mark.parametrize(
"data", [1, "foo", b"foo", {"foo": 1}, {"foo": DEFAULT_SEPARATOR}]
)
@pytest.mark.parametrize("serializer", ["json", "pickle"])
def test_serialize(self, data, serializer):
s = self._get_s(KEY1, CERT1, [CERT1], serializer=serializer)
assert s.deserialize(s.serialize(data)) == data
def test_deserialize(self):
s = self._get_s(KEY1, CERT1, [CERT1])
with pytest.raises(SecurityError):
s.deserialize('bad data')
def test_unmatched_key_cert(self):
s = self._get_s(KEY1, CERT2, [CERT1, CERT2])
with pytest.raises(SecurityError):
s.deserialize(s.serialize('foo'))
def test_unknown_source(self):
s1 = self._get_s(KEY1, CERT1, [CERT2])
s2 = self._get_s(KEY1, CERT1, [])
with pytest.raises(SecurityError):
s1.deserialize(s1.serialize('foo'))
with pytest.raises(SecurityError):
s2.deserialize(s2.serialize('foo'))
def test_self_send(self):
s1 = self._get_s(KEY1, CERT1, [CERT1])
s2 = self._get_s(KEY1, CERT1, [CERT1])
assert s2.deserialize(s1.serialize('foo')) == 'foo'
def test_separate_ends(self):
s1 = self._get_s(KEY1, CERT1, [CERT2])
s2 = self._get_s(KEY2, CERT2, [CERT1])
assert s2.deserialize(s1.serialize('foo')) == 'foo'
def test_register_auth(self):
register_auth(KEY1, None, CERT1, '')
assert 'application/data' in registry._decoders
def test_lots_of_sign(self):
for i in range(1000):
rdata = bytes_to_str(base64.urlsafe_b64encode(os.urandom(265)))
s = self._get_s(KEY1, CERT1, [CERT1])
assert s.deserialize(s.serialize(rdata)) == rdata
| test_secureserializer |
python | getsentry__sentry | src/sentry/utils/retries.py | {
"start": 3436,
"end": 5466
} | class ____(RetryPolicy):
"""
A basic policy that can be used to retry a callable based on the result
of a test function that determines whether or not to retry after the
callable throws an exception.
The test function takes two arguments: the number of times the callable
has unsuccessfully been invoked, and the exception instance that was
raised during the last execution attempt. This function is expected to
return a boolean: if the value is ``True``, the callable will be retried;
if the value is ``False``, the callable will not be retried and the
exception thrown during the previous execution attempt will be raised.
The delay function (if provided) takes one argument: the number of times
the callable has unsuccessfully been invoked. This function is expected
to return a float value: the number of seconds to wait before the next
attempt. If the delay function is not provided, the callable will be
immediately retried.
"""
def __init__(
self,
test_function: Callable[[int, Exception], bool],
delay_function: Callable[[int], float] | None = None,
) -> None:
self.__test_function = test_function
self.__delay_function = delay_function if delay_function is not None else lambda i: 0.0
def __call__(self, function: Callable[[], T]) -> T:
for i in itertools.count(1):
try:
return function()
except Exception as e:
if self.__test_function(i, e):
delay = self.__delay_function(i)
logger.warning(
"Caught %r while executing %r (attempt #%s), retrying in %f seconds...",
e,
function,
i,
delay,
)
time.sleep(delay)
else:
raise
assert False, "retry loop exited without returning"
| ConditionalRetryPolicy |
python | encode__django-rest-framework | tests/test_request.py | {
"start": 983,
"end": 1359
} | class ____(TestCase):
def test_request_type(self):
request = Request(factory.get('/'))
message = (
'The `request` argument must be an instance of '
'`django.http.HttpRequest`, not `rest_framework.request.Request`.'
)
with self.assertRaisesMessage(AssertionError, message):
Request(request)
| TestInitializer |
python | rapidsai__cudf | python/cudf/cudf/core/column_accessor.py | {
"start": 821,
"end": 2609
} | class ____(dict):
"""A dictionary whose __getitem__ method accesses nested dicts.
This class directly subclasses dict for performance, so there are a number
of gotchas: 1) the only safe accessor for nested elements is
`__getitem__` (all other accessors will fail to perform nested lookups), 2)
nested mappings will not exhibit the same behavior (they will be raw
dictionaries unless explicitly created to be of this class), and 3) to
construct this class you _must_ use `from_zip` to get appropriate treatment
of tuple keys.
"""
@classmethod
def from_zip(cls, data: Iterator):
"""Create from zip, specialized factory for nesting."""
obj = cls()
for key, value in data:
d = obj
for k in key[:-1]:
d = d.setdefault(k, {})
d[key[-1]] = value
return obj
def __getitem__(self, key):
"""Recursively apply dict.__getitem__ for nested elements."""
# As described in the pandas docs
# https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced-indexing-with-hierarchical-index
# accessing nested elements of a multiindex must be done using a tuple.
# Lists and other sequences are treated as accessing multiple elements
# at the top level of the index.
if isinstance(key, tuple):
return reduce(dict.__getitem__, key, self)
return super().__getitem__(key)
def _to_flat_dict_inner(d: dict, parents: tuple = ()):
for k, v in d.items():
if not isinstance(v, d.__class__):
if parents:
k = (*parents, k)
yield (k, v)
else:
yield from _to_flat_dict_inner(d=v, parents=(*parents, k))
| _NestedGetItemDict |
python | scikit-learn__scikit-learn | sklearn/externals/_arff.py | {
"start": 11578,
"end": 11755
} | class ____(Exception):
message: Optional[str] = None
def __init__(self):
self.line = -1
def __str__(self):
return self.message%self.line
| ArffException |
python | ray-project__ray | python/ray/data/block.py | {
"start": 4417,
"end": 5650
} | class ____:
"""Execution stats for this block.
Attributes:
wall_time_s: The wall-clock time it took to compute this block.
cpu_time_s: The CPU time it took to compute this block.
node_id: A unique id for the node that computed this block.
max_uss_bytes: An estimate of the maximum amount of physical memory that the
process was using while computing this block.
"""
def __init__(self):
self.start_time_s: Optional[float] = None
self.end_time_s: Optional[float] = None
self.wall_time_s: Optional[float] = None
self.udf_time_s: Optional[float] = 0
self.cpu_time_s: Optional[float] = None
self.node_id = ray.runtime_context.get_runtime_context().get_node_id()
self.max_uss_bytes: int = 0
self.task_idx: Optional[int] = None
@staticmethod
def builder() -> "_BlockExecStatsBuilder":
return _BlockExecStatsBuilder()
def __repr__(self):
return repr(
{
"wall_time_s": self.wall_time_s,
"cpu_time_s": self.cpu_time_s,
"udf_time_s": self.udf_time_s,
"node_id": self.node_id,
}
)
| BlockExecStats |
python | getsentry__sentry | tests/sentry/seer/endpoints/test_group_ai_summary.py | {
"start": 363,
"end": 2700
} | class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.group = self.create_group()
self.url = self._get_url(self.group.id)
self.login_as(user=self.user)
def _get_url(self, group_id: int) -> str:
return f"/api/0/issues/{group_id}/summarize/"
@patch("sentry.seer.endpoints.group_ai_summary.get_issue_summary")
def test_endpoint_calls_get_issue_summary(self, mock_get_issue_summary: MagicMock) -> None:
mock_summary_data = {"headline": "Test headline"}
mock_get_issue_summary.return_value = (mock_summary_data, 200)
response = self.client.post(self.url, data={"event_id": "test_event_id"}, format="json")
assert response.status_code == 200
assert response.data == mock_summary_data
mock_get_issue_summary.assert_called_once_with(
group=self.group,
user=ANY,
force_event_id="test_event_id",
source=SeerAutomationSource.ISSUE_DETAILS,
)
@patch("sentry.seer.endpoints.group_ai_summary.get_issue_summary")
def test_endpoint_without_event_id(self, mock_get_issue_summary: MagicMock) -> None:
mock_summary_data = {"headline": "Test headline"}
mock_get_issue_summary.return_value = (mock_summary_data, 200)
response = self.client.post(self.url, format="json")
assert response.status_code == 200
assert response.data == mock_summary_data
mock_get_issue_summary.assert_called_once_with(
group=self.group,
user=ANY,
force_event_id=None,
source=SeerAutomationSource.ISSUE_DETAILS,
)
@patch("sentry.seer.endpoints.group_ai_summary.get_issue_summary")
def test_endpoint_with_error_response(self, mock_get_issue_summary: MagicMock) -> None:
error_data = {"detail": "An error occurred"}
mock_get_issue_summary.return_value = (error_data, 400)
response = self.client.post(self.url, format="json")
assert response.status_code == 400
assert response.data == error_data
mock_get_issue_summary.assert_called_once_with(
group=self.group,
user=ANY,
force_event_id=None,
source=SeerAutomationSource.ISSUE_DETAILS,
)
| GroupAiSummaryEndpointTest |
python | Netflix__metaflow | test/unit/inheritance/flows/comprehensive_linear_base.py | {
"start": 831,
"end": 1076
} | class ____(BaseB):
"""Another middle class with additional config and parameter"""
gamma = Parameter("gamma", help="Gamma parameter", default=2.5)
config_c = Config("config_c", default_value={"mode": "production", "debug": False})
| BaseC |
python | scipy__scipy | scipy/sparse/linalg/_interface.py | {
"start": 21821,
"end": 22747
} | class ____(LinearOperator):
"""Transposition of arbitrary Linear Operator"""
def __init__(self, A):
shape = (A.shape[1], A.shape[0])
super().__init__(dtype=A.dtype, shape=shape)
self.A = A
self.args = (A,)
def _matvec(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatvec(np.conj(x)))
def _rmatvec(self, x):
return np.conj(self.A._matvec(np.conj(x)))
def _matmat(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatmat(np.conj(x)))
def _rmatmat(self, x):
return np.conj(self.A._matmat(np.conj(x)))
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.result_type(*dtypes)
| _TransposedLinearOperator |
python | Netflix__metaflow | metaflow/_vendor/zipp.py | {
"start": 2873,
"end": 3634
} | class ____(CompleteDirs):
"""
ZipFile subclass to ensure implicit
dirs exist and are resolved rapidly.
"""
def namelist(self):
with contextlib.suppress(AttributeError):
return self.__names
self.__names = super(FastLookup, self).namelist()
return self.__names
def _name_set(self):
with contextlib.suppress(AttributeError):
return self.__lookup
self.__lookup = super(FastLookup, self)._name_set()
return self.__lookup
def _pathlib_compat(path):
"""
For path-like objects, convert to a filename for compatibility
on Python 3.6.1 and earlier.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)
| FastLookup |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 88145,
"end": 99165
} | class ____(Operation):
def __init__(self, subscripts, *, name=None):
super().__init__(name=name)
self.subscripts = subscripts
def call(self, *operands, **kwargs):
return backend.numpy.einsum(self.subscripts, *operands, **kwargs)
def compute_output_spec(self, *operands):
"""Compute the output shape of `einsum`.
The shape computation follows the steps below:
1. Find all letters in the input specs (left part of "->"), and
break them into two categories: letters appearing more than once
go to `reduced_dims`, otherwise go to `kept_dims`.
2. Adjust `reduced_dims` and `kept_dims` based on the output spec
(right part of "->"). The rule is if the letter appears in the
output spec, then move it to `kept_dims`, otherwise move it to
`reduced_dims`.
3. Compute the target output shape. If no output spec is set, then
the target output shape will be "...{kept_dims}", e.g., "...ijk",
else it will be the same as output spec. "..." is a wildcard that
could map shape of arbitrary length.
4. For each operand in `operands`, map the shape specified in the input
spec to the output target, e.g, if operand is of shape [2,3,4],
input spec is "i..." and output target is "i...jk", then 2 will go
the index 0. For dims not represented by any letter, insert to the
wildcard part. For each letter in output target not appearing in
input spec, the dim will be 1 for broadcasting. After 4, each
operand should have a target shape containing only number and
`None`.
5. Broadcast all shapes computed from 4, and the result is the output
shape.
Let's take an example to illustrate the steps above. Let's define:
```python
x = KerasTensor([None, 3, 4])
y = KerasTensor(2, 4, 3)
z = knp.einsum("...ij, kji->...k", x, y)
```
1. `reduced_dims` is {"i", "j"}, `kept_dims` is {"k"}.
2. `reduced_dims` is still {"i", "j"}, and `kept_dims` is {"k"}.
3. Output target is "...k".
4. For `x`, the input spec is "...ij", and the output target is "...k".
"i" and "j" do not appear in the output target, so no replacement
happens, and [None] goes to wildcard. Afterwards, "k" is replaced
by 1, so we get shape [None, 1]. Applying the same logic to `y`, we
get shape [2].
5. Broadcast [None, 1] and [2], and we get [None, 2], which is the
output shape.
"""
split_subscripts = self.subscripts.split("->")
if len(split_subscripts) > 2:
raise ValueError(
"At most one '->' is supported in `einsum` subscripts, but "
f"received {self.subscripts}."
)
if len(split_subscripts) == 2:
subscripts = split_subscripts[0]
output_spec = split_subscripts[1]
else:
subscripts = self.subscripts
output_spec = None
input_specs = subscripts.split(",")
if len(input_specs) != len(operands):
raise ValueError(
f"Number of operands ({len(operands)}) does not match the "
f"number of input specs ({len(input_specs)}) in `einsum`, "
f"received subscripts={self.subscripts}."
)
reduced_dims = set()
kept_dims = set()
for s in subscripts:
if not s.isalpha():
continue
if s not in reduced_dims and s not in kept_dims:
kept_dims.add(s)
elif s in kept_dims:
kept_dims.remove(s)
reduced_dims.add(s)
if output_spec is not None:
# The output spec changes the rule of kept_dims and reduced_dims.
# In short, dims appearing in the output spec will be kept, and
# dims not appearing in the output spec will be reduced.
kept_dims_copy = kept_dims.copy()
reduced_dims_copy = reduced_dims.copy()
for dim in kept_dims:
if dim not in output_spec:
kept_dims_copy.remove(dim)
reduced_dims_copy.add(dim)
for dim in reduced_dims:
if dim in output_spec:
reduced_dims_copy.remove(dim)
kept_dims_copy.add(dim)
kept_dims = kept_dims_copy
reduced_dims = reduced_dims_copy
reduced_dims = sorted(reduced_dims)
kept_dims = sorted(kept_dims)
if output_spec is None:
target_broadcast_spec = f"...{''.join(kept_dims)}"
else:
target_broadcast_spec = output_spec
expanded_operands_shapes = []
for x, spec in zip(operands, input_specs):
x_shape = getattr(x, "shape", [])
x_shape = [-1 if size is None else size for size in x_shape]
split_spec = spec.split("...")
expanded_shape = target_broadcast_spec
if len(split_spec) == 1:
# In this case, the input spec is just a string of letters,
# e.g., "ijk".
if len(x_shape) != len(split_spec[0]):
raise ValueError(
"Number of dimensions in the subscript does not "
"match the number of dimensions in the operand, "
f"received subscript `{spec}` and operand of shape "
f"{x_shape}."
)
for size, s in zip(x_shape, split_spec[0]):
# Replace the letter with the right shape.
expanded_shape = expanded_shape.replace(s, f"{str(size)} ")
expanded_shape = expanded_shape.replace("...", "")
else:
# In this case, the input spec has "...", e.g., "i...j", "i...",
# or "...j".
for i in range(len(split_spec[0])):
expanded_shape = expanded_shape.replace(
split_spec[0][i], f"{x_shape[i]} "
)
for i in range(len(split_spec[1])):
expanded_shape = expanded_shape.replace(
split_spec[1][-i - 1], f"{x_shape[-i - 1]} "
)
# Shape matched by "..." will be inserted to the position of
# "...".
wildcard_shape_start_index = len(split_spec[0])
wildcard_shape_end_index = (
len(x_shape)
if len(split_spec[1]) == 0
else -len(split_spec[1])
)
wildcard_shape = x_shape[
wildcard_shape_start_index:wildcard_shape_end_index
]
wildcard_shape_str = (
f"{' '.join([str(size) for size in wildcard_shape])} "
)
expanded_shape = expanded_shape.replace(
"...", wildcard_shape_str
)
# Replace all letters not yet handled with "1" for broadcasting.
expanded_shape = re.sub("[a-z]", "1 ", expanded_shape)
expanded_shape = expanded_shape.split()
expanded_shape = [
None if size == "-1" else int(size) for size in expanded_shape
]
expanded_operands_shapes.append(expanded_shape)
output_shape = expanded_operands_shapes[0]
for shape in expanded_operands_shapes[1:]:
output_shape = broadcast_shapes(output_shape, shape)
dtypes_to_resolve = list(
set(
backend.standardize_dtype(getattr(x, "dtype", type(x)))
for x in operands
)
)
if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == "int8":
dtype = "int32"
else:
dtype = dtypes.result_type(*dtypes_to_resolve)
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.einsum", "keras.ops.numpy.einsum"])
def einsum(subscripts, *operands, **kwargs):
"""Evaluates the Einstein summation convention on the operands.
Args:
subscripts: Specifies the subscripts for summation as comma separated
list of subscript labels. An implicit (classical Einstein
summation) calculation is performed unless the explicit indicator
`->` is included as well as subscript labels of the precise
output form.
operands: The operands to compute the Einstein sum of.
Returns:
The calculation based on the Einstein summation convention.
Example:
>>> from keras.src import ops
>>> a = ops.arange(25).reshape(5, 5)
>>> b = ops.arange(5)
>>> c = ops.arange(6).reshape(2, 3)
Trace of a matrix:
>>> ops.einsum("ii", a)
60
>>> ops.einsum(a, [0, 0])
60
>>> ops.trace(a)
60
Extract the diagonal:
>>> ops.einsum("ii -> i", a)
array([ 0, 6, 12, 18, 24])
>>> ops.einsum(a, [0, 0], [0])
array([ 0, 6, 12, 18, 24])
>>> ops.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis:
>>> ops.einsum("ij -> i", a)
array([ 10, 35, 60, 85, 110])
>>> ops.einsum(a, [0, 1], [0])
array([ 10, 35, 60, 85, 110])
>>> ops.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional tensors summing a single axis can be done
with ellipsis:
>>> ops.einsum("...j -> ...", a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [..., 1], [...])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose or reorder any number of axes:
>>> ops.einsum("ji", c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.einsum("ij -> ji", c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.einsum(c, [1, 0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> ops.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Matrix vector multiplication:
>>> ops.einsum("ij, j", a, b)
array([ 30, 80, 130, 180, 230])
>>> ops.einsum(a, [0, 1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> ops.einsum("...j, j", a, b)
array([ 30, 80, 130, 180, 230])
"""
if any_symbolic_tensors(operands):
return Einsum(subscripts).symbolic_call(*operands, **kwargs)
return backend.numpy.einsum(subscripts, *operands, **kwargs)
@keras_export(["keras.ops.empty", "keras.ops.numpy.empty"])
def empty(shape, dtype=None):
"""Return a tensor of given shape and type filled with uninitialized data.
Args:
shape: Shape of the empty tensor.
dtype: Desired data type of the empty tensor.
Returns:
The empty tensor.
"""
return backend.numpy.empty(shape, dtype=dtype)
| Einsum |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 30025,
"end": 30356
} | class ____(TestCase):
provider: type[Provider] = DummyProvider
def setUp(self):
super().setUp()
# TestCase automatically sets up dummy provider
if self.provider != DummyProvider:
auth.register(self.provider)
self.addCleanup(auth.unregister, self.provider)
| AuthProviderTestCase |
python | ansible__ansible | test/lib/ansible_test/_internal/util.py | {
"start": 29873,
"end": 30109
} | class ____(Exception):
"""An unhandled internal error indicating a bug in the code."""
def __init__(self, message: str) -> None:
super().__init__(f'An internal error has occurred in ansible-test: {message}')
| InternalError |
python | vyperlang__vyper | vyper/compiler/input_bundle.py | {
"start": 1444,
"end": 2100
} | class ____(CompilerInput):
# some json input, which has already been parsed into a dict or list
# this is needed because json inputs present json interfaces as json
# objects, not as strings. this class helps us avoid round-tripping
# back to a string to pretend it's a file.
data: Any = field() # something that json.load() returns
@classmethod
def from_file_input(cls, file_input: FileInput) -> "JSONInput":
s = json.loads(file_input.source_code)
return cls(**asdict(file_input), data=s)
def __hash__(self):
# don't use dataclass provided implementation
return super().__hash__()
| JSONInput |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 60740,
"end": 60961
} | class ____(_PrintableStructure):
_fields_ = [
('l1Cache', c_ulonglong),
('l2Cache', c_ulonglong),
('deviceMemory', c_ulonglong),
('registerFile', c_ulonglong),
]
| c_nvmlEccErrorCounts_t |
python | eth-brownie__brownie | brownie/typing.py | {
"start": 3951,
"end": 4150
} | class ____(_CompilerSettings):
evmVersion: NotRequired[Optional[EvmVersion]]
remappings: List[str]
optimizer: NotRequired[OptimizerSettings]
viaIR: NotRequired[bool]
@final
| SettingsSolc |
python | cython__cython | docs/examples/userguide/sharing_declarations/shrubbing.py | {
"start": 30,
"end": 203
} | class ____:
def __cinit__(self, w: cython.int, l: cython.int):
self.width = w
self.length = l
def standard_shrubbery():
return Shrubbery(3, 7)
| Shrubbery |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 9077,
"end": 9531
} | class ____:
def __init__(self, length=64, vocab_size=100, num_labels=5):
self.length = length
self.sequences = [torch.randint(0, vocab_size, (64,)).tolist() for _ in range(length)]
self.labels = torch.randint(0, num_labels, (length,)).tolist()
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.sequences[i], "label": self.labels[i]}
| SequenceClassificationDataset |
python | pytorch__pytorch | test/inductor/test_aoti_cross_compile_windows.py | {
"start": 8399,
"end": 14105
} | class ____(TestCase):
"""
Test class for AOT Inductor Windows cross-compilation.
Define test methods that return ModelTestConfig, and the decorator
will auto-generate compile/load test methods.
"""
def _define_simple(self):
"""Define the Simple model and its test configuration."""
class Simple(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(10, 16)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Linear(16, 1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return x
return ModelTestConfig(
name="simple",
model_class=Simple,
example_inputs=(torch.randn(8, 10, device=GPU_TYPE),),
dynamic_shapes={"x": {0: torch.export.Dim("batch", min=1, max=1024)}},
)
def _define_simple_cnn(self):
"""Define the SimpleCNN model and its test configuration."""
class SimpleCNN(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(3, 16, 3)
self.relu = torch.nn.ReLU()
self.pool = torch.nn.AdaptiveAvgPool2d((1, 1))
self.fc = torch.nn.Linear(16, 10)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.pool(x)
x = x.flatten(1)
x = self.fc(x)
return x
return ModelTestConfig(
name="simple_cnn",
model_class=SimpleCNN,
example_inputs=(torch.randn(2, 3, 32, 32, device=GPU_TYPE),),
dynamic_shapes={"x": {0: torch.export.Dim("batch", min=1, max=16)}},
rtol=1e-3,
atol=1e-3,
)
def _define_transformer(self):
"""Define the SimpleTransformer model and its test configuration."""
class SimpleTransformer(torch.nn.Module):
def __init__(self):
super().__init__()
self.embedding = torch.nn.Linear(128, 256)
self.attention = torch.nn.MultiheadAttention(256, 8, batch_first=True)
self.norm1 = torch.nn.LayerNorm(256)
self.ffn = torch.nn.Sequential(
torch.nn.Linear(256, 1024),
torch.nn.ReLU(),
torch.nn.Linear(1024, 256),
)
self.norm2 = torch.nn.LayerNorm(256)
self.output = torch.nn.Linear(256, 10)
def forward(self, x):
# x shape: (batch, seq_len, input_dim)
x = self.embedding(x)
attn_out, _ = self.attention(x, x, x)
x = self.norm1(x + attn_out)
ffn_out = self.ffn(x)
x = self.norm2(x + ffn_out)
x = x.mean(dim=1) # Global average pooling
x = self.output(x)
return x
return ModelTestConfig(
name="transformer",
model_class=SimpleTransformer,
example_inputs=(torch.randn(4, 16, 128, device=GPU_TYPE),),
dynamic_shapes={"x": {0: torch.export.Dim("batch", min=1, max=32)}},
rtol=1e-3,
atol=1e-3,
)
if __name__ == "__main__":
import sys
from torch._inductor.test_case import run_tests
# Check for --package-dir argument and remove it before unittest sees it
package_dir = None
win_torch_lib_dir = None
filtered_argv = []
i = 0
while i < len(sys.argv):
if sys.argv[i] == "--package-dir":
if i + 1 < len(sys.argv):
package_dir = sys.argv[i + 1]
i += 2 # Skip both --package-dir and its value
else:
print("Error: --package-dir requires a valid directory path")
sys.exit(1)
elif sys.argv[i].startswith("--package-dir="):
package_dir = sys.argv[i].split("=", 1)[1]
i += 1
elif sys.argv[i] == "--win-torch-lib-dir":
if i + 1 < len(sys.argv):
win_torch_lib_dir = sys.argv[i + 1]
i += 2 # Skip both --win-torch-lib-dir and its value
else:
print("Error: --win-torch-lib-dir requires a valid directory path")
sys.exit(1)
elif sys.argv[i].startswith("--win-torch-lib-dir="):
win_torch_lib_dir = sys.argv[i].split("=", 1)[1]
i += 1
else:
filtered_argv.append(sys.argv[i])
i += 1
# Validate and set the base path for package storage
if package_dir:
try:
package_path = Path(package_dir)
package_path.mkdir(parents=True, exist_ok=True)
# Test write access
test_file = package_path / ".test_write"
test_file.touch()
test_file.unlink()
WindowsCrossCompilationTestFramework.set_base_path(package_path)
except Exception:
print("Error: --package-dir requires a valid directory path")
sys.exit(1)
# Set Windows torch libs path if provided (only needed for compile tests)
if win_torch_lib_dir:
WindowsCrossCompilationTestFramework.set_win_torch_libs_path(win_torch_lib_dir)
# Update sys.argv to remove our custom arguments
sys.argv = filtered_argv
if HAS_GPU:
run_tests(needs="filelock")
| TestAOTInductorWindowsCrossCompilation |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_constructors.py | {
"start": 15243,
"end": 17057
} | class ____:
def test_construct_from_time_unit(self):
# GH#54097 only passing a time component, no date
ts = Timestamp("01:01:01.111")
assert ts.unit == "us"
def test_constructor_str_infer_reso(self):
# non-iso8601 path
# _parse_delimited_date path
ts = Timestamp("01/30/2023")
assert ts.unit == "us"
# _parse_dateabbr_string path
ts = Timestamp("2015Q1")
assert ts.unit == "us"
# dateutil_parse path
ts = Timestamp("2016-01-01 1:30:01 PM")
assert ts.unit == "us"
ts = Timestamp("2016 June 3 15:25:01.345")
assert ts.unit == "us"
ts = Timestamp("300-01-01")
assert ts.unit == "us"
ts = Timestamp("300 June 1:30:01.300")
assert ts.unit == "us"
# dateutil path -> don't drop trailing zeros
ts = Timestamp("01-01-2013T00:00:00.000000000+0000")
assert ts.unit == "ns"
ts = Timestamp("2016/01/02 03:04:05.001000 UTC")
assert ts.unit == "us"
# higher-than-nanosecond -> we drop the trailing bits
ts = Timestamp("01-01-2013T00:00:00.000000002100+0000")
assert ts == Timestamp("01-01-2013T00:00:00.000000002+0000")
assert ts.unit == "ns"
# GH#56208 minute reso through the ISO8601 path with tz offset
ts = Timestamp("2020-01-01 00:00+00:00")
assert ts.unit == "us"
ts = Timestamp("2020-01-01 00+00:00")
assert ts.unit == "us"
@pytest.mark.parametrize("method", ["now", "today"])
def test_now_today_unit(self, method):
# GH#55879
ts_from_method = getattr(Timestamp, method)()
ts_from_string = Timestamp(method)
assert ts_from_method.unit == ts_from_string.unit == "us"
| TestTimestampResolutionInference |
python | pytorch__pytorch | test/distributed/pipelining/test_pipe.py | {
"start": 1245,
"end": 2025
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mlp0 = MLPModule(d_hid)
self.mlp1 = MLPModule(d_hid)
self.mlp2 = MLPModule(d_hid)
self.mlp3 = MLPModule(d_hid)
def forward(self, x, y):
x = self.mlp0(x)
pipe_split()
x = self.mlp1(x)
pipe_split()
x = self.mlp2(x)
pipe_split()
x = self.mlp3(x)
return x - y
EXPECTED_N_STAGES = {
ExampleCode: 4,
MultiMLP: 4,
ModelWithParamAlias: 2,
}
# Currently, we don't enforce full set equality on the FQNs between the original
# and pipelined models, because in the multi-use param case, PP will deduplicate
# the FQNs from the state_dict.
# TODO
CHECK_FQN_SET_EQUALITY = False
| MultiMLP |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 90698,
"end": 91580
} | class ____(RequestHandler):
def initialize(self, test):
self.test = test
self.method = None
self.methods = [] # type: typing.List[str]
@contextlib.contextmanager
def in_method(self, method):
if self.method is not None:
self.test.fail(f"entered method {method} while in {self.method}")
self.method = method
self.methods.append(method)
try:
yield
finally:
self.method = None
@gen.coroutine
def prepare(self):
# Note that asynchronous prepare() does not block data_received,
# so we don't use in_method here.
self.methods.append("prepare")
yield gen.moment
@gen.coroutine
def post(self):
with self.in_method("post"):
yield gen.moment
self.write(dict(methods=self.methods))
| BaseFlowControlHandler |
python | getsentry__sentry | tests/sentry/flags/endpoints/test_hooks.py | {
"start": 409,
"end": 15153
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-flag-hooks"
def setUp(self) -> None:
super().setUp()
self.url = reverse(self.endpoint, args=(self.organization.slug, "launchdarkly"))
@property
def features(self) -> dict[str, bool]:
return {}
def test_generic_post_create(self, mock_incr: MagicMock) -> None:
request_data = {
"data": [
{
"action": "created",
"change_id": 9734362632,
"created_at": "2024-12-12T00:00:00+00:00",
"created_by": {"id": "username", "type": "name"},
"flag": "hello",
}
],
"meta": {"version": 1},
}
signature = hmac_sha256_hex_digest(key="456", message=json.dumps(request_data).encode())
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization, provider="generic", secret="456"
)
with self.feature(self.features):
response = self.client.post(
reverse(self.endpoint, args=(self.organization.slug, "generic")),
request_data,
headers={"X-Sentry-Signature": signature},
)
assert response.status_code == 200, response.content
mock_incr.assert_any_call(
"feature_flags.audit_log_event_posted", tags={"provider": "generic"}
)
assert FlagAuditLogModel.objects.count() == 1
def test_unleash_post_create(self, mock_incr: MagicMock) -> None:
request_data = {
"id": 28,
"tags": [{"type": "simple", "value": "testvalue"}],
"type": "feature-environment-enabled",
"project": "default",
"createdAt": "2024-12-30T00:00:00.000Z",
"createdBy": "admin",
"environment": "development",
"createdByUserId": 1,
"featureName": "test-flag",
}
signature = "testing12345abcdaslkflsldkfkdlks"
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization,
provider="unleash",
secret="testing12345abcdaslkflsldkfkdlks",
)
with self.feature(self.features):
response = self.client.post(
reverse(self.endpoint, args=(self.organization.slug, "unleash")),
request_data,
headers={"Authorization": signature},
)
assert response.status_code == 200, response.content
mock_incr.assert_any_call(
"feature_flags.audit_log_event_posted", tags={"provider": "unleash"}
)
assert FlagAuditLogModel.objects.count() == 1
def test_statsig_post_verification(self, mock_incr: MagicMock) -> None:
request_data = {"data": {"event": "url_verification", "verification_code": "123"}}
with self.feature(self.features):
response = self.client.post(
reverse(self.endpoint, args=(self.organization.slug, "statsig")), request_data
)
assert response.status_code == 200, response.content
assert response.json() == {"verification_code": "123"}
assert FlagAuditLogModel.objects.count() == 0
def test_statsig_post_create(self, mock_incr: MagicMock) -> None:
request_data = {
"data": [
{
"user": {"name": "johndoe", "email": "john@sentry.io"},
"timestamp": 1739400185198,
"eventName": "statsig::config_change",
"metadata": {
"projectName": "sentry",
"projectID": "1",
"type": "Gate",
"name": "gate1",
"description": "Updated Config Conditions\n - Added rule Rule 1",
"environments": "development,staging,production",
"action": "updated",
"tags": [],
"targetApps": [],
},
},
]
}
secret = "webhook-Xk9pL8NQaR5Ym2cx7vHnWtBj4M3f6qyZdC12mnspk8"
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization,
provider="statsig",
secret=secret,
)
request_timestamp = "1739400185400" # ms timestamp of the webhook request
signature_basestring = f"v0:{request_timestamp}:{json.dumps(request_data)}".encode()
signature = "v0=" + hmac_sha256_hex_digest(key=secret, message=signature_basestring)
headers = {
"X-Statsig-Signature": signature,
"X-Statsig-Request-Timestamp": request_timestamp,
}
with self.feature(self.features):
response = self.client.post(
reverse(self.endpoint, args=(self.organization.slug, "statsig")),
request_data,
headers=headers,
)
assert response.status_code == 200, response.content
mock_incr.assert_any_call(
"feature_flags.audit_log_event_posted", tags={"provider": "statsig"}
)
assert FlagAuditLogModel.objects.count() == 1
def test_statsig_post_unauthorized(self, mock_incr: MagicMock) -> None:
request_data = {
"data": [
{
"user": {"name": "johndoe", "email": "john@sentry.io"},
"timestamp": 1739400185198,
"eventName": "statsig::config_change",
"metadata": {
"projectName": "sentry",
"projectID": "1",
"type": "Gate",
"name": "gate1",
"description": "Updated Config Conditions\n - Added rule Rule 1",
"environments": "development,staging,production",
"action": "updated",
"tags": [],
"targetApps": [],
},
},
]
}
with self.feature(self.features):
response = self.client.post(
reverse(self.endpoint, args=(self.organization.slug, "statsig")), request_data
)
assert response.status_code == 401, response.content
assert FlagAuditLogModel.objects.count() == 0
def test_launchdarkly_post_create(self, mock_incr: MagicMock) -> None:
request_data = LD_REQUEST
signature = hmac_sha256_hex_digest(key="456", message=json.dumps(request_data).encode())
# Test multiple secrets exist for the provider, org pair.
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization, provider="launchdarkly", secret="123"
)
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization, provider="launchdarkly", secret="456"
)
with self.feature(self.features):
response = self.client.post(
self.url, request_data, headers={"X-LD-Signature": signature}
)
assert response.status_code == 200
mock_incr.assert_any_call(
"feature_flags.audit_log_event_posted", tags={"provider": "launchdarkly"}
)
assert FlagAuditLogModel.objects.count() == 1
flag = FlagAuditLogModel.objects.first()
assert flag is not None
assert flag.action == ACTION_MAP["created"]
assert flag.flag == "test flag"
assert flag.created_by == "michelle@example.com"
assert flag.created_by_type == CREATED_BY_TYPE_MAP["email"]
assert flag.organization_id == self.organization.id
assert flag.provider == PROVIDER_MAP["launchdarkly"]
assert flag.tags is not None
assert flag.tags["description"] == "flag was created"
def test_launchdarkly_post_create_invalid_signature(self, mock_incr: MagicMock) -> None:
with self.feature(self.features):
sig = hmac_sha256_hex_digest(key="123", message=b"456")
response = self.client.post(self.url, LD_REQUEST, headers={"X-LD-Signature": sig})
assert response.status_code == 401
assert call("feature_flags.audit_log_event_posted") not in mock_incr.call_args_list
def test_post_launchdarkly_deserialization_failed(self, mock_incr: MagicMock) -> None:
signature = hmac_sha256_hex_digest(key="123", message=json.dumps({}).encode())
FlagWebHookSigningSecretModel.objects.create(
organization=self.organization, provider="launchdarkly", secret="123"
)
with self.feature(self.features):
response = self.client.post(self.url, {}, headers={"X-LD-Signature": signature})
assert response.status_code == 200
assert FlagAuditLogModel.objects.count() == 0
assert call("feature_flags.audit_log_event_posted") not in mock_incr.call_args_list
def test_post_invalid_provider(self, mock_incr: MagicMock) -> None:
url = reverse(self.endpoint, args=(self.organization.slug, "test"))
with self.feature(self.features):
response = self.client.post(url, {})
assert response.status_code == 404
assert call("feature_flags.audit_log_event_posted") not in mock_incr.call_args_list
def test_post_missing_signature(self, mock_incr: MagicMock) -> None:
with self.feature(self.features):
response = self.client.post(self.url, {})
assert response.status_code == 401, response.content
assert call("feature_flags.audit_log_event_posted") not in mock_incr.call_args_list
LD_REQUEST = {
"_links": {
"canonical": {
"href": "/api/v2/flags/default/test-flag",
"type": "application/json",
},
"parent": {"href": "/api/v2/auditlog", "type": "application/json"},
"self": {
"href": "/api/v2/auditlog/1234",
"type": "application/json",
},
"site": {"href": "/default/~/features/test-flag", "type": "text/html"},
},
"_id": "1234",
"_accountId": "1234",
"date": 1729123465221,
"accesses": [
{"action": "createFlag", "resource": "proj/default:env/test:flag/test-flag"},
{"action": "createFlag", "resource": "proj/default:env/production:flag/test-flag"},
],
"kind": "flag",
"name": "test flag",
"description": "flag was created",
"shortDescription": "",
"member": {
"_links": {
"parent": {"href": "/api/v2/members", "type": "application/json"},
"self": {
"href": "/api/v2/members/1234",
"type": "application/json",
},
},
"_id": "1234",
"email": "michelle@example.com",
"firstName": "Michelle",
"lastName": "Doe",
},
"titleVerb": "created the flag",
"title": "Michelle created the flag [test flag](https://app.launchdarkly.com/default/~/features/test-flag)",
"target": {
"_links": {
"canonical": {
"href": "/api/v2/flags/default/test-flag",
"type": "application/json",
},
"site": {"href": "/default/~/features/test-flag", "type": "text/html"},
},
"name": "test flag",
"resources": [
"proj/default:env/test:flag/test-flag",
"proj/default:env/production:flag/test-flag",
],
},
"currentVersion": {
"name": "test flag",
"kind": "boolean",
"description": "testing a feature flag",
"key": "test-flag",
"_version": 1,
"creationDate": 1729123465176,
"includeInSnippet": False,
"clientSideAvailability": {"usingMobileKey": False, "usingEnvironmentId": False},
"variations": [
{"_id": "d883033e-fa8b-41d4-a4be-112d9a59278e", "value": True, "name": "on"},
{"_id": "73aaa33f-c9ca-4bdc-8c97-01a20567aa3f", "value": False, "name": "off"},
],
"temporary": False,
"tags": [],
"_links": {
"parent": {"href": "/api/v2/flags/default", "type": "application/json"},
"self": {"href": "/api/v2/flags/default/test-flag", "type": "application/json"},
},
"maintainerId": "1234",
"_maintainer": {
"_links": {
"self": {
"href": "/api/v2/members/1234",
"type": "application/json",
}
},
"_id": "1234",
"firstName": "Michelle",
"lastName": "Doe",
"role": "owner",
"email": "michelle@example.com",
},
"goalIds": [],
"experiments": {"baselineIdx": 0, "items": []},
"customProperties": {},
"archived": False,
"deprecated": False,
"defaults": {"onVariation": 0, "offVariation": 1},
"environments": {
"production": {
"on": False,
"archived": False,
"salt": "1234",
"sel": "1234",
"lastModified": 1729123465190,
"version": 1,
"targets": [],
"contextTargets": [],
"rules": [],
"fallthrough": {"variation": 0},
"offVariation": 1,
"prerequisites": [],
"_site": {
"href": "/default/production/features/test-flag",
"type": "text/html",
},
"_environmentName": "Production",
"trackEvents": False,
"trackEventsFallthrough": False,
},
"test": {
"on": False,
"archived": False,
"salt": "7495d3dcf72f43aaa075012fad947d0d",
"sel": "61b4861e6ed54135bc244bb120e9e2da",
"lastModified": 1729123465190,
"version": 1,
"targets": [],
"contextTargets": [],
"rules": [],
"fallthrough": {"variation": 0},
"offVariation": 1,
"prerequisites": [],
"_site": {"href": "/default/test/features/test-flag", "type": "text/html"},
"_environmentName": "Test",
"trackEvents": False,
"trackEventsFallthrough": False,
},
},
},
}
| OrganizationFlagsHooksEndpointTestCase |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_common.py | {
"start": 7547,
"end": 8143
} | class ____(Marker, metaclass=abc.ABCMeta):
"""Base `Marker` class that represents exceptions encountered and deferred during templating."""
__slots__ = ()
@abc.abstractmethod
def _as_exception(self) -> Exception:
pass
def _as_message(self) -> str:
return str(self._as_exception())
def trip(self) -> t.NoReturn:
"""Raise an internal exception which can be converted back to this instance while maintaining the cause for callers that follow them."""
raise MarkerError(self._undefined_message, self) from self._as_exception()
| ExceptionMarker |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_null.py | {
"start": 1436,
"end": 3194
} | class ____(MetricProvider):
"""A convenience class to provide an alias for easier access to the null count in a column."""
metric_name = "column_values.null.count"
@metric_value(engine=PandasExecutionEngine)
def _pandas(*, metrics, **kwargs):
return metrics[
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(*, metrics, **kwargs):
return metrics[
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@metric_value(engine=SparkDFExecutionEngine)
def _spark(*, metrics, **kwargs):
return metrics[
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
]
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
dependencies[
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
] = MetricConfiguration(
metric_name=f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
return dependencies
| ColumnValuesNullCount |
python | pytorch__pytorch | torch/ao/nn/intrinsic/qat/modules/conv_fused.py | {
"start": 15481,
"end": 17315
} | class ____(_ConvBnNd, nn.Conv1d):
r"""
A ConvBn1d module is a module fused from Conv1d and BatchNorm1d,
attached with FakeQuantize modules for weight,
used in quantization aware training.
We combined the interface of :class:`torch.nn.Conv1d` and
:class:`torch.nn.BatchNorm1d`.
Similar to :class:`torch.nn.Conv1d`, with FakeQuantize modules initialized
to default.
Attributes:
freeze_bn:
weight_fake_quant: fake quant module for weight
"""
_FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm1d]] = nn.BatchNorm1d
_FLOAT_RELU_MODULE: ClassVar[type[nn.Module] | None] = None
_FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvBn1d # type: ignore[assignment]
_FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
def __init__(
self,
# Conv1d args
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=None,
padding_mode="zeros",
# BatchNorm1d args
# num_features: out_channels
eps=1e-05,
momentum=0.1,
# affine: True
# track_running_stats: True
# Args for this module
freeze_bn=False,
qconfig=None,
):
kernel_size = _single(kernel_size)
stride = _single(stride)
padding = _single(padding)
dilation = _single(dilation)
_ConvBnNd.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
False,
_single(0),
groups,
bias,
padding_mode,
eps,
momentum,
freeze_bn,
qconfig,
dim=1,
)
| ConvBn1d |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/structs.py | {
"start": 3147,
"end": 4094
} | class ____(object):
"""Wrap an iterator factory returned by `find_matches()`.
Calling `iter()` on this class would invoke the underlying iterator
factory, making it a "collection with ordering" that can be iterated
through multiple times, but lacks random access methods presented in
built-in Python sequence types.
"""
def __init__(self, factory):
self._factory = factory
self._iterable = None
def __repr__(self):
return "{}({})".format(type(self).__name__, list(self))
def __bool__(self):
try:
next(iter(self))
except StopIteration:
return False
return True
__nonzero__ = __bool__ # XXX: Python 2.
def __iter__(self):
iterable = (
self._factory() if self._iterable is None else self._iterable
)
self._iterable, current = itertools.tee(iterable)
return current
| _FactoryIterableView |
python | pytorch__pytorch | test/test_mps.py | {
"start": 464638,
"end": 490239
} | class ____(TestCaseMPS):
def test_conv1d_all_strides_paddings(self):
# https://github.com/pytorch/pytorch/issues/82921
def helper(stride, padding):
y_cpu = torch.randn(1, 57, 40)
conv_cpu = nn.Conv1d(57, 20, stride=stride, padding=padding, kernel_size=3, bias=False)
conv_gpu = copy.deepcopy(conv_cpu).to(device='mps')
x_cpu = conv_cpu(y_cpu)
y_gpu = y_cpu.to(device='mps')
x_gpu = conv_gpu(y_gpu)
self.assertEqual(x_cpu, x_gpu.cpu())
for stride in range(1, 4):
for padding in range(1, 4):
helper(stride, padding)
def test_conv1d_channels_last(self):
# https://github.com/pytorch/pytorch/issues/81557
model_cpu = torch.nn.Conv1d(1, 128, 3)
a_cpu = torch.arange((128 * 176), dtype=torch.float32)
a_cpu = a_cpu.view(128, 176, 1).permute(0, 2, 1)
out_cpu = model_cpu(a_cpu)
a_mps = a_cpu.detach().clone().to("mps")
model_mps = model_cpu.to("mps")
out_mps = model_mps(a_mps)
self.assertEqual(out_cpu, out_mps.cpu(), rtol=2.6e-05, atol=2e-04)
def test_conv_transpose_1d_all_strides(self):
# https://github.com/pytorch/pytorch/issues/82711
def helper(stride):
y_cpu = torch.ones(1, 1, 2)
deconv_cpu = nn.ConvTranspose1d(in_channels=1, out_channels=1, kernel_size=1, stride=stride, bias=False, padding=1)
deconv_cpu.weight.data = torch.ones(1, 1, 2)
deconv_gpu = copy.deepcopy(deconv_cpu).to(device='mps')
x_cpu = deconv_cpu(y_cpu)
y_gpu = y_cpu.to(device='mps')
x_gpu = deconv_gpu(y_gpu)
self.assertEqual(x_cpu, x_gpu.cpu())
[helper(stride) for stride in [1, 2, 3]]
def test_conv_transpose_1d_nn_functional(self):
# https://github.com/pytorch/pytorch/issues/82563
tin = torch.rand((1, 512, 1245), dtype=torch.float32)
tparams = torch.rand((512, 256, 16), dtype=torch.float32)
tbias = torch.rand((256), dtype=torch.float32)
device = 'cpu'
tcpu = torch.nn.functional.conv_transpose1d(tin.to(device), tparams.to(device), tbias.to(device), stride=8, padding=4)
device = 'mps'
tgpu = torch.nn.functional.conv_transpose1d(tin.to(device), tparams.to(device), tbias.to(device), stride=8, padding=4)
self.assertEqual(tcpu, tgpu.cpu(), rtol=2.6e-05, atol=2e-04)
def test_conv_backward_1d_channels_last(self):
def helper(shape, in_channels=1, out_channels=1, kernel_size=3, groups=1):
# https://github.com/pytorch/pytorch/issues/84511
conv_cpu = torch.nn.Conv1d(
in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups).requires_grad_()
conv_mps = torch.nn.Conv1d(
in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, groups=groups).to("mps")
conv_mps.weight.data = conv_cpu.weight.data.detach().clone().to("mps").requires_grad_(True)
conv_mps.bias.data = conv_cpu.bias.data.detach().clone().to("mps").requires_grad_(True)
data = torch.rand(shape, dtype=torch.float32)
x_cpu = data.permute(0, 2, 1).contiguous().requires_grad_(True)
x_mps = data.permute(0, 2, 1).detach().clone().to("mps").contiguous().requires_grad_(True)
res_cpu = conv_cpu(x_cpu)
res_mps = conv_mps(x_mps)
self.assertEqual(res_cpu, res_mps)
res_cpu = res_cpu.sum().backward()
res_mps = res_mps.sum().backward()
self.assertEqual(conv_cpu.weight.grad, conv_mps.weight.grad, rtol=2.6e-05, atol=2e-04)
self.assertEqual(x_cpu.grad, x_mps.grad)
helper(shape=(1, 176, 1))
helper(shape=(2, 12, 1))
helper(shape=(3, 176, 1))
helper(shape=(4, 376, 1))
helper(shape=(1024, 376, 9), in_channels=9, out_channels=1, groups=1)
helper(shape=(1024, 376, 9), in_channels=9, out_channels=9, groups=3)
# Regression test for https://github.com/pytorch/pytorch/issues/140902
# And https://github.com/pytorch/pytorch/issues/142344 (adding grad for input)
ic, oc, ks, f = 2, 5, 3, 7
conv = torch.nn.Conv1d(ic, oc, kernel_size=ks, padding=1).to("mps")
inp = torch.rand(1, ic, f, device="mps", requires_grad=True)
out = conv(inp)
grad_in = torch.rand(1, oc, f, device="mps")
grad_in_cl = torch.empty(1, f, oc, device="mps").transpose(1, 2)
grad_in_cl[:] = grad_in
# It does not matter whether grad_in contiguous, or channels last, results should equal to each other
grad_rc = torch.autograd.grad((out,), (inp, conv.weight, conv.bias), (grad_in,), retain_graph=True)
grad_rc_cl = torch.autograd.grad((out,), (inp, conv.weight, conv.bias), (grad_in_cl,), retain_graph=True)
self.assertEqual(grad_rc[0], grad_rc_cl[0])
self.assertEqual(grad_rc[1], grad_rc_cl[1])
self.assertEqual(grad_rc[2], grad_rc_cl[2])
def test_conv1d_contiguous(self):
model_cpu = torch.nn.Conv1d(1, 128, 3)
a_cpu = torch.ones(128, 1, 176)
out_cpu = model_cpu(a_cpu)
a_mps = a_cpu.detach().clone().to("mps")
model_mps = model_cpu.to("mps")
out_mps = model_mps(a_mps)
self.assertEqual(out_cpu.shape, out_mps.shape)
self.assertEqual(out_cpu, out_mps.cpu())
def test_conv2d_all_strides_paddings(self):
# https://github.com/pytorch/pytorch/issues/83180
def helper(N, C, H, W, groups, input_mem_format, weight_mem_format, permute_data):
x_cpu = torch.randn(N, C, H, W).to(memory_format=input_mem_format).requires_grad_()
x_mps = x_cpu.detach().clone().to(device='mps').requires_grad_()
if permute_data:
x_cpu.permute(0, 2, 3, 1)
x_mps.permute(0, 2, 3, 1)
for strideX in range(1, 4):
for strideY in range(1, 4):
conv_cpu = torch.nn.Conv2d(
in_channels=N, out_channels=C, kernel_size=H, groups=groups, stride=(strideX, strideY)).requires_grad_()
conv_cpu.weight.data = conv_cpu.weight.to(memory_format=weight_mem_format).requires_grad_()
conv_mps = torch.nn.Conv2d(
in_channels=N, out_channels=C, kernel_size=H, groups=groups, stride=(strideX, strideY), device="mps")
conv_mps.weight.data = conv_cpu.weight.data.detach().clone().to("mps").requires_grad_()
conv_mps.bias.data = conv_cpu.bias.data.detach().clone().to("mps").requires_grad_()
res_cpu = conv_cpu(x_cpu)
res_mps = conv_mps(x_mps)
self.assertEqual(res_cpu, res_mps.cpu(), rtol=1e-03, atol=1e-05)
res_cpu = res_cpu.sum().backward()
res_mps = res_mps.sum().backward()
self.assertEqual(res_cpu, res_mps, rtol=2.6e-05, atol=2e-04)
self.assertEqual(conv_cpu.weight.grad, conv_mps.weight.grad, rtol=2.6e-05, atol=2e-04)
self.assertEqual(conv_cpu.bias.grad, conv_mps.bias.grad)
self.assertEqual(x_cpu.grad, x_mps.grad)
for mem_format_input in [torch.contiguous_format, torch.channels_last]:
for mem_format_weight in [torch.contiguous_format, torch.channels_last]:
for permute_data in [True, False]:
helper(2, 2, 3, 6, 1, mem_format_input, mem_format_weight, permute_data)
helper(10, 10, 4, 6, 2, mem_format_input, mem_format_weight, permute_data)
helper(32, 32, 4, 6, 2, mem_format_input, mem_format_weight, permute_data)
def test_conv_transpose_2d_strided(self):
def helper(m_cpu, memory_format):
m_mps = copy.deepcopy(m_cpu).requires_grad_()
m_mps.weight.data = m_cpu.weight.data.detach().clone().to("mps").requires_grad_()
m_mps.bias.data = m_cpu.bias.data.detach().clone().to("mps").requires_grad_()
input_cpu = torch.randn(20, 16, 50, 100).to(memory_format=memory_format).requires_grad_()
input_mps = input_cpu.detach().clone().to("mps")
output_cpu = m_cpu(input_cpu)
output_mps = m_mps(input_mps)
self.assertEqual(output_cpu, output_mps)
for mem_format_input in [torch.contiguous_format, torch.channels_last]:
# With square kernels and equal stride
helper(nn.ConvTranspose2d(16, 33, 3, stride=2).requires_grad_(), mem_format_input)
# non-square kernels and unequal stride and with padding
helper(nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)).requires_grad_(), mem_format_input)
def test_conv_transpose_2d_specified_output(self):
input_cpu = torch.randn(1, 16, 12, 12)
input_mps = input_cpu.detach().clone().to("mps")
downsample_cpu = nn.Conv2d(16, 16, 3, stride=2, padding=1)
downsample_mps = nn.Conv2d(16, 16, 3, stride=2, padding=1, device="mps")
downsample_mps.weight.data = downsample_cpu.weight.data.detach().clone().to("mps").requires_grad_()
downsample_mps.bias.data = downsample_cpu.bias.data.detach().clone().to("mps").requires_grad_()
upsample_cpu = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
upsample_mps = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1, device="mps")
upsample_mps.weight.data = upsample_cpu.weight.data.detach().clone().to("mps").requires_grad_()
upsample_mps.bias.data = upsample_cpu.bias.data.detach().clone().to("mps").requires_grad_()
h_cpu = downsample_cpu(input_cpu)
h_mps = downsample_mps(input_mps)
self.assertEqual(h_cpu, h_mps)
size_cpu = h_cpu.size()
size_mps = h_mps.size()
self.assertEqual(size_cpu, size_mps)
output_cpu = upsample_cpu(h_cpu, output_size=input_cpu.size())
output_mps = upsample_mps(h_mps, output_size=input_mps.size())
self.assertEqual(output_cpu, output_mps)
self.assertEqual(output_cpu.size(), output_mps.size())
def test_conv2d_single_stride(self):
y_cpu = torch.randn(2, 2, 3, 6)
y_gpu = y_cpu.to(device='mps')
for stride in range(1, 4):
conv_cpu = torch.nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, stride=stride)
conv_gpu = copy.deepcopy(conv_cpu).to(device='mps')
x_cpu = conv_cpu(y_cpu)
x_gpu = conv_gpu(y_gpu)
self.assertEqual(x_cpu, x_gpu.cpu(), rtol=1e-03, atol=1e-05)
def test_conv3d_single_stride(self):
# Conv3d is only available from MacOS 13.2 onwards
y_cpu = torch.randn(2, 2, 3, 6)
y_gpu = y_cpu.to(device='mps')
for stride in range(1, 4):
conv_cpu = torch.nn.Conv3d(in_channels=2, out_channels=2, kernel_size=2, stride=stride)
conv_gpu = copy.deepcopy(conv_cpu).to(device='mps')
x_cpu = conv_cpu(y_cpu)
x_gpu = conv_gpu(y_gpu)
self.assertEqual(x_cpu, x_gpu.cpu(), rtol=1e-03, atol=1e-05)
def test_grid_sample(self):
def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad):
def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):
for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:
# grid_dim_contig_order specifies the dimension order that can
# make grid to be contiguous.
# i.e., grid.permute(grid_dim_contig_order) is contiguous.
# e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be
# initialized with contiguous tensor of shape [N, 2, H, W]
# and permuted to [N, H, W, 2] afterwards.
grid_shape = [N, H, W, 2]
grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]
grid_fwd_permute = [None, None, None, None]
for i, d in enumerate(grid_dim_contig_order):
grid_fwd_permute[d] = i
def get_grid(device='cpu', data=None):
if data is not None:
assert list(data.shape) == grid_shape
data = data.permute(grid_dim_contig_order).to(device)
else:
data = torch.randn(grid_init_shape, device=device)
grid = data.permute(grid_fwd_permute)
assert grid.permute(grid_dim_contig_order).is_contiguous()
return grid
input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)
grid_cpu = get_grid().requires_grad_()
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(out_cpu.size(), torch.Size([N, C, H, W]))
gradients = torch.randn_like(out_cpu)
out_cpu.backward(gradients)
# Compare against unvectorized CPU fallback
# NOTE [ grid_sample CPU fallback ]
# grid_sample uses AVX for 2d images, but that requires 32-bit indexing for
# 32-bit floats. So we also have a fallback that is used only for float tensors
# requiring 64-bit indexing. That requires too much memory to run on CI, so we
# also export the fallback and test it here to ensure feature parity with
# the vectorized version.
input_fallback = input_cpu.float().detach_().requires_grad_()
grid_fallback = grid_cpu.float().detach_().requires_grad_()
out_fallback = torch._grid_sampler_2d_cpu_fallback(
input_fallback, grid_fallback,
F.GRID_SAMPLE_INTERPOLATION_MODES[mode],
F.GRID_SAMPLE_PADDING_MODES[padding_mode],
align_corners)
self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)
out_fallback.backward(gradients.float())
if input_requires_grad:
self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)
self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)
input_mps = input_cpu.detach().transpose(0, 1).to("mps").transpose(0, 1).requires_grad_(input_requires_grad)
grid_mps = get_grid('mps', grid_cpu.detach()).requires_grad_()
out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
self.assertEqual(out_cpu, out_mps)
out_mps.backward(gradients.to("mps"))
if input_requires_grad:
self.assertEqual(input_cpu.grad, input_mps.grad)
self.assertEqual(grid_cpu.grad, grid_mps.grad, atol=5e-5, rtol=0)
# check that zero-dimensional input strides don't error out
base_input = torch.randn(N, C, 1, IW)
input_cpu = base_input.expand_as(input_mps).requires_grad_(input_requires_grad)
out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
input_mps = base_input.to("mps").expand_as(input_mps).requires_grad_(input_requires_grad)
out_mps = F.grid_sample(input_mps, grid_mps, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
self.assertEqual(out_cpu, out_mps)
# test same size output
test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)
# test larger output
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(IH + 1, 12)
W = random.randint(IW + 1, 12)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# test smaller output
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(2, IH)
W = random.randint(2, IW)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# test 1x1 inpput
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = 1
IW = 1
H = random.randint(2, 5)
W = random.randint(2, 5)
test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)
# testing empty grid
N = random.randint(2, 8)
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
W = random.randint(3, IW + 2)
test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)
# testing empty channel
N = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)
# testing empty batch
C = random.randint(2, 8)
IH = random.randint(2, 8)
IW = random.randint(2, 8)
H = random.randint(3, IH + 2)
W = random.randint(3, IW + 2)
test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)
for mode in ('bilinear', 'nearest'):
for padding_mode in ('zeros', 'reflection'):
for align_corners in (True, False):
# test known input
input = torch.arange(1., 11, device="mps").view(1, 1, 2, 5)
grid = torch.tensor(
[[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],
[[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]], device="mps").view(1, 2, 5, 2)
if mode == 'bilinear':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],
[0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
[1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],
[2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],
[1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]], device="mps").view(1, 1, 2, 5)
else:
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
elif mode == 'nearest':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[0., 8., 5., 7., 9.],
[1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0., 8., 5., 7., 0.],
[1., 8., 5., 8., 0.]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 10.]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[1., 8., 5., 7., 9.],
[1., 8., 5., 8., 9.]], device="mps").view(1, 1, 2, 5)
else:
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
elif mode == 'bicubic':
if padding_mode == 'zeros':
if align_corners:
groundtruth = torch.tensor(
[[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],
[2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],
[0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'border':
if align_corners:
groundtruth = torch.tensor(
[[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],
[2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],
[0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]], device="mps").view(1, 1, 2, 5)
elif padding_mode == 'reflection':
if align_corners:
groundtruth = torch.tensor(
[[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],
[1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]], device="mps").view(1, 1, 2, 5)
else:
groundtruth = torch.tensor(
[[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],
[0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]], device="mps").view(1, 1, 2, 5)
else:
raise AssertionError(f"missing groundtruth test for padding mode '{padding_mode}'")
else:
raise AssertionError(f"missing groundtruth test for interpolation mode '{mode}'")
output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
align_corners=align_corners)
self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
msg=f"groundtruth comparison failed for mode={mode}, "
f"padding_mode={padding_mode}")
| TestConvolutionMPS |
python | doocs__leetcode | solution/3000-3099/3000.Maximum Area of Longest Diagonal Rectangle/Solution.py | {
"start": 0,
"end": 323
} | class ____:
def areaOfMaxDiagonal(self, dimensions: List[List[int]]) -> int:
ans = mx = 0
for l, w in dimensions:
t = l**2 + w**2
if mx < t:
mx = t
ans = l * w
elif mx == t:
ans = max(ans, l * w)
return ans
| Solution |
python | PyCQA__pylint | pylint/reporters/multi_reporter.py | {
"start": 621,
"end": 3771
} | class ____:
"""Reports messages and layouts in plain text."""
name = "_internal_multi_reporter"
# Note: do not register this reporter with linter.register_reporter as it is
# not intended to be used directly like a regular reporter, but is
# instead used to implement the
# `--output-format=json:somefile.json,colorized`
# multiple output formats feature
extension = ""
def __init__(
self,
sub_reporters: list[BaseReporter],
close_output_files: Callable[[], None],
output: TextIO | None = None,
):
self._sub_reporters = sub_reporters
self.close_output_files = close_output_files
self._path_strip_prefix = os.getcwd() + os.sep
self._linter: PyLinter | None = None
self.out = output
self.messages: list[Message] = []
@property
def out(self) -> TextIO | None:
return self.__out
@out.setter
def out(self, output: TextIO | None = None) -> None:
"""MultiReporter doesn't have its own output.
This method is only provided for API parity with BaseReporter
and should not be called with non-None values for 'output'.
"""
self.__out = None
if output is not None:
raise NotImplementedError("MultiReporter does not support direct output.")
def __del__(self) -> None:
self.close_output_files()
@property
def path_strip_prefix(self) -> str:
return self._path_strip_prefix
@property
def linter(self) -> PyLinter | None:
return self._linter
@linter.setter
def linter(self, value: PyLinter) -> None:
self._linter = value
for rep in self._sub_reporters:
rep.linter = value
def handle_message(self, msg: Message) -> None:
"""Handle a new message triggered on the current file."""
for rep in self._sub_reporters:
# We provide a copy so reporters can't modify message for others.
rep.handle_message(copy(msg))
def writeln(self, string: str = "") -> None:
"""Write a line in the output buffer."""
for rep in self._sub_reporters:
rep.writeln(string)
def display_reports(self, layout: Section) -> None:
"""Display results encapsulated in the layout tree."""
for rep in self._sub_reporters:
rep.display_reports(layout)
def display_messages(self, layout: Section | None) -> None:
"""Hook for displaying the messages of the reporter."""
for rep in self._sub_reporters:
rep.display_messages(layout)
def on_set_current_module(self, module: str, filepath: str | None) -> None:
"""Hook called when a module starts to be analysed."""
for rep in self._sub_reporters:
rep.on_set_current_module(module, filepath)
def on_close(
self,
stats: LinterStats,
previous_stats: LinterStats | None,
) -> None:
"""Hook called when a module finished analyzing."""
for rep in self._sub_reporters:
rep.on_close(stats, previous_stats)
| MultiReporter |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 112663,
"end": 117790
} | class ____(GeneratedAirbyteDestination):
class Disable:
@public
def __init__(
self,
):
self.mode = "disable"
class Allow:
@public
def __init__(
self,
):
self.mode = "allow"
class Prefer:
@public
def __init__(
self,
):
self.mode = "prefer"
class Require:
@public
def __init__(
self,
):
self.mode = "require"
class VerifyCa:
@public
def __init__(self, ca_certificate: str, client_key_password: Optional[str] = None):
self.mode = "verify-ca"
self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")
self.client_key_password = check.opt_str_param(
client_key_password, "client_key_password"
)
class VerifyFull:
@public
def __init__(
self,
ca_certificate: str,
client_certificate: str,
client_key: str,
client_key_password: Optional[str] = None,
):
self.mode = "verify-full"
self.ca_certificate = check.str_param(ca_certificate, "ca_certificate")
self.client_certificate = check.str_param(client_certificate, "client_certificate")
self.client_key = check.str_param(client_key, "client_key")
self.client_key_password = check.opt_str_param(
client_key_password, "client_key_password"
)
@public
def __init__(
self,
name: str,
host: str,
port: int,
database: str,
schema: str,
username: str,
ssl_mode: Union[
"PostgresDestination.Disable",
"PostgresDestination.Allow",
"PostgresDestination.Prefer",
"PostgresDestination.Require",
"PostgresDestination.VerifyCa",
"PostgresDestination.VerifyFull",
],
password: Optional[str] = None,
ssl: Optional[bool] = None,
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Destination for Postgres.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/postgres
Args:
name (str): The name of the destination.
host (str): Hostname of the database.
port (int): Port of the database.
database (str): Name of the database.
schema (str): The default schema tables are written to if the source does not specify a namespace. The usual value for this field is "public".
username (str): Username to use to access the database.
password (Optional[str]): Password associated with the username.
ssl (Optional[bool]): Encrypt data using SSL. When activating SSL, please select one of the connection modes.
ssl_mode (Union[PostgresDestination.Disable, PostgresDestination.Allow, PostgresDestination.Prefer, PostgresDestination.Require, PostgresDestination.VerifyCa, PostgresDestination.VerifyFull]): SSL connection modes. disable - Chose this mode to disable encryption of communication between Airbyte and destination database allow - Chose this mode to enable encryption only when required by the source database prefer - Chose this mode to allow unencrypted connection only if the source database does not support encryption require - Chose this mode to always require encryption. If the source database server does not support encryption, connection will fail verify-ca - Chose this mode to always require encryption and to verify that the source database server has a valid SSL certificate verify-full - This is the most secure mode. Chose this mode to always require encryption and to verify the identity of the source database server See more information - in the docs.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.database = check.str_param(database, "database")
self.schema = check.str_param(schema, "schema")
self.username = check.str_param(username, "username")
self.password = check.opt_str_param(password, "password")
self.ssl = check.opt_bool_param(ssl, "ssl")
self.ssl_mode = check.inst_param(
ssl_mode,
"ssl_mode",
(
PostgresDestination.Disable,
PostgresDestination.Allow,
PostgresDestination.Prefer,
PostgresDestination.Require,
PostgresDestination.VerifyCa,
PostgresDestination.VerifyFull,
),
)
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
super().__init__("Postgres", name)
| PostgresDestination |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 4098,
"end": 8987
} | class ____:
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
o = np.empty(())
r = np.add(a, 2, o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=o, subok=subok)
assert_(r is o)
r = np.add(a, 2, out=(o,), subok=subok)
assert_(r is o)
d = np.array(5.7)
o1 = np.empty(())
o2 = np.empty((), dtype=np.int32)
r1, r2 = np.frexp(d, o1, None, subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, None, o2, subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
assert_(r2 is o2)
r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
assert_(r1 is o1)
assert_(r2 is o2)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, o, subok=subok)
assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok)
assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
assert_raises(TypeError, np.add, a, 2, [], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
o.flags.writeable = False
assert_raises(ValueError, np.add, a, 2, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
def test_out_wrap_subok(self):
class ArrayWrap(np.ndarray):
__array_priority__ = 10
def __new__(cls, arr):
return np.asarray(arr).view(cls).copy()
def __array_wrap__(self, arr, context=None, return_scalar=False):
return arr.view(type(self))
for subok in (True, False):
a = ArrayWrap([0.5])
r = np.add(a, 2, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=None, subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
r = np.add(a, 2, out=(None,), subok=subok)
if subok:
assert_(isinstance(r, ArrayWrap))
else:
assert_(type(r) == np.ndarray)
d = ArrayWrap([5.7])
o1 = np.empty((1,))
o2 = np.empty((1,), dtype=np.int32)
r1, r2 = np.frexp(d, o1, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, o1, None, subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, None, o2, subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
if subok:
assert_(isinstance(r2, ArrayWrap))
else:
assert_(type(r2) == np.ndarray)
r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
if subok:
assert_(isinstance(r1, ArrayWrap))
else:
assert_(type(r1) == np.ndarray)
with assert_raises(TypeError):
# Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_out_wrap_no_leak(self):
# Regression test for gh-26545
class ArrSubclass(np.ndarray):
pass
arr = np.arange(10).view(ArrSubclass)
orig_refcount = sys.getrefcount(arr)
arr *= 1
assert sys.getrefcount(arr) == orig_refcount
| TestOut |
python | ray-project__ray | python/ray/data/_internal/planner/plan_expression/expression_visitors.py | {
"start": 2207,
"end": 3348
} | class ____(_ExprVisitorBase):
"""Visitor that collects all column references from expression trees.
This visitor traverses expression trees and accumulates column names
referenced in ColumnExpr nodes.
"""
def __init__(self):
"""Initialize with an empty set of referenced columns."""
# NOTE: We're using dict to maintain insertion ordering
self._col_refs: Dict[str, None] = dict()
def get_column_refs(self) -> List[str]:
return list(self._col_refs.keys())
def visit_column(self, expr: ColumnExpr) -> None:
"""Visit a column expression and collect its name.
Args:
expr: The column expression.
Returns:
None (only collects columns as a side effect).
"""
self._col_refs[expr.name] = None
def visit_alias(self, expr: AliasExpr) -> None:
"""Visit an alias expression and collect from its inner expression.
Args:
expr: The alias expression.
Returns:
None (only collects columns as a side effect).
"""
self.visit(expr.expr)
| _ColumnReferenceCollector |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/2D_car/DDPG.py | {
"start": 4017,
"end": 6811
} | class ____(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter, a, a_):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr = learning_rate
self.gamma = gamma
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Critic'):
# Input (s, a), output q
self.a = a
self.q = self._build_net(S, self.a, 'eval_net', trainable=True)
# Input (s_, a_), output q_ for q_target
self.q_ = self._build_net(S_, a_, 'target_net', trainable=False) # target_q is based on a_ from Actor's target_net
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))
with tf.variable_scope('C_train'):
self.train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
def _build_net(self, s, a, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.contrib.layers.xavier_initializer()
init_b = tf.constant_initializer(0.01)
with tf.variable_scope('l1'):
n_l1 = 100
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu6(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
net = tf.layers.dense(net, 20, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2',
trainable=trainable)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
return q
def learn(self, s, a, r, s_):
self.sess.run(self.train_op, feed_dict={S: s, self.a: a, R: r, S_: s_})
if self.t_replace_counter % self.t_replace_iter == 0:
self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
self.t_replace_counter += 1
| Critic |
python | kubernetes-client__python | kubernetes/client/api/networking_api.py | {
"start": 543,
"end": 5193
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/networking.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| NetworkingApi |
python | django__django | tests/generic_views/views.py | {
"start": 5099,
"end": 5326
} | class ____(generic.DeleteView):
queryset = Author.objects.all()
template_name = "generic_views/confirm_delete.html"
context_object_name = "thingy"
success_url = reverse_lazy("authors_list")
| SpecializedAuthorDelete |
python | fsspec__filesystem_spec | fsspec/gui.py | {
"start": 318,
"end": 5655
} | class ____:
"""Signal-slot mixin, for Panel event passing
Include this class in a widget manager's superclasses to be able to
register events and callbacks on Panel widgets managed by that class.
The method ``_register`` should be called as widgets are added, and external
code should call ``connect`` to associate callbacks.
By default, all signals emit a DEBUG logging statement.
"""
# names of signals that this class may emit each of which must be
# set by _register for any new instance
signals: ClassVar[Sequence[str]] = []
# names of actions that this class may respond to
slots: ClassVar[Sequence[str]] = []
# each of which must be a method name
def __init__(self):
self._ignoring_events = False
self._sigs = {}
self._map = {}
self._setup()
def _setup(self):
"""Create GUI elements and register signals"""
self.panel = pn.pane.PaneBase()
# no signals to set up in the base class
def _register(
self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
):
"""Watch the given attribute of a widget and assign it a named event
This is normally called at the time a widget is instantiated, in the
class which owns it.
Parameters
----------
widget : pn.layout.Panel or None
Widget to watch. If None, an anonymous signal not associated with
any widget.
name : str
Name of this event
thing : str
Attribute of the given widget to watch
log_level : int
When the signal is triggered, a logging event of the given level
will be fired in the dfviz logger.
auto : bool
If True, automatically connects with a method in this class of the
same name.
"""
if name not in self.signals:
raise ValueError(f"Attempt to assign an undeclared signal: {name}")
self._sigs[name] = {
"widget": widget,
"callbacks": [],
"thing": thing,
"log": log_level,
}
wn = "-".join(
[
getattr(widget, "name", str(widget)) if widget is not None else "none",
thing,
]
)
self._map[wn] = name
if widget is not None:
widget.param.watch(self._signal, thing, onlychanged=True)
if auto and hasattr(self, name):
self.connect(name, getattr(self, name))
def _repr_mimebundle_(self, *args, **kwargs):
"""Display in a notebook or a server"""
try:
return self.panel._repr_mimebundle_(*args, **kwargs)
except (ValueError, AttributeError) as exc:
raise NotImplementedError(
"Panel does not seem to be set up properly"
) from exc
def connect(self, signal, slot):
"""Associate call back with given event
The callback must be a function which takes the "new" value of the
watched attribute as the only parameter. If the callback return False,
this cancels any further processing of the given event.
Alternatively, the callback can be a string, in which case it means
emitting the correspondingly-named event (i.e., connect to self)
"""
self._sigs[signal]["callbacks"].append(slot)
def _signal(self, event):
"""This is called by a an action on a widget
Within an self.ignore_events context, nothing happens.
Tests can execute this method by directly changing the values of
widget components.
"""
if not self._ignoring_events:
wn = "-".join([event.obj.name, event.name])
if wn in self._map and self._map[wn] in self._sigs:
self._emit(self._map[wn], event.new)
@contextlib.contextmanager
def ignore_events(self):
"""Temporarily turn off events processing in this instance
(does not propagate to children)
"""
self._ignoring_events = True
try:
yield
finally:
self._ignoring_events = False
def _emit(self, sig, value=None):
"""An event happened, call its callbacks
This method can be used in tests to simulate message passing without
directly changing visual elements.
Calling of callbacks will halt whenever one returns False.
"""
logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
for callback in self._sigs[sig]["callbacks"]:
if isinstance(callback, str):
self._emit(callback)
else:
try:
# running callbacks should not break the interface
ret = callback(value)
if ret is False:
break
except Exception as e:
logger.exception(
"Exception (%s) while executing callback for signal: %s",
e,
sig,
)
def show(self, threads=False):
"""Open a new browser tab and display this instance's interface"""
self.panel.show(threads=threads, verbose=False)
return self
| SigSlot |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowing1.py | {
"start": 158,
"end": 1099
} | class ____:
def x(self):
return
maybe = True
a = None if maybe else ClassA()
b = None if maybe else ClassA()
if not a or not b:
a.x()
b.x()
else:
a.x()
b.x()
if not (not a or not b):
a.x()
b.x()
else:
a.x()
b.x()
if not a and not b:
# This should be flagged as an error
a.x()
# This should be flagged as an error
b.x()
else:
a.x()
b.x()
if not (not a and not b):
a.x()
b.x()
else:
# This should be flagged as an error
a.x()
# This should be flagged as an error
b.x()
if a or b:
a.x()
b.x()
else:
# This should be flagged as an error
a.x()
# This should be flagged as an error
b.x()
def func1(a: str, b: str | bool) -> bool:
x: str | bool = a and a in []
reveal_type(x, expected_text="bool | Literal['']")
if random() > 0.5:
return (a and a in [""]) or True
else:
return x or True
| ClassA |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 78205,
"end": 78754
} | class ____(PrefectFilterBaseModel):
"""Filter by `ArtifactCollection.latest_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of artifact ids to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.ArtifactCollection.latest_id.in_(self.any_))
return filters
| ArtifactCollectionFilterLatestId |
python | walkccc__LeetCode | solutions/2433. Find The Original Array of Prefix Xor/2433.py | {
"start": 0,
"end": 203
} | class ____:
def findArray(self, pref: list[int]) -> list[int]:
ans = [0] * len(pref)
ans[0] = pref[0]
for i in range(1, len(ans)):
ans[i] = pref[i] ^ pref[i - 1]
return ans
| Solution |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 2009,
"end": 2066
} | class ____(metaclass=ReverseMetaclass):
pass
| WithReverse |
python | huggingface__transformers | tests/models/eomt/test_image_processing_eomt.py | {
"start": 4024,
"end": 14287
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = EomtImageProcessor if is_vision_available() else None
fast_image_processing_class = EomtImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = EomtImageProcessingTester(self)
self.model_id = "tue-mps/coco_panoptic_eomt_large_640"
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "resample"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (2, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
@unittest.skip(reason="Not supported")
def test_call_numpy_4_channels(self):
pass
def test_call_pil(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test Non batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (2, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
def test_call_pytorch(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
expected_output_image_shape = (1, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
expected_output_image_shape = (2, 3, 18, 18)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
)
# Lets check whether 99.9% of mask_labels values match or not.
match_ratio = (image_encoding_slow.mask_labels[0] == image_encoding_fast.mask_labels[0]).float().mean().item()
self.assertGreaterEqual(match_ratio, 0.999, "Mask labels do not match between slow and fast image processor.")
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
for idx in range(len(dummy_maps)):
match_ratio = (encoding_slow.mask_labels[idx] == encoding_fast.mask_labels[idx]).float().mean().item()
self.assertGreaterEqual(
match_ratio, 0.999, "Mask labels do not match between slow and fast image processors."
)
def test_post_process_semantic_segmentation(self):
processor = self.image_processing_class(**self.image_processor_dict)
# Set longest_edge to None to test for semantic segmentatiom.
processor.size = {"shortest_edge": 18, "longest_edge": None}
image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
inputs = processor(images=image, do_split_image=True, return_tensors="pt")
patch_offsets = inputs["patch_offsets"]
target_sizes = [image.size[::-1]]
# For semantic segmentation, the BS of output is 2 coz, two patches are created for the image.
outputs = self.image_processor_tester.prepare_fake_eomt_outputs(inputs["pixel_values"].shape[0], patch_offsets)
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes)
self.assertEqual(segmentation[0].shape, (image.height, image.width))
def test_post_process_panoptic_segmentation(self):
processor = self.image_processing_class(**self.image_processor_dict)
image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
original_sizes = [image.size[::-1], image.size[::-1]]
# lets test for batched input of 2
outputs = self.image_processor_tester.prepare_fake_eomt_outputs(2)
segmentation = processor.post_process_panoptic_segmentation(outputs, original_sizes)
self.assertTrue(len(segmentation) == 2)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (image.height, image.width))
def test_post_process_instance_segmentation(self):
processor = self.image_processing_class(**self.image_processor_dict)
image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
original_sizes = [image.size[::-1], image.size[::-1]]
# lets test for batched input of 2
outputs = self.image_processor_tester.prepare_fake_eomt_outputs(2)
segmentation = processor.post_process_instance_segmentation(outputs, original_sizes)
self.assertTrue(len(segmentation) == 2)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (image.height, image.width))
| EomtImageProcessingTest |
python | MongoEngine__mongoengine | tests/fields/test_decimal128_field.py | {
"start": 234,
"end": 617
} | class ____(Document):
dec128_fld = Decimal128Field()
dec128_min_0 = Decimal128Field(min_value=0)
dec128_max_100 = Decimal128Field(max_value=100)
def generate_test_cls() -> Document:
Decimal128Document.drop_collection()
Decimal128Document(dec128_fld=None).save()
Decimal128Document(dec128_fld=Decimal(1)).save()
return Decimal128Document
| Decimal128Document |
python | huggingface__transformers | src/transformers/models/glm4v/processing_glm4v.py | {
"start": 1881,
"end": 14435
} | class ____(ProcessorMixin):
r"""
Constructs a GLM-4V processor which wraps a GLM-4V image processor and a GLM-4 tokenizer into a single processor.
[`~Glm4vProcessor.__call__`] and [`~Glm4vProcessor.decode`] for more information.
Args:
image_processor ([`Glm4vProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Glm4vVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[Glm4vProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
the text.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Glm4vProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
# If user has not requested video metadata, pop it
if not kwargs.get("return_metadata"):
video_metadata = videos_inputs.pop("video_metadata")
else:
video_metadata = videos_inputs["video_metadata"]
video_grid_thw = videos_inputs["video_grid_thw"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2
video_index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_frames = video_grid_thw[video_index][0]
video_structure = ""
metadata = video_metadata[video_index]
if metadata.fps is None:
logger.warning_once(
"SmolVLM requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 if metadata.fps is None else metadata.fps
timestamps = metadata.timestamps[::2] # mrope
unique_timestamps = []
for idx in range(0, len(timestamps)):
unique_timestamps.append(timestamps[idx])
selected_timestamps = unique_timestamps[:num_frames]
while len(selected_timestamps) < num_frames:
selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)
for frame_idx in range(num_frames):
timestamp_sec = selected_timestamps[frame_idx]
frame_structure = self.replace_frame_token_id(timestamp_sec)
video_structure += frame_structure
text[i] = text[i].replace(self.video_token, video_structure, 1)
num_image_tokens = (
video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0]
)
for frame_idx in range(num_frames):
if self.image_token in text[i]:
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
video_index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Glm4vProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
if video_sizes is not None:
videos_kwargs = Glm4vProcessorKwargs._defaults.get("videos_kwargs", {})
videos_kwargs.update(kwargs)
num_video_patches = [
self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
for video_size in video_sizes
]
num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
vision_data["num_video_tokens"] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(
generated_outputs,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def replace_frame_token_id(self, timestamp_sec):
return f"<|begin_of_image|>{self.image_token}<|end_of_image|>{int(timestamp_sec)}"
__all__ = ["Glm4vProcessor"]
| Glm4vProcessor |
python | kamyu104__LeetCode-Solutions | Python/reverse-prefix-of-word.py | {
"start": 29,
"end": 251
} | class ____(object):
def reversePrefix(self, word, ch):
"""
:type word: str
:type ch: str
:rtype: str
"""
i = word.find(ch)
return word[:i+1][::-1]+word[i+1:]
| Solution |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/base.py | {
"start": 30002,
"end": 31620
} | class ____(_DialogList[_T]):
"""
List of radio buttons. Only one can be checked at the same time.
:param values: List of (value, label) tuples.
"""
def __init__(
self,
values: Sequence[tuple[_T, AnyFormattedText]],
default: _T | None = None,
show_numbers: bool = False,
select_on_focus: bool = False,
open_character: str = "(",
select_character: str = "*",
close_character: str = ")",
container_style: str = "class:radio-list",
default_style: str = "class:radio",
selected_style: str = "class:radio-selected",
checked_style: str = "class:radio-checked",
number_style: str = "class:radio-number",
multiple_selection: bool = False,
show_cursor: bool = True,
show_scrollbar: bool = True,
) -> None:
if default is None:
default_values = None
else:
default_values = [default]
super().__init__(
values,
default_values=default_values,
select_on_focus=select_on_focus,
show_numbers=show_numbers,
open_character=open_character,
select_character=select_character,
close_character=close_character,
container_style=container_style,
default_style=default_style,
selected_style=selected_style,
checked_style=checked_style,
number_style=number_style,
multiple_selection=False,
show_cursor=show_cursor,
show_scrollbar=show_scrollbar,
)
| RadioList |
python | catalyst-team__catalyst | catalyst/metrics/_functional_metric.py | {
"start": 225,
"end": 3582
} | class ____(ICallbackBatchMetric):
"""Class for custom **batch-based** metrics in a functional way.
Args:
metric_fn: metric function, that get outputs,
targets and return score as torch.Tensor
metric_key: metric name
compute_on_call: Computes and returns metric value during metric call.
Used for per-batch logging. default: True
prefix: metric prefix
suffix: metric suffix
.. note::
Loader metrics calculated as average over all batch metrics.
Examples:
.. code-block:: python
import torch
from catalyst import metrics
import sklearn.metrics
outputs = torch.tensor([1, 0, 2, 1])
targets = torch.tensor([3, 0, 2, 2])
metric = metrics.FunctionalBatchMetric(
metric_fn=sklearn.metrics.accuracy_score,
metric_key="sk_accuracy",
)
metric.reset()
metric.update(batch_size=len(outputs), y_pred=outputs, y_true=targets)
metric.compute()
# (0.5, 0.0) # mean, std
metric.compute_key_value()
# {'sk_accuracy': 0.5, 'sk_accuracy/mean': 0.5, 'sk_accuracy/std': 0.0}
"""
def __init__(
self,
metric_fn: Callable,
metric_key: str,
compute_on_call: bool = True,
prefix: str = None,
suffix: str = None,
):
"""Init"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
self.metric_fn = metric_fn
self.metric_name = f"{self.prefix}{metric_key}{self.suffix}"
self.additive_metric = AdditiveMetric()
def reset(self):
"""Reset all statistics"""
self.additive_metric.reset()
def update(self, batch_size: int, *args, **kwargs) -> torch.Tensor:
"""
Calculate metric and update average metric
Args:
batch_size: current batch size for metric statistics aggregation
*args: args for metric_fn
**kwargs: kwargs for metric_fn
Returns:
custom metric
"""
value = self.metric_fn(*args, **kwargs)
self.additive_metric.update(float(value), batch_size)
return value
def update_key_value(
self, batch_size: int, *args, **kwargs
) -> Dict[str, torch.Tensor]:
"""
Calculate metric and update average metric
Args:
batch_size: current batch size for metric statistics aggregation
*args: args for metric_fn
**kwargs: kwargs for metric_fn
Returns:
Dict with one element-custom metric
"""
value = self.update(batch_size, *args, **kwargs)
return {f"{self.metric_name}": value}
def compute(self) -> torch.Tensor:
"""
Get metric average over all examples
Returns:
custom metric
"""
return self.additive_metric.compute()
def compute_key_value(self) -> Dict[str, torch.Tensor]:
"""
Get metric average over all examples
Returns:
Dict with one element-custom metric
"""
mean, std = self.compute()
return {
self.metric_name: mean,
f"{self.metric_name}/mean": mean,
f"{self.metric_name}/std": std,
}
| FunctionalBatchMetric |
python | ApeWorX__ape | src/ape/api/networks.py | {
"start": 24079,
"end": 29606
} | class ____(ManagerAccessMixin):
"""
A context manager for temporarily connecting to a network.
When entering the context, calls the :meth:`ape.api.providers.ProviderAPI.connect` method.
And conversely, when exiting, calls the :meth:`ape.api.providers.ProviderPAI.disconnect`
method, unless in a multi-chain context, in which case it disconnects all providers at
the very end of the Python session.
The method :meth:`ape.api.networks.NetworkAPI.use_provider` returns
an instance of this context manager.
Usage example::
from ape import networks
mainnet = networks.ethereum.mainnet # An instance of NetworkAPI
with mainnet.use_provider("infura"):
...
# Or, using choice-strings:
with networks.parse_network_choice("ethereum:local:test"):
...
"""
connected_providers: dict[str, "ProviderAPI"] = {}
provider_stack: list[str] = []
disconnect_map: dict[str, bool] = {}
# We store a provider object at the class level for use when disconnecting
# due to an exception, when interactive mode is set. If we don't hold on
# to a reference to this object, the provider is dropped and reconnecting results
# in losing state when using a spawned local provider
_recycled_provider: ClassVar[Optional["ProviderAPI"]] = None
def __init__(
self,
provider: "ProviderAPI",
disconnect_after: bool = False,
disconnect_on_exit: bool = True,
):
self._provider = provider
self._disconnect_after = disconnect_after
self._disconnect_on_exit = disconnect_on_exit
self._skipped_disconnect = False
@property
def empty(self) -> bool:
"""
``True`` when there are no providers in the context.
"""
return not self.connected_providers or not self.provider_stack
def __enter__(self, *args, **kwargs):
# If we have a recycled provider available, this means our last exit
# was due to an exception during interactive mode. We should resume that
# same connection, but also clear the object so we don't do this again
# in later provider contexts, which we would want to behave normally
if self._recycled_provider is not None:
# set inner var to the recycled provider for use in push_provider()
self._provider = self._recycled_provider
ProviderContextManager._recycled_provider = None
return self.push_provider()
def __exit__(self, exception, *args, **kwargs):
if not self._disconnect_on_exit and exception is not None:
# We want to skip disconnection when exiting due to an exception in interactive mode
if provider := self.network_manager.active_provider:
ProviderContextManager._recycled_provider = provider
else:
self.pop_provider()
def push_provider(self):
must_connect = not self._provider.is_connected
if must_connect:
self._provider.connect()
connection_id = self._provider.connection_id
if connection_id is None:
raise ProviderNotConnectedError()
self.provider_stack.append(connection_id)
self.disconnect_map[connection_id] = self._disconnect_after
if connection_id in self.connected_providers:
# Using already connected instance
if must_connect:
# Disconnect if had to connect to check chain ID
self._provider.disconnect()
self._provider = self.connected_providers[connection_id]
else:
# Adding provider for the first time. Retain connection.
self.connected_providers[connection_id] = self._provider
self.network_manager.active_provider = self._provider
return self._provider
def pop_provider(self):
if self.empty:
return
# Clear last provider
current_id = self.provider_stack.pop()
# Disconnect the provider in same cases.
if self.disconnect_map.get(current_id):
if provider := self.network_manager.active_provider:
provider.disconnect()
del self.disconnect_map[current_id]
if current_id in self.connected_providers:
del self.connected_providers[current_id]
if not self.provider_stack:
self.network_manager.active_provider = None
return
# Reset the original active provider
prior_id = self.provider_stack[-1]
if prior_id == current_id:
# Active provider is not changing
return
if previous_provider := self.connected_providers[prior_id]:
self.network_manager.active_provider = previous_provider
def disconnect_all(self):
if self.empty:
return
for provider in self.connected_providers.values():
provider.disconnect()
self.network_manager.active_provider = None
self.connected_providers = {}
def _connect_provider(provider: "ProviderAPI") -> "ProviderAPI":
connection_id = provider.connection_id
if connection_id in ProviderContextManager.connected_providers:
# Likely multi-chain testing or utilizing multiple on-going connections.
provider = ProviderContextManager.connected_providers[connection_id]
if not provider.is_connected:
provider.connect()
return provider
| ProviderContextManager |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 14865,
"end": 15487
} | class ____(VOTableSpecWarning):
"""
The VOTable specification uses the attribute name ``ID`` (with
uppercase letters) to specify unique identifiers. Some
VOTable-producing tools use the more standard lowercase ``id``
instead. ``astropy.io.votable`` accepts ``id`` and emits this warning if
``verify`` is ``'warn'``.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:name>`__
"""
message_template = "ID attribute not capitalized"
| W09 |
python | keras-team__keras | keras/src/saving/file_editor_test.py | {
"start": 659,
"end": 4051
} | class ____(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.save(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
@pytest.mark.requires_trainable_backend
def test_scalar_weight(self):
model = keras.Sequential(name="my_sequential")
model.add(keras.Input(shape=(1,), name="my_input"))
model.add(keras.layers.Dense(1, activation="sigmoid", name="my_dense"))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.array([[1]]), np.array([[1]]), verbose=0)
model_fpath = os.path.join(self.get_temp_dir(), "model.keras")
weights_fpath = os.path.join(self.get_temp_dir(), "model.weights.h5")
model.save(model_fpath)
model.save_weights(weights_fpath)
model_editor = KerasFileEditor(model_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_editor.weights_dict)), 8
)
model_weights_editor = KerasFileEditor(weights_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_weights_editor.weights_dict)), 8
)
| SavingTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 126465,
"end": 148045
} | class ____(Request):
"""
Create a new task
:param name: Task name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
:param parent: Parent task ID Must be a completed task.
:type parent: str
:param project: Project ID of the project to which this task is assigned Must
exist[ab]
:type project: str
:param output_dest: Output storage ID Must be a reference to an existing
storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param script: Script info
:type script: Script
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param models: Task models
:type models: TaskModels
:param container: Docker container parameters
:type container: dict
"""
_service = "tasks"
_action = "create"
_version = "2.13"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {"description": "System defined type", "type": "string"},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {"description": "The task model name", "type": "string"},
},
"required": ["name", "model"],
"type": "object",
},
"task_models": {
"properties": {
"input": {
"description": "The list of task input models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
"output": {
"description": "The list of task output models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
},
"type": "object",
},
"task_type_enum": {
"enum": [
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
},
"properties": {
"comment": {"description": "Free text comment ", "type": "string"},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": "object",
},
"container": {
"type": "object",
"description": "Docker container parameters",
"additionalProperties": {"type": ["string", "null"]},
},
"execution": {
"$ref": "#/definitions/execution",
"description": "Task execution params",
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": "object",
},
"models": {
"$ref": "#/definitions/task_models",
"description": "Task models",
},
"name": {
"description": "Task name. Unique within the company.",
"type": "string",
},
"output_dest": {
"description": "Output storage id Must be a reference to an existing storage.",
"type": "string",
},
"parent": {
"description": "Parent task id Must be a completed task.",
"type": "string",
},
"project": {
"description": "Project ID of the project to which this task is assigned Must exist[ab]",
"type": "string",
},
"script": {"$ref": "#/definitions/script", "description": "Script info"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"type": {
"$ref": "#/definitions/task_type_enum",
"description": "Type of task",
},
},
"required": ["name", "type"],
"type": "object",
}
def __init__(
self,
name: str,
type: Any,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
comment: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
input: Any = None,
output_dest: Optional[str] = None,
execution: Any = None,
script: Any = None,
hyperparams: Optional[dict] = None,
configuration: Optional[dict] = None,
models: Any = None,
container: Optional[dict] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.tags = tags
self.system_tags = system_tags
self.type = type
self.comment = comment
self.parent = parent
self.project = project
self.input = input
self.output_dest = output_dest
self.execution = execution
self.script = script
self.hyperparams = hyperparams
self.configuration = configuration
self.models = models
self.container = container
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("input")
def input(self) -> Any:
return self._property_input
@input.setter
def input(self, value: Any) -> None:
self._property_input = value
@schema_property("output_dest")
def output_dest(self) -> Optional[str]:
return self._property_output_dest
@output_dest.setter
def output_dest(self, value: Optional[str]) -> None:
if value is None:
self._property_output_dest = None
return
self.assert_isinstance(value, "output_dest", six.string_types)
self._property_output_dest = value
@schema_property("execution")
def execution(self) -> Any:
return self._property_execution
@execution.setter
def execution(self, value: Any) -> None:
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("script")
def script(self) -> Any:
return self._property_script
@script.setter
def script(self, value: Any) -> None:
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("hyperparams")
def hyperparams(self) -> Optional[dict]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self) -> Optional[dict]:
return self._property_configuration
@configuration.setter
def configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_configuration = value
@schema_property("models")
def models(self) -> Any:
return self._property_models
@models.setter
def models(self, value: Any) -> None:
if value is None:
self._property_models = None
return
if isinstance(value, dict):
value = TaskModels.from_dict(value)
else:
self.assert_isinstance(value, "models", TaskModels)
self._property_models = value
@schema_property("container")
def container(self) -> Optional[dict]:
return self._property_container
@container.setter
def container(self, value: Optional[dict]) -> None:
if value is None:
self._property_container = None
return
self.assert_isinstance(value, "container", dict)
self._property_container = value
| CreateRequest |
python | tensorflow__tensorflow | tensorflow/python/keras/initializers/initializers_v2.py | {
"start": 6159,
"end": 7469
} | class ____(Initializer):
"""Initializer that generates tensors with constant values.
Also available via the shortcut function `tf.keras.initializers.constant`.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to `self.value`.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not specified,
`tf.keras.backend.floatx()` is used,
which default to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
del kwargs
return constant_op.constant(
self.value, dtype=_get_dtype(dtype), shape=shape)
def get_config(self):
return {'value': self.value}
| Constant |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_core/test_scheduler.py | {
"start": 969,
"end": 38402
} | class ____:
"""Tests scheduler."""
@pytest.mark.parametrize(
("executor", "persistence", "kind"),
[
("CeleryExecutor", False, "Deployment"),
("CeleryExecutor", True, "Deployment"),
("CeleryKubernetesExecutor", True, "Deployment"),
("CeleryExecutor,KubernetesExecutor", True, "Deployment"),
("KubernetesExecutor", True, "Deployment"),
("LocalKubernetesExecutor", False, "Deployment"),
("LocalKubernetesExecutor", True, "StatefulSet"),
("LocalExecutor", True, "StatefulSet"),
("LocalExecutor,KubernetesExecutor", True, "StatefulSet"),
("LocalExecutor", False, "Deployment"),
],
)
def test_scheduler_kind(self, executor, persistence, kind):
"""Test scheduler kind is StatefulSet only with a local executor & worker persistence is enabled."""
docs = render_chart(
values={
"executor": executor,
"workers": {"persistence": {"enabled": persistence}},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert kind == jmespath.search("kind", docs[0])
def test_should_add_extra_containers(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"extraContainers": [
{"name": "{{ .Chart.Name }}", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[-1]", docs[0]) == {
"name": "airflow",
"image": "test-registry/test-repo:test-tag",
}
def test_should_template_extra_containers(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"extraContainers": [{"name": "{{ .Release.Name }}-test-container"}],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[-1]", docs[0]) == {
"name": "release-name-test-container"
}
def test_disable_wait_for_migration(self):
docs = render_chart(
values={
"scheduler": {
"waitForMigrations": {"enabled": False},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
actual = jmespath.search(
"spec.template.spec.initContainers[?name=='wait-for-airflow-migrations']", docs[0]
)
assert actual is None
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"scheduler": {
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.initContainers[-1]", docs[0]) == {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
}
def test_should_template_extra_init_containers(self):
docs = render_chart(
values={
"scheduler": {
"extraInitContainers": [{"name": "{{ .Release.Name }}-test-init-container"}],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.initContainers[-1]", docs[0]) == {
"name": "release-name-test-init-container"
}
def test_should_add_extra_volume_and_extra_volume_mount(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"extraVolumes": [{"name": "test-volume-{{ .Chart.Name }}", "emptyDir": {}}],
"extraVolumeMounts": [
{"name": "test-volume-{{ .Chart.Name }}", "mountPath": "/opt/test"}
],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "test-volume-airflow" in jmespath.search("spec.template.spec.volumes[*].name", docs[0])
assert "test-volume-airflow" in jmespath.search(
"spec.template.spec.containers[0].volumeMounts[*].name", docs[0]
)
assert (
jmespath.search("spec.template.spec.initContainers[0].volumeMounts[-1].name", docs[0])
== "test-volume-airflow"
)
def test_should_add_global_volume_and_global_volume_mount(self):
docs = render_chart(
values={
"volumes": [{"name": "test-volume", "emptyDir": {}}],
"volumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "test-volume" in jmespath.search("spec.template.spec.volumes[*].name", docs[0])
assert "test-volume" in jmespath.search(
"spec.template.spec.containers[0].volumeMounts[*].name", docs[0]
)
def test_should_add_extraEnvs(self):
docs = render_chart(
values={
"scheduler": {
"env": [
{"name": "TEST_ENV_1", "value": "test_env_1"},
{
"name": "TEST_ENV_2",
"valueFrom": {"secretKeyRef": {"name": "my-secret", "key": "my-key"}},
},
{
"name": "TEST_ENV_3",
"valueFrom": {"configMapKeyRef": {"name": "my-config-map", "key": "my-key"}},
},
],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.template.spec.containers[0].env", docs[0]
)
assert {
"name": "TEST_ENV_2",
"valueFrom": {"secretKeyRef": {"name": "my-secret", "key": "my-key"}},
} in jmespath.search("spec.template.spec.containers[0].env", docs[0])
assert {
"name": "TEST_ENV_3",
"valueFrom": {"configMapKeyRef": {"name": "my-config-map", "key": "my-key"}},
} in jmespath.search("spec.template.spec.containers[0].env", docs[0])
def test_should_add_extraEnvs_to_wait_for_migration_container(self):
docs = render_chart(
values={
"scheduler": {
"waitForMigrations": {
"env": [{"name": "TEST_ENV_1", "value": "test_env_1"}],
},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "TEST_ENV_1", "value": "test_env_1"} in jmespath.search(
"spec.template.spec.initContainers[0].env", docs[0]
)
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"scheduler": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "test_label" in jmespath.search("spec.template.metadata.labels", docs[0])
assert jmespath.search("spec.template.metadata.labels", docs[0])["test_label"] == "test_label_value"
@pytest.mark.parametrize(
("revision_history_limit", "global_revision_history_limit"),
[(8, 10), (10, 8), (8, None), (None, 10), (None, None)],
)
def test_revision_history_limit(self, revision_history_limit, global_revision_history_limit):
values = {"scheduler": {}}
if revision_history_limit:
values["scheduler"]["revisionHistoryLimit"] = revision_history_limit
if global_revision_history_limit:
values["revisionHistoryLimit"] = global_revision_history_limit
docs = render_chart(
values=values,
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
expected_result = revision_history_limit or global_revision_history_limit
assert jmespath.search("spec.revisionHistoryLimit", docs[0]) == expected_result
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"scheduler": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("kind", docs[0]) == "Deployment"
assert (
jmespath.search(
"spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
== "foo"
)
assert (
jmespath.search(
"spec.template.spec.nodeSelector.diskType",
docs[0],
)
== "ssd"
)
assert (
jmespath.search(
"spec.template.spec.tolerations[0].key",
docs[0],
)
== "dynamic-pods"
)
def test_affinity_tolerations_topology_spread_constraints_and_node_selector_precedence(self):
"""When given both global and scheduler affinity etc, scheduler affinity etc is used."""
expected_affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
}
expected_topology_spread_constraints = {
"maxSkew": 1,
"topologyKey": "foo",
"whenUnsatisfiable": "ScheduleAnyway",
"labelSelector": {"matchLabels": {"tier": "airflow"}},
}
docs = render_chart(
values={
"scheduler": {
"affinity": expected_affinity,
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"topologySpreadConstraints": [expected_topology_spread_constraints],
"nodeSelector": {"type": "ssd"},
},
"affinity": {
"nodeAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [
{
"weight": 1,
"preference": {
"matchExpressions": [
{"key": "not-me", "operator": "In", "values": ["true"]},
]
},
}
]
}
},
"tolerations": [
{"key": "not-me", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"topologySpreadConstraints": [
{
"maxSkew": 1,
"topologyKey": "not-me",
"whenUnsatisfiable": "ScheduleAnyway",
"labelSelector": {"matchLabels": {"tier": "airflow"}},
}
],
"nodeSelector": {"type": "not-me"},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert expected_affinity == jmespath.search("spec.template.spec.affinity", docs[0])
assert (
jmespath.search(
"spec.template.spec.nodeSelector.type",
docs[0],
)
== "ssd"
)
tolerations = jmespath.search("spec.template.spec.tolerations", docs[0])
assert len(tolerations) == 1
assert tolerations[0]["key"] == "dynamic-pods"
assert expected_topology_spread_constraints == jmespath.search(
"spec.template.spec.topologySpreadConstraints[0]", docs[0]
)
def test_scheduler_name(self):
docs = render_chart(
values={"schedulerName": "airflow-scheduler"},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert (
jmespath.search(
"spec.template.spec.schedulerName",
docs[0],
)
== "airflow-scheduler"
)
def test_should_create_default_affinity(self):
docs = render_chart(show_only=["templates/scheduler/scheduler-deployment.yaml"])
assert jmespath.search(
"spec.template.spec.affinity.podAntiAffinity."
"preferredDuringSchedulingIgnoredDuringExecution[0]."
"podAffinityTerm.labelSelector.matchLabels",
docs[0],
) == {"component": "scheduler"}
def test_livenessprobe_values_are_configurable(self):
docs = render_chart(
values={
"scheduler": {
"livenessProbe": {
"initialDelaySeconds": 111,
"timeoutSeconds": 222,
"failureThreshold": 333,
"periodSeconds": 444,
"command": ["sh", "-c", "echo", "wow such test"],
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.initialDelaySeconds", docs[0])
== 111
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.timeoutSeconds", docs[0]) == 222
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.failureThreshold", docs[0]) == 333
)
assert jmespath.search("spec.template.spec.containers[0].livenessProbe.periodSeconds", docs[0]) == 444
assert jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0]) == [
"sh",
"-c",
"echo",
"wow such test",
]
def test_startupprobe_values_are_configurable(self):
docs = render_chart(
values={
"scheduler": {
"startupProbe": {
"timeoutSeconds": 111,
"failureThreshold": 222,
"periodSeconds": 333,
"command": ["sh", "-c", "echo", "wow such test"],
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].startupProbe.timeoutSeconds", docs[0]) == 111
assert (
jmespath.search("spec.template.spec.containers[0].startupProbe.failureThreshold", docs[0]) == 222
)
assert jmespath.search("spec.template.spec.containers[0].startupProbe.periodSeconds", docs[0]) == 333
assert jmespath.search("spec.template.spec.containers[0].startupProbe.exec.command", docs[0]) == [
"sh",
"-c",
"echo",
"wow such test",
]
@pytest.mark.parametrize(
("airflow_version", "probe_command"),
[
("1.9.0", "from airflow.jobs.scheduler_job import SchedulerJob"),
("2.1.0", "airflow jobs check --job-type SchedulerJob --hostname $(hostname)"),
("2.5.0", "airflow jobs check --job-type SchedulerJob --local"),
],
)
def test_livenessprobe_command_depends_on_airflow_version(self, airflow_version, probe_command):
docs = render_chart(
values={"airflowVersion": f"{airflow_version}"},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert (
probe_command
in jmespath.search("spec.template.spec.containers[0].livenessProbe.exec.command", docs[0])[-1]
)
@pytest.mark.parametrize(
("airflow_version", "probe_command"),
[
("1.9.0", "from airflow.jobs.scheduler_job import SchedulerJob"),
("2.1.0", "airflow jobs check --job-type SchedulerJob --hostname $(hostname)"),
("2.5.0", "airflow jobs check --job-type SchedulerJob --local"),
],
)
def test_startupprobe_command_depends_on_airflow_version(self, airflow_version, probe_command):
docs = render_chart(
values={"airflowVersion": f"{airflow_version}"},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert (
probe_command
in jmespath.search("spec.template.spec.containers[0].startupProbe.exec.command", docs[0])[-1]
)
@pytest.mark.parametrize(
("log_values", "expected_volume"),
[
({"persistence": {"enabled": False}}, {"emptyDir": {}}),
(
{"persistence": {"enabled": False}, "emptyDirConfig": {"sizeLimit": "10Gi"}},
{"emptyDir": {"sizeLimit": "10Gi"}},
),
(
{"persistence": {"enabled": True}},
{"persistentVolumeClaim": {"claimName": "release-name-logs"}},
),
(
{"persistence": {"enabled": True, "existingClaim": "test-claim"}},
{"persistentVolumeClaim": {"claimName": "test-claim"}},
),
],
)
def test_logs_persistence_changes_volume(self, log_values, expected_volume):
docs = render_chart(
values={"logs": log_values},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert {"name": "logs", **expected_volume} in jmespath.search("spec.template.spec.volumes", docs[0])
def test_scheduler_security_contexts_are_configurable(self):
docs = render_chart(
values={
"scheduler": {
"securityContexts": {
"pod": {
"fsGroup": 1000,
"runAsGroup": 1001,
"runAsNonRoot": True,
"runAsUser": 2000,
},
"container": {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
},
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].securityContext", docs[0]) == {
"allowPrivilegeEscalation": False,
"readOnlyRootFilesystem": True,
}
assert jmespath.search("spec.template.spec.securityContext", docs[0]) == {
"runAsUser": 2000,
"runAsGroup": 1001,
"fsGroup": 1000,
"runAsNonRoot": True,
}
def test_scheduler_security_context_legacy(self):
docs = render_chart(
values={
"scheduler": {
"securityContext": {
"fsGroup": 1000,
"runAsGroup": 1001,
"runAsNonRoot": True,
"runAsUser": 2000,
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.securityContext", docs[0]) == {
"runAsUser": 2000,
"runAsGroup": 1001,
"fsGroup": 1000,
"runAsNonRoot": True,
}
def test_scheduler_resources_are_configurable(self):
docs = render_chart(
values={
"scheduler": {
"resources": {
"limits": {"cpu": "200m", "memory": "128Mi"},
"requests": {"cpu": "300m", "memory": "169Mi"},
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0]) == "128Mi"
assert jmespath.search("spec.template.spec.containers[0].resources.limits.cpu", docs[0]) == "200m"
assert (
jmespath.search("spec.template.spec.containers[0].resources.requests.memory", docs[0]) == "169Mi"
)
assert jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0]) == "300m"
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.limits.memory", docs[0])
== "128Mi"
)
assert jmespath.search("spec.template.spec.initContainers[0].resources.limits.cpu", docs[0]) == "200m"
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.requests.memory", docs[0])
== "169Mi"
)
assert (
jmespath.search("spec.template.spec.initContainers[0].resources.requests.cpu", docs[0]) == "300m"
)
def test_scheduler_resources_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
def test_no_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": None}, show_only=["templates/scheduler/scheduler-deployment.yaml"]
)
volume_mounts = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
volume_mounts_init = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts_init)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
volume_mount = {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
}
assert volume_mount in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert volume_mount in jmespath.search("spec.template.spec.initContainers[0].volumeMounts", docs[0])
@pytest.mark.parametrize(
("executor", "persistence", "update_strategy", "expected_update_strategy"),
[
("CeleryExecutor", False, {"rollingUpdate": {"partition": 0}}, None),
("CeleryExecutor", True, {"rollingUpdate": {"partition": 0}}, None),
("LocalKubernetesExecutor", False, {"rollingUpdate": {"partition": 0}}, None),
("LocalExecutor,KubernetesExecutor", False, {"rollingUpdate": {"partition": 0}}, None),
(
"LocalKubernetesExecutor",
True,
{"rollingUpdate": {"partition": 0}},
{"rollingUpdate": {"partition": 0}},
),
(
"LocalExecutor,KubernetesExecutor",
True,
{"rollingUpdate": {"partition": 0}},
{"rollingUpdate": {"partition": 0}},
),
("LocalExecutor", False, {"rollingUpdate": {"partition": 0}}, None),
("LocalExecutor", True, {"rollingUpdate": {"partition": 0}}, {"rollingUpdate": {"partition": 0}}),
("LocalExecutor", True, None, None),
("LocalExecutor,KubernetesExecutor", True, None, None),
],
)
def test_scheduler_update_strategy(
self, executor, persistence, update_strategy, expected_update_strategy
):
"""UpdateStrategy should only be used when we have a local executor and workers.persistence."""
docs = render_chart(
values={
"executor": executor,
"workers": {"persistence": {"enabled": persistence}},
"scheduler": {"updateStrategy": update_strategy},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert expected_update_strategy == jmespath.search("spec.updateStrategy", docs[0])
@pytest.mark.parametrize(
("executor", "persistence", "strategy", "expected_strategy"),
[
("LocalExecutor", False, None, None),
("LocalExecutor", False, {"type": "Recreate"}, {"type": "Recreate"}),
("LocalExecutor", True, {"type": "Recreate"}, None),
("LocalKubernetesExecutor", False, {"type": "Recreate"}, {"type": "Recreate"}),
("LocalKubernetesExecutor", True, {"type": "Recreate"}, None),
("CeleryExecutor", True, None, None),
("CeleryExecutor", False, None, None),
("CeleryExecutor", True, {"type": "Recreate"}, {"type": "Recreate"}),
(
"CeleryExecutor",
False,
{"rollingUpdate": {"maxSurge": "100%", "maxUnavailable": "50%"}},
{"rollingUpdate": {"maxSurge": "100%", "maxUnavailable": "50%"}},
),
],
)
def test_scheduler_strategy(self, executor, persistence, strategy, expected_strategy):
"""Strategy should be used when we aren't using both a local executor and workers.persistence."""
docs = render_chart(
values={
"executor": executor,
"workers": {"persistence": {"enabled": persistence}},
"scheduler": {"strategy": strategy},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert expected_strategy == jmespath.search("spec.strategy", docs[0])
def test_default_command_and_args(self):
docs = render_chart(show_only=["templates/scheduler/scheduler-deployment.yaml"])
assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) is None
assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == [
"bash",
"-c",
"exec airflow scheduler",
]
@pytest.mark.parametrize("command", [None, ["custom", "command"]])
@pytest.mark.parametrize("args", [None, ["custom", "args"]])
def test_command_and_args_overrides(self, command, args):
docs = render_chart(
values={"scheduler": {"command": command, "args": args}},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert command == jmespath.search("spec.template.spec.containers[0].command", docs[0])
assert args == jmespath.search("spec.template.spec.containers[0].args", docs[0])
def test_command_and_args_overrides_are_templated(self):
docs = render_chart(
values={"scheduler": {"command": ["{{ .Release.Name }}"], "args": ["{{ .Release.Service }}"]}},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].command", docs[0]) == ["release-name"]
assert jmespath.search("spec.template.spec.containers[0].args", docs[0]) == ["Helm"]
@pytest.mark.parametrize(
"dags_values",
[
{"gitSync": {"enabled": True}},
{"gitSync": {"enabled": True}, "persistence": {"enabled": True}},
],
)
def test_dags_gitsync_sidecar_and_init_container_with_airflow_2(self, dags_values):
docs = render_chart(
values={"dags": dags_values, "airflowVersion": "2.10.4"},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "git-sync" in [c["name"] for c in jmespath.search("spec.template.spec.containers", docs[0])]
assert "git-sync-init" in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", docs[0])
]
@pytest.mark.parametrize(
("airflow_version", "dag_processor", "executor", "skip_dags_mount"),
[
# standalone dag_processor is optional on 2.10, so we can skip dags for non-local if its on
("2.10.4", True, "LocalExecutor", False),
("2.10.4", True, "CeleryExecutor", True),
("2.10.4", True, "KubernetesExecutor", True),
("2.10.4", True, "LocalKubernetesExecutor", False),
# but if standalone dag_processor is off, we must always have dags
("2.10.4", False, "LocalExecutor", False),
("2.10.4", False, "CeleryExecutor", False),
("2.10.4", False, "KubernetesExecutor", False),
("2.10.4", False, "LocalKubernetesExecutor", False),
# by default, we don't have a standalone dag_processor
("2.10.4", None, "LocalExecutor", False),
("2.10.4", None, "CeleryExecutor", False),
("2.10.4", None, "KubernetesExecutor", False),
("2.10.4", None, "LocalKubernetesExecutor", False),
# but in airflow 3, standalone dag_processor required, so we again can skip dags for non-local
("3.0.0", None, "LocalExecutor", False),
("3.0.0", None, "CeleryExecutor", True),
("3.0.0", None, "KubernetesExecutor", True),
("3.0.0", None, "LocalKubernetesExecutor", False),
],
)
def test_dags_mount_and_gitsync_expected_with_dag_processor(
self, airflow_version, dag_processor, executor, skip_dags_mount
):
"""
DAG Processor can move gitsync and DAGs mount from the scheduler to the DAG Processor only.
The only exception is when we have a Local executor.
In these cases, the scheduler does the worker role and needs access to DAGs anyway.
"""
values = {
"airflowVersion": airflow_version,
"executor": executor,
"dags": {"gitSync": {"enabled": True}, "persistence": {"enabled": True}},
"scheduler": {"logGroomerSidecar": {"enabled": False}},
}
if dag_processor is not None:
values["dagProcessor"] = {"enabled": dag_processor}
docs = render_chart(
values=values,
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
if skip_dags_mount:
assert "dags" not in [
vm["name"] for vm in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
]
assert "dags" not in [vm["name"] for vm in jmespath.search("spec.template.spec.volumes", docs[0])]
assert len(jmespath.search("spec.template.spec.containers", docs[0])) == 1
else:
assert "dags" in [
vm["name"] for vm in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
]
assert "dags" in [vm["name"] for vm in jmespath.search("spec.template.spec.volumes", docs[0])]
assert "git-sync" in [
c["name"] for c in jmespath.search("spec.template.spec.containers", docs[0])
]
assert "git-sync-init" in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", docs[0])
]
def test_persistence_volume_annotations(self):
docs = render_chart(
values={"executor": "LocalExecutor", "workers": {"persistence": {"annotations": {"foo": "bar"}}}},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.volumeClaimTemplates[0].metadata.annotations", docs[0]) == {"foo": "bar"}
@pytest.mark.parametrize(
"executor",
[
"LocalExecutor",
"LocalKubernetesExecutor",
"CeleryExecutor",
"KubernetesExecutor",
"CeleryKubernetesExecutor",
"CeleryExecutor,KubernetesExecutor",
],
)
def test_scheduler_deployment_has_executor_label(self, executor):
docs = render_chart(
values={"executor": executor},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert len(docs) == 1
assert executor.replace(",", "-") == docs[0]["metadata"]["labels"].get("executor")
def test_should_add_component_specific_annotations(self):
docs = render_chart(
values={
"scheduler": {
"annotations": {"test_annotation": "test_annotation_value"},
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert "annotations" in jmespath.search("metadata", docs[0])
assert jmespath.search("metadata.annotations", docs[0])["test_annotation"] == "test_annotation_value"
def test_scheduler_pod_hostaliases(self):
docs = render_chart(
values={
"scheduler": {
"hostAliases": [{"ip": "127.0.0.1", "hostnames": ["foo.local"]}],
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.hostAliases[0].ip", docs[0]) == "127.0.0.1"
assert jmespath.search("spec.template.spec.hostAliases[0].hostnames[0]", docs[0]) == "foo.local"
def test_scheduler_template_storage_class_name(self):
docs = render_chart(
values={
"workers": {
"persistence": {
"storageClassName": "{{ .Release.Name }}-storage-class",
"enabled": True,
}
},
"logs": {"persistence": {"enabled": False}},
"executor": "LocalExecutor",
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert (
jmespath.search("spec.volumeClaimTemplates[0].spec.storageClassName", docs[0])
== "release-name-storage-class"
)
def test_persistent_volume_claim_retention_policy(self):
docs = render_chart(
values={
"executor": "LocalExecutor",
"workers": {
"persistence": {
"enabled": True,
"persistentVolumeClaimRetentionPolicy": {"whenDeleted": "Delete"},
}
},
},
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert jmespath.search("spec.persistentVolumeClaimRetentionPolicy", docs[0]) == {
"whenDeleted": "Delete",
}
@pytest.mark.parametrize(
("scheduler_values", "expected"),
[
({}, 10),
({"scheduler": {"terminationGracePeriodSeconds": 1200}}, 1200),
],
)
def test_scheduler_termination_grace_period_seconds(self, scheduler_values, expected):
docs = render_chart(
values=scheduler_values,
show_only=["templates/scheduler/scheduler-deployment.yaml"],
)
assert expected == jmespath.search("spec.template.spec.terminationGracePeriodSeconds", docs[0])
| TestScheduler |
python | jazzband__django-model-utils | model_utils/fields.py | {
"start": 8838,
"end": 9857
} | class ____(_SplitFieldBase):
def contribute_to_class(self, cls: type[models.Model], name: str, *args: Any, **kwargs: Any) -> None:
if not cls._meta.abstract:
excerpt_field: models.TextField = models.TextField(editable=False)
cls.add_to_class(_excerpt_field_name(name), excerpt_field)
super().contribute_to_class(cls, name, *args, **kwargs)
setattr(cls, self.name, SplitDescriptor(self))
def pre_save(self, model_instance: models.Model, add: bool) -> str:
value: SplitText = super().pre_save(model_instance, add)
excerpt = get_excerpt(value.content)
setattr(model_instance, _excerpt_field_name(self.attname), excerpt)
return value.content
def value_to_string(self, obj: models.Model) -> str:
value = self.value_from_object(obj)
return value.content
def get_prep_value(self, value: Any) -> str:
try:
return value.content
except AttributeError:
return value
| SplitField |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/vector/vectorize_action.py | {
"start": 7717,
"end": 8698
} | class ____(VectorizeTransformAction):
"""Clip the continuous action within the valid :class:`Box` observation space bound.
Example - Passing an out-of-bounds action to the environment to be clipped.
>>> import numpy as np
>>> import gymnasium as gym
>>> envs = gym.make_vec("MountainCarContinuous-v0", num_envs=3)
>>> envs = ClipAction(envs)
>>> _ = envs.action_space.seed(123)
>>> obs, info = envs.reset(seed=123)
>>> obs, rew, term, trunc, info = envs.step(np.array([5.0, -5.0, 2.0]))
>>> envs.close()
>>> obs
array([[-0.4624777 , 0.00105192],
[-0.44504836, -0.00209899],
[-0.42884544, 0.00080468]], dtype=float32)
"""
def __init__(self, env: VectorEnv):
"""Constructor for the Clip Action wrapper.
Args:
env: The vector environment to wrap
"""
super().__init__(env, transform_action.ClipAction)
| ClipAction |
python | davidhalter__jedi | test/completion/descriptors.py | {
"start": 1148,
"end": 2140
} | class ____():
def __init__(self, a):
self.a = a
@property
def ret(self):
return self.a
@ret.setter
def ret(self, value):
return 1.0
def ret2(self):
return self.a
ret2 = property(ret2)
@property
def nested(self):
""" causes recusions in properties, should work """
return self.ret
@property
def nested2(self):
""" causes recusions in properties, should not work """
return self.nested2
@property
def join1(self):
""" mutual recusion """
return self.join2
@property
def join2(self):
""" mutual recusion """
return self.join1
#? str()
PropClass("").ret
#? []
PropClass().ret.
#? str()
PropClass("").ret2
#?
PropClass().ret2
#? int()
PropClass(1).nested
#? []
PropClass().nested.
#?
PropClass(1).nested2
#? []
PropClass().nested2.
#?
PropClass(1).join1
# -----------------
# staticmethod/classmethod
# -----------------
| PropClass |
python | celery__celery | t/unit/backends/test_cosmosdbsql.py | {
"start": 323,
"end": 4994
} | class ____:
def setup_method(self):
self.url = "cosmosdbsql://:key@endpoint"
self.backend = CosmosDBSQLBackend(app=self.app, url=self.url)
def test_missing_third_party_sdk(self):
pydocumentdb = cosmosdbsql.pydocumentdb
try:
cosmosdbsql.pydocumentdb = None
with pytest.raises(ImproperlyConfigured):
CosmosDBSQLBackend(app=self.app, url=self.url)
finally:
cosmosdbsql.pydocumentdb = pydocumentdb
def test_bad_connection_url(self):
with pytest.raises(ImproperlyConfigured):
CosmosDBSQLBackend._parse_url(
"cosmosdbsql://:key@")
with pytest.raises(ImproperlyConfigured):
CosmosDBSQLBackend._parse_url(
"cosmosdbsql://:@host")
with pytest.raises(ImproperlyConfigured):
CosmosDBSQLBackend._parse_url(
"cosmosdbsql://corrupted")
def test_default_connection_url(self):
endpoint, password = CosmosDBSQLBackend._parse_url(
"cosmosdbsql://:key@host")
assert password == "key"
assert endpoint == "https://host:443"
endpoint, password = CosmosDBSQLBackend._parse_url(
"cosmosdbsql://:key@host:443")
assert password == "key"
assert endpoint == "https://host:443"
endpoint, password = CosmosDBSQLBackend._parse_url(
"cosmosdbsql://:key@host:8080")
assert password == "key"
assert endpoint == "http://host:8080"
def test_bad_partition_key(self):
with pytest.raises(ValueError):
CosmosDBSQLBackend._get_partition_key("")
with pytest.raises(ValueError):
CosmosDBSQLBackend._get_partition_key(" ")
with pytest.raises(ValueError):
CosmosDBSQLBackend._get_partition_key(None)
def test_bad_consistency_level(self):
with pytest.raises(ImproperlyConfigured):
CosmosDBSQLBackend(app=self.app, url=self.url,
consistency_level="DoesNotExist")
@patch(MODULE_TO_MOCK + ".DocumentClient")
def test_create_client(self, mock_factory):
mock_instance = Mock()
mock_factory.return_value = mock_instance
backend = CosmosDBSQLBackend(app=self.app, url=self.url)
# ensure database and collection get created on client access...
assert mock_instance.CreateDatabase.call_count == 0
assert mock_instance.CreateCollection.call_count == 0
assert backend._client is not None
assert mock_instance.CreateDatabase.call_count == 1
assert mock_instance.CreateCollection.call_count == 1
# ...but only once per backend instance
assert backend._client is not None
assert mock_instance.CreateDatabase.call_count == 1
assert mock_instance.CreateCollection.call_count == 1
@patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client")
def test_get(self, mock_client):
self.backend.get(b"mykey")
mock_client.ReadDocument.assert_has_calls(
[call("dbs/celerydb/colls/celerycol/docs/mykey",
{"partitionKey": "mykey"}),
call().get("value")])
@patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client")
def test_get_missing(self, mock_client):
mock_client.ReadDocument.side_effect = \
cosmosdbsql.HTTPFailure(cosmosdbsql.ERROR_NOT_FOUND)
assert self.backend.get(b"mykey") is None
@patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client")
def test_set(self, mock_client):
self.backend._set_with_state(b"mykey", "myvalue", states.SUCCESS)
mock_client.CreateDocument.assert_called_once_with(
"dbs/celerydb/colls/celerycol",
{"id": "mykey", "value": "myvalue"},
{"partitionKey": "mykey"})
@patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client")
def test_mget(self, mock_client):
keys = [b"mykey1", b"mykey2"]
self.backend.mget(keys)
mock_client.ReadDocument.assert_has_calls(
[call("dbs/celerydb/colls/celerycol/docs/mykey1",
{"partitionKey": "mykey1"}),
call().get("value"),
call("dbs/celerydb/colls/celerycol/docs/mykey2",
{"partitionKey": "mykey2"}),
call().get("value")])
@patch(MODULE_TO_MOCK + ".CosmosDBSQLBackend._client")
def test_delete(self, mock_client):
self.backend.delete(b"mykey")
mock_client.DeleteDocument.assert_called_once_with(
"dbs/celerydb/colls/celerycol/docs/mykey",
{"partitionKey": "mykey"})
| test_DocumentDBBackend |
python | kamyu104__LeetCode-Solutions | Python/palindrome-partitioning-iv.py | {
"start": 1158,
"end": 1812
} | class ____(object):
def checkPartitioning(self, s):
"""
:type s: str
:rtype: bool
"""
dp = [[False]*len(s) for _ in xrange(len(s))]
for i in reversed(xrange(len(s))):
for j in xrange(i, len(s)):
if s[i] == s[j] and (j-i < 2 or dp[i+1][j-1]):
dp[i][j] = True
for i in xrange(1, len(s)-1):
if not dp[0][i-1]:
continue
for j in xrange(i+1, len(s)):
if not dp[j][-1]:
continue
if dp[i][j-1]:
return True
return False
| Solution2 |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/conversational/output_parser.py | {
"start": 273,
"end": 1625
} | class ____(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> AgentAction | AgentFinish:
"""Parse the output from the agent into an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
"""
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()},
text,
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
msg = f"Could not parse LLM output: `{text}`"
raise OutputParserException(msg)
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
| ConvoOutputParser |
python | pola-rs__polars | py-polars/src/polars/io/cloud/credential_provider/_providers.py | {
"start": 15947,
"end": 18212
} | class ____(CachingCredentialProvider):
"""
GCP Credential Provider.
Using this requires the `google-auth` Python package to be installed.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
"""
def __init__( # noqa: D417 (TODO)
self,
*,
scopes: Any | None = None,
request: Any | None = None,
quota_project_id: Any | None = None,
default_scopes: Any | None = None,
) -> None:
"""
Initialize a credential provider for Google Cloud (GCP).
Parameters
----------
Parameters are passed to `google.auth.default()`
"""
msg = "`CredentialProviderGCP` functionality is considered unstable"
issue_unstable_warning(msg)
self._ensure_module_availability()
import google.auth
self._init_creds = partial(
google.auth.default,
scopes=(
scopes
if scopes is not None
else ["https://www.googleapis.com/auth/cloud-platform"]
),
request=request,
quota_project_id=quota_project_id,
default_scopes=default_scopes,
)
super().__init__()
def retrieve_credentials_impl(self) -> CredentialProviderFunctionReturn:
"""Fetch the credentials."""
import google.auth.transport.requests
creds, _project_id = self._init_creds()
creds.refresh(google.auth.transport.requests.Request()) # type: ignore[no-untyped-call, unused-ignore]
return {"bearer_token": creds.token}, (
int(
(
expiry.replace(tzinfo=zoneinfo.ZoneInfo("UTC"))
if expiry.tzinfo is None
else expiry
).timestamp()
)
if (expiry := creds.expiry) is not None
else None
)
@classmethod
def _ensure_module_availability(cls) -> None:
if importlib.util.find_spec("google.auth") is None:
msg = "google-auth must be installed to use `CredentialProviderGCP`"
raise ImportError(msg)
| CredentialProviderGCP |
python | modin-project__modin | modin/_version.py | {
"start": 1831,
"end": 24597
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f: Callable) -> Callable:
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands: List[str],
args: List[str],
cwd: Optional[str] = None,
verbose: bool = False,
hide_stderr: bool = False,
env: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs: Dict[str, Any] = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen(
[command] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
**popen_kwargs,
)
break
except OSError as e:
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(
parentdir_prefix: str,
root: str,
verbose: bool,
) -> Dict[str, Any]:
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords: Dict[str, str] = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(
keywords: Dict[str, str],
tag_prefix: str,
verbose: bool,
) -> Dict[str, Any]:
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
f"{tag_prefix}[[:digit:]]*",
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces: Dict[str, Any] = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces: Dict[str, Any]) -> str:
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces: Dict[str, Any]) -> str:
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%d" % (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces: Dict[str, Any]) -> str:
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions() -> Dict[str, Any]:
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
| NotThisMethod |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 83883,
"end": 85138
} | class ____(ExecutionControlCommandBase):
invoke = dont_suppress_errors(ExecutionControlCommandBase.cont)
def _pointervalue(gdbval):
"""
Return the value of the pointer as a Python int.
gdbval.type must be a pointer type
"""
# don't convert with int() as it will raise a RuntimeError
if gdbval.address is not None:
return int(gdbval.address)
else:
# the address attribute is None sometimes, in which case we can
# still convert the pointer to an int
return int(gdbval)
def pointervalue(gdbval):
pointer = _pointervalue(gdbval)
try:
if pointer < 0:
raise gdb.GdbError("Negative pointer value, presumably a bug "
"in gdb, aborting.")
except RuntimeError:
# work around yet another bug in gdb where you get random behaviour
# and tracebacks
pass
return pointer
def get_inferior_unicode_postfix():
try:
gdb.parse_and_eval('PyUnicode_FromEncodedObject')
except RuntimeError:
try:
gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject')
except RuntimeError:
return 'UCS4'
else:
return 'UCS2'
else:
return ''
| PyCont |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_heapq.py | {
"start": 13156,
"end": 13363
} | class ____:
def __init__(self, value, heap):
self.value = value
self.heap = heap
def __lt__(self, other):
self.heap[:] = []
return self.value < other.value
| SideEffectLT |
python | ipython__ipython | IPython/core/magic_arguments.py | {
"start": 7531,
"end": 8147
} | class ____(ArgDecorator):
""" Mark the magic as having argparse arguments and possibly adjust the
name.
"""
def __init__(self, name=None):
self.name = name
def __call__(self, func):
if not getattr(func, 'has_arguments', False):
func.has_arguments = True
func.decorators = []
if self.name is not None:
func.argcmd_name = self.name
# This should be the first decorator in the list of decorators, thus the
# last to execute. Build the parser.
func.parser = construct_parser(func)
return func
| magic_arguments |
python | tensorflow__tensorflow | tensorflow/compiler/tests/reduce_window_test.py | {
"start": 1034,
"end": 3405
} | class ____(xla_test.XLATestCase):
"""Test cases for xla.reduce_window."""
def _reduce_window(self, operand, init, reducer, **kwargs):
with self.session():
placeholder = array_ops.placeholder(operand.dtype)
with self.test_scope():
output = xla.reduce_window(placeholder, init, reducer, **kwargs)
return output.eval(feed_dict={placeholder: operand})
def testReduceWindow(self):
# TODO(b/77644762): float16 and float64 ReduceWindow are unimplemented.
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
self.assertAllClose(
np.array([3, 5, 7, 9, 11, 13], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2]))
self.assertAllClose(
np.array([3, 7, 11], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2],
window_strides=[2]))
self.assertAllClose(
np.array([1, 4, 7], dtype=dtype),
self._reduce_window(
np.array([1, 2, 3, 4, 5, 6, 7], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[1],
window_strides=[3]))
self.assertAllClose(
np.array([[24, 36, 24], [96, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
1.0,
mul_reducer,
window_dimensions=[2, 2],
window_strides=[1, 1]))
self.assertAllClose(
np.array([[0, 0, 0], [5, 10, 5], [2, 4, 1], [0, 0, 0]], dtype=dtype),
self._reduce_window(
np.array([[1, 2, 3, 4], [4, 3, 2, 1], [2, 4, 0, 1]], dtype=dtype),
0.0,
sum_reducer,
window_dimensions=[2, 2],
window_strides=[2, 2],
padding=[[2, 3], [1, 2]]))
if __name__ == '__main__':
googletest.main()
| ReduceWindowTest |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/framework.py | {
"start": 7867,
"end": 8174
} | class ____:
"""Enum-like values for possible action to take on start of a run() call."""
# Run once with debug tensor-watching.
DEBUG_RUN = "debug_run"
# Run once with profiler.
PROFILE_RUN = "profile_run"
# Run without debug tensor-watching.
NON_DEBUG_RUN = "non_debug_run"
| OnRunStartAction |
python | django-extensions__django-extensions | django_extensions/management/commands/reset_db.py | {
"start": 595,
"end": 8653
} | class ____(BaseCommand):
help = "Resets the database for this project."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--noinput",
"--no-input",
action="store_false",
dest="interactive",
default=True,
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_argument(
"--no-utf8",
action="store_true",
dest="no_utf8_support",
default=False,
help="Tells Django to not create a UTF-8 charset database",
)
parser.add_argument(
"-U",
"--user",
action="store",
dest="user",
default=None,
help="Use another user for the database than defined in settings.py",
)
parser.add_argument(
"-O",
"--owner",
action="store",
dest="owner",
default=None,
help="Use another owner for creating the database than the user defined "
"in settings or via --user",
)
parser.add_argument(
"-P",
"--password",
action="store",
dest="password",
default=None,
help="Use another password for the database than defined in settings.py",
)
parser.add_argument(
"-D",
"--dbname",
action="store",
dest="dbname",
default=None,
help="Use another database name than defined in settings.py",
)
parser.add_argument(
"-R",
"--router",
action="store",
dest="router",
default=DEFAULT_DB_ALIAS,
help="Use this router-database other than defined in settings.py",
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help='Nominates a database to run command for. Defaults to the "%s".'
% DEFAULT_DB_ALIAS,
)
parser.add_argument(
"-c",
"--close-sessions",
action="store_true",
dest="close_sessions",
default=False,
help="Close database connections before dropping database "
"(currently works on PostgreSQL only)",
)
@signalcommand
def handle(self, *args, **options):
"""
Reset the database for this project.
Note: Transaction wrappers are in reverse as a work around for
autocommit, anybody know how to do this the right way?
"""
database = options["database"]
if options["router"] != DEFAULT_DB_ALIAS:
warnings.warn(
"--router is deprecated. You should use --database.",
RemovedInNextVersionWarning,
stacklevel=2,
)
database = options["router"]
dbinfo = settings.DATABASES.get(database)
if dbinfo is None:
raise CommandError("Unknown database %s" % database)
engine = dbinfo.get("ENGINE")
user = password = database_name = database_host = database_port = ""
if engine == "mysql":
(user, password, database_name, database_host, database_port) = (
parse_mysql_cnf(dbinfo)
)
user = options["user"] or dbinfo.get("USER") or user
password = options["password"] or dbinfo.get("PASSWORD") or password
owner = options["owner"] or user
database_name = options["dbname"] or dbinfo.get("NAME") or database_name
if database_name == "":
raise CommandError(
"You need to specify DATABASE_NAME in your Django settings file."
)
database_host = dbinfo.get("HOST") or database_host
database_port = dbinfo.get("PORT") or database_port
verbosity = options["verbosity"]
if options["interactive"]:
confirm = input(
"""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """
% (database_name,)
)
else:
confirm = "yes"
if confirm != "yes":
print("Reset cancelled.")
return
if engine in SQLITE_ENGINES:
try:
logging.info("Unlinking %s database", engine)
os.unlink(database_name)
except OSError:
pass
elif engine in MYSQL_ENGINES:
import MySQLdb as Database
kwargs = {
"user": user,
"passwd": password,
}
if database_host.startswith("/"):
kwargs["unix_socket"] = database_host
else:
kwargs["host"] = database_host
if database_port:
kwargs["port"] = int(database_port)
connection = Database.connect(**kwargs)
drop_query = "DROP DATABASE IF EXISTS `%s`" % database_name
utf8_support = "" if options["no_utf8_support"] else "CHARACTER SET utf8"
create_query = "CREATE DATABASE `%s` %s" % (database_name, utf8_support)
logging.info('Executing... "%s"', drop_query)
connection.query(drop_query)
logging.info('Executing... "%s"', create_query)
connection.query(create_query.strip())
elif engine in POSTGRESQL_ENGINES:
has_psycopg3 = importlib.util.find_spec("psycopg")
if has_psycopg3:
import psycopg as Database # NOQA
else:
import psycopg2 as Database # NOQA
conn_params = {"dbname": "template1"}
if user:
conn_params["user"] = user
if password:
conn_params["password"] = password
if database_host:
conn_params["host"] = database_host
if database_port:
conn_params["port"] = database_port
connection = Database.connect(**conn_params)
if has_psycopg3:
connection.autocommit = True
else:
connection.set_isolation_level(0) # autocommit false
cursor = connection.cursor()
if options["close_sessions"]:
close_sessions_query = (
"""
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '%s';
"""
% database_name
)
logging.info('Executing... "%s"', close_sessions_query.strip())
try:
cursor.execute(close_sessions_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s", str(e))
drop_query = 'DROP DATABASE "%s";' % database_name
logging.info('Executing... "%s"', drop_query)
try:
cursor.execute(drop_query)
except Database.ProgrammingError as e:
logging.exception("Error: %s", str(e))
create_query = 'CREATE DATABASE "%s"' % database_name
if owner:
create_query += ' WITH OWNER = "%s" ' % owner
create_query += " ENCODING = 'UTF8'"
if settings.DEFAULT_TABLESPACE:
create_query += " TABLESPACE = %s;" % settings.DEFAULT_TABLESPACE
else:
create_query += ";"
logging.info('Executing... "%s"', create_query)
cursor.execute(create_query)
else:
raise CommandError("Unknown database engine %s" % engine)
if verbosity >= 2 or options["interactive"]:
print("Reset successful.")
| Command |
python | getsentry__sentry | tests/sentry/integrations/gitlab/test_client.py | {
"start": 2084,
"end": 13717
} | class ____(GitLabClientTest):
get_user_should_succeed = True
def setUp(self) -> None:
super().setUp()
def tearDown(self) -> None:
responses.reset()
def make_users_request(self):
return self.gitlab_client.get_user()
def add_refresh_auth(self, success=True):
responses.add(
responses.POST,
self.refresh_url,
status=200 if success else 401,
json=self.refresh_response if success else {},
)
def add_get_user_response(self, success):
responses.add(
responses.GET,
self.request_url,
json=self.request_data if success else {},
status=200 if success else 401,
)
def assert_response_call(self, call, url, status):
assert call.request.url == url
assert call.response.status_code == status
def assert_data(self, data, expected_data):
assert data["access_token"] == expected_data["access_token"]
assert data["refresh_token"] == expected_data["refresh_token"]
assert data["created_at"] == expected_data["created_at"]
def assert_request_failed_refresh(self):
responses_calls = responses.calls
assert len(responses_calls) == 2
self.assert_response_call(responses_calls[0], self.request_url, 401)
self.assert_response_call(responses_calls[1], self.refresh_url, 401)
def assert_request_with_refresh(self):
responses_calls = responses.calls
assert len(responses_calls) == 3
self.assert_response_call(responses_calls[0], self.request_url, 401)
self.assert_response_call(responses_calls[1], self.refresh_url, 200)
self.assert_response_call(responses_calls[2], self.request_url, 200)
assert orjson.loads(responses_calls[2].response.text) == self.request_data
def assert_identity_was_refreshed(self):
data = self.gitlab_client.identity.data
self.assert_data(data, self.refresh_response)
data = Identity.objects.get(id=self.gitlab_client.identity.id).data
self.assert_data(data, self.refresh_response)
def assert_identity_was_not_refreshed(self):
data = self.gitlab_client.identity.data
self.assert_data(data, self.original_identity_data)
data = Identity.objects.get(id=self.gitlab_client.identity.id).data
self.assert_data(data, self.original_identity_data)
@responses.activate
def test_refresh_auth_flow(self) -> None:
# Fail first then succeed
self.add_get_user_response(success=False)
self.add_get_user_response(success=True)
self.add_refresh_auth(success=True)
resp = self.make_users_request()
self.assert_request_with_refresh()
assert resp == self.request_data
self.assert_identity_was_refreshed()
@responses.activate
def test_refresh_auth_fails_gracefully(self) -> None:
self.add_get_user_response(success=False)
self.add_refresh_auth(success=False)
with pytest.raises(IdentityNotValid):
self.make_users_request()
self.assert_request_failed_refresh()
self.assert_identity_was_not_refreshed()
@responses.activate
def test_no_refresh_when_api_call_successful(self) -> None:
self.add_get_user_response(success=True)
resp = self.make_users_request()
assert len(responses.calls) == 1
call = responses.calls[0]
self.assert_response_call(call, self.request_url, 200)
assert resp == self.request_data
self.assert_identity_was_not_refreshed()
@responses.activate
def test_check_file(self) -> None:
path = "src/file.py"
ref = "537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3"
responses.add(
responses.HEAD,
f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/src%2Ffile.py?ref={ref}",
json={"text": 200},
)
resp = self.gitlab_client.check_file(self.repo, path, ref)
assert responses.calls[0].response.status_code == 200
assert resp # this is None on error
@responses.activate
def test_check_no_file(self) -> None:
path = "src/file.py"
ref = "537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3"
responses.add(
responses.HEAD,
f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/src%2Ffile.py?ref={ref}",
status=404,
)
with pytest.raises(ApiError):
self.gitlab_client.check_file(self.repo, path, ref)
assert responses.calls[0].response.status_code == 404
@responses.activate
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_stacktrace_link(self, mock_record: mock.MagicMock) -> None:
path = "/src/file.py"
ref = "537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3"
responses.add(
responses.HEAD,
f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/src%2Ffile.py?ref={ref}",
json={"text": 200},
)
source_url = self.installation.get_stacktrace_link(self.repo, path, "master", ref)
assert (
source_url
== "https://example.gitlab.com/example-repo/blob/537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3/src/file.py"
)
assert (
len(mock_record.mock_calls) == 4
) # get_stacktrace_link calls check_file, which also has metrics
start1, start2, halt1, halt2 = mock_record.mock_calls
assert start1.args[0] == EventLifecycleOutcome.STARTED
assert start2.args[0] == EventLifecycleOutcome.STARTED # check_file
assert halt1.args[0] == EventLifecycleOutcome.SUCCESS # check_file
assert halt2.args[0] == EventLifecycleOutcome.SUCCESS
@responses.activate
@mock.patch(
"sentry.integrations.gitlab.client.GitLabApiClient.check_file",
side_effect=ApiRetryError(text="retry error"),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_stacktrace_link_retry_error(
self, mock_record: mock.MagicMock, mock_check_file: mock.MagicMock
) -> None:
path = "/src/file.py"
ref = "537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3"
responses.add(
responses.HEAD,
f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/src%2Ffile.py?ref={ref}",
json={"text": 200},
)
source_url = self.installation.get_stacktrace_link(self.repo, path, "master", None)
assert source_url is None
assert (
len(mock_record.mock_calls) == 4
) # get_stacktrace_link calls check_file, which also has metrics
start1, start2, halt1, halt2 = mock_record.mock_calls
assert start1.args[0] == EventLifecycleOutcome.STARTED
assert start2.args[0] == EventLifecycleOutcome.STARTED # check_file
assert halt1.args[0] == EventLifecycleOutcome.HALTED # check_file
assert halt2.args[0] == EventLifecycleOutcome.SUCCESS
@responses.activate
@mock.patch("requests.sessions.Session.send")
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_stacktrace_link_restricted_ip_address(
self, mock_record: mock.MagicMock, mock_send: mock.MagicMock
) -> None:
path = "/src/file.py"
ref = "537f2e94fbc489b2564ca3d6a5f0bd9afa38c3c3"
responses.add(
responses.HEAD,
f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/src%2Ffile.py?ref={ref}",
json={"text": 200},
)
mock_send.side_effect = RestrictedIPAddress
with pytest.raises(ApiHostError):
self.installation.get_stacktrace_link(self.repo, path, "master", None)
assert (
len(mock_record.mock_calls) == 4
) # get_stacktrace_link calls check_file, which also has metrics
start1, start2, halt1, halt2 = mock_record.mock_calls
assert start1.args[0] == EventLifecycleOutcome.STARTED
assert start2.args[0] == EventLifecycleOutcome.STARTED # check_file
assert halt1.args[0] == EventLifecycleOutcome.HALTED # check_file
assert halt2.args[0] == EventLifecycleOutcome.HALTED
@mock.patch(
"sentry.integrations.gitlab.integration.GitlabIntegration.check_file",
return_value=GITLAB_CODEOWNERS["html_url"],
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@responses.activate
def test_get_codeowner_file(
self, mock_record: mock.MagicMock, mock_check_file: mock.MagicMock
) -> None:
self.config = self.create_code_mapping(
repo=self.repo,
project=self.project,
)
responses.add(
method=responses.GET,
url=f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/files/CODEOWNERS/raw?ref=master",
body="docs/* @NisanthanNanthakumar @getsentry/ecosystem\n* @NisanthanNanthakumar\n",
)
result = self.installation.get_codeowner_file(
self.config.repository, ref=self.config.default_branch
)
assert result == GITLAB_CODEOWNERS
assert (
len(mock_record.mock_calls) == 2
) # check_file is mocked in this test, so there will be no metrics logged for it
assert mock_record.mock_calls[0].args[0] == EventLifecycleOutcome.STARTED
assert mock_record.mock_calls[1].args[0] == EventLifecycleOutcome.SUCCESS
@responses.activate
def test_get_commit(self) -> None:
commit = "a" * 40
responses.add(
method=responses.GET,
url=f"https://example.gitlab.com/api/v4/projects/{self.gitlab_id}/repository/commits/{commit}",
json=orjson.loads(GET_COMMIT_RESPONSE),
)
resp = self.gitlab_client.get_commit(self.gitlab_id, commit)
assert resp == orjson.loads(GET_COMMIT_RESPONSE)
@responses.activate
def test_get_rate_limit_info_from_response(self) -> None:
"""
When rate limit headers present, parse them and return a GitLabRateLimitInfo object
"""
responses.add(
responses.GET,
self.request_url,
json={},
status=200,
adding_headers={
"RateLimit-Limit": "1000",
"RateLimit-Remaining": "999",
"RateLimit-Reset": "1372700873",
"RateLimit-Observed": "1",
},
)
resp = self.gitlab_client.get_user()
rate_limit_info = get_rate_limit_info_from_response(resp)
assert rate_limit_info
assert rate_limit_info.limit == 1000
assert rate_limit_info.remaining == 999
assert rate_limit_info.used == 1
assert rate_limit_info.reset == 1372700873
assert rate_limit_info.next_window() == "17:47:53"
@responses.activate
def test_get_rate_limit_info_from_response_invalid(self) -> None:
"""
When rate limit headers are not present, handle gracefully and return None
"""
responses.add(
responses.GET,
self.request_url,
json={},
status=200,
)
resp = self.gitlab_client.get_user()
rate_limit_info = get_rate_limit_info_from_response(resp)
assert not rate_limit_info
@control_silo_test
| GitlabRefreshAuthTest |
python | apache__airflow | airflow-core/tests/unit/serialization/test_serde.py | {
"start": 5241,
"end": 5345
} | class ____:
x: int
__version__: ClassVar[int] = 1
def __init__(self, x):
self.x = x
| Y |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/cli_tests/fake_python_logger_module/__init__.py | {
"start": 162,
"end": 213
} | class ____(logging.StreamHandler):
pass
| FakeHandler |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 10275,
"end": 11745
} | class ____:
def test_eq(self):
assert self.arrow == self.arrow
assert self.arrow == self.arrow.datetime
assert not (self.arrow == "abc")
def test_ne(self):
assert not (self.arrow != self.arrow)
assert not (self.arrow != self.arrow.datetime)
assert self.arrow != "abc"
def test_gt(self):
arrow_cmp = self.arrow.shift(minutes=1)
assert not (self.arrow > self.arrow)
assert not (self.arrow > self.arrow.datetime)
with pytest.raises(TypeError):
self.arrow > "abc" # noqa: B015
assert self.arrow < arrow_cmp
assert self.arrow < arrow_cmp.datetime
def test_ge(self):
with pytest.raises(TypeError):
self.arrow >= "abc" # noqa: B015
assert self.arrow >= self.arrow
assert self.arrow >= self.arrow.datetime
def test_lt(self):
arrow_cmp = self.arrow.shift(minutes=1)
assert not (self.arrow < self.arrow)
assert not (self.arrow < self.arrow.datetime)
with pytest.raises(TypeError):
self.arrow < "abc" # noqa: B015
assert self.arrow < arrow_cmp
assert self.arrow < arrow_cmp.datetime
def test_le(self):
with pytest.raises(TypeError):
self.arrow <= "abc" # noqa: B015
assert self.arrow <= self.arrow
assert self.arrow <= self.arrow.datetime
@pytest.mark.usefixtures("time_2013_01_01")
| TestArrowComparison |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 10053,
"end": 10588
} | class ____(object):
"""*
jina gRPC service to expose Endpoints from Executors.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.endpoint_discovery = channel.unary_unary(
'/jina.JinaDiscoverEndpointsRPC/endpoint_discovery',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=jina__pb2.EndpointsProto.FromString,
)
| JinaDiscoverEndpointsRPCStub |
python | great-expectations__great_expectations | great_expectations/metrics/column/values_not_match_regex_count.py | {
"start": 261,
"end": 487
} | class ____(ColumnMetric[ColumnValuesNotMatchRegexCountResult]):
"""Count of values in a column that do not match a regex"""
name = "column_values.not_match_regex.count"
regex: StrictStr
| ColumnValuesNotMatchRegexCount |
python | django__django | django/db/models/functions/comparison.py | {
"start": 2928,
"end": 4213
} | class ____(Func):
"""Return, from left to right, the first non-null expression."""
function = "COALESCE"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError("Coalesce must take at least two expressions")
super().__init__(*expressions, **extra)
@property
def empty_result_set_value(self):
for expression in self.get_source_expressions():
result = expression.empty_result_set_value
if result is NotImplemented or result is not None:
return result
return None
def as_oracle(self, compiler, connection, **extra_context):
# Oracle prohibits mixing TextField (NCLOB) and CharField (NVARCHAR2),
# so convert all fields to NCLOB when that type is expected.
if self.output_field.get_internal_type() == "TextField":
clone = self.copy()
clone.set_source_expressions(
[
Func(expression, function="TO_NCLOB")
for expression in self.get_source_expressions()
]
)
return super(Coalesce, clone).as_sql(compiler, connection, **extra_context)
return self.as_sql(compiler, connection, **extra_context)
| Coalesce |
python | gevent__gevent | src/greentest/3.14/test_urllib2_localnet.py | {
"start": 11161,
"end": 15430
} | class ____(unittest.TestCase):
URL = "http://localhost"
USER = "tester"
PASSWD = "test123"
REALM = "TestRealm"
def setUp(self):
super(ProxyAuthTests, self).setUp()
# Ignore proxy bypass settings in the environment.
def restore_environ(old_environ):
os.environ.clear()
os.environ.update(old_environ)
self.addCleanup(restore_environ, os.environ.copy())
os.environ['NO_PROXY'] = ''
os.environ['no_proxy'] = ''
self.digest_auth_handler = DigestAuthHandler()
self.digest_auth_handler.set_users({self.USER: self.PASSWD})
self.digest_auth_handler.set_realm(self.REALM)
# With Digest Authentication.
def create_fake_proxy_handler(*args, **kwargs):
return FakeProxyHandler(self.digest_auth_handler, *args, **kwargs)
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.addCleanup(self.stop_server)
self.server.start()
self.server.ready.wait()
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib.request.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib.request.ProxyDigestAuthHandler()
self.opener = urllib.request.build_opener(
handler, self.proxy_digest_handler)
def stop_server(self):
self.server.stop()
self.server = None
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
self.digest_auth_handler.set_qop("auth")
with self.assertRaises(urllib.error.HTTPError) as cm:
self.opener.open(self.URL)
cm.exception.close()
def test_proxy_with_no_password_raises_httperror(self):
self.digest_auth_handler.set_qop("auth")
with self.assertRaises(urllib.error.HTTPError) as cm:
self.opener.open(self.URL)
cm.exception.close()
def test_proxy_qop_auth_works(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth")
with self.opener.open(self.URL) as result:
while result.read():
pass
def test_proxy_qop_auth_int_works_or_throws_urlerror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD)
self.digest_auth_handler.set_qop("auth-int")
try:
result = self.opener.open(self.URL)
except urllib.error.URLError:
# It's okay if we don't support auth-int, but we certainly
# shouldn't receive any kind of exception here other than
# a URLError.
pass
else:
with result:
while result.read():
pass
def GetRequestHandler(responses):
class FakeHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "TestHTTP/"
requests = []
headers_received = []
port = 80
def do_GET(self):
body = self.send_head()
while body:
done = self.wfile.write(body)
body = body[done:]
def do_POST(self):
content_length = self.headers["Content-Length"]
post_data = self.rfile.read(int(content_length))
self.do_GET()
self.requests.append(post_data)
def send_head(self):
FakeHTTPRequestHandler.headers_received = self.headers
self.requests.append(self.path)
response_code, headers, body = responses.pop(0)
self.send_response(response_code)
for (header, value) in headers:
self.send_header(header, value % {'port':self.port})
if body:
self.send_header("Content-type", "text/plain")
self.end_headers()
return body
self.end_headers()
def log_message(self, *args):
pass
return FakeHTTPRequestHandler
| ProxyAuthTests |
python | tensorflow__tensorflow | tensorflow/python/keras/mixed_precision/loss_scale_optimizer.py | {
"start": 3868,
"end": 10031
} | class ____(trackable.Trackable):
"""The state of a dynamic loss scale."""
def __init__(self,
initial_loss_scale,
growth_steps,
multiplier):
"""Creates the dynamic loss scale."""
super(_DynamicLossScaleState, self).__init__()
self._initial_loss_scale = float(initial_loss_scale)
self._growth_steps = int(growth_steps)
self._multiplier = float(multiplier)
self._weights = {}
self._current_loss_scale = self._add_weight(
name='current_loss_scale',
dtype=dtypes.float32,
initial_value=self._initial_loss_scale)
# The number of consecutive steps with finite gradients since the last
# nonfinite gradient or change in loss scale. The name is 'good_steps' for
# backwards compatibility with older checkpoints.
self._counter = self._add_weight(
name='good_steps', dtype=dtypes.int64, initial_value=0)
def _add_weight(self, name, initial_value, dtype=None):
"""Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
"""
variable = variable_v1.VariableV1(
initial_value=initial_value,
name=name,
dtype=dtype,
trainable=False,
use_resource=True,
synchronization=variables.VariableSynchronization.AUTO,
# Set aggregation to NONE, as loss scaling variables should never be
# aggregated.
aggregation=variables.VariableAggregation.NONE)
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
key = (name, graph_key)
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
backend.track_variable(variable)
return variable
def _trackable_children(self,
save_type=trackable.SaveType.CHECKPOINT,
**kwargs):
"""From Trackable. Gather graph-specific weights to save."""
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
weights = {}
for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):
if g == graph_key:
weights[name] = v
weights.update(
super(_DynamicLossScaleState,
self)._trackable_children(save_type, **kwargs))
return weights
def _lookup_dependency(self, name):
"""From Trackable. Find a weight in the current graph."""
unconditional = super(_DynamicLossScaleState, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
return self._weights.get((name, graph_key), None)
@property
def initial_loss_scale(self):
return self._initial_loss_scale
@property
def growth_steps(self):
return self._growth_steps
@property
def multiplier(self):
return self._multiplier
@property
def current_loss_scale(self):
"""Returns the current loss scale as a float32 `tf.Variable`."""
return self._current_loss_scale
@property
def counter(self):
"""Returns the counter as a float32 `tf.Variable`."""
return self._counter
def __call__(self):
"""Returns the current loss scale as a scalar `float32` tensor."""
return tensor_conversion.convert_to_tensor_v2_with_dispatch(
self._current_loss_scale
)
def update(self, grads):
"""Updates the value of the loss scale.
Args:
grads: A nested structure of unscaled gradients, each which is an
all-reduced gradient of the loss with respect to a weight.
Returns:
update_op: In eager mode, None. In graph mode, an op to update the loss
scale.
should_apply_gradients: Either a bool or a scalar boolean tensor. If
False, the caller should skip applying `grads` to the variables this
step.
"""
grads = nest.flatten(grads)
if distribute_lib.has_strategy(
) and distribute_lib.in_cross_replica_context():
distribution = distribute_lib.get_strategy()
is_finite_per_replica = distribution.extended.call_for_each_replica(
_is_all_finite, args=(grads,))
# Each replica computed the same `is_finite` value, since `grads` is
# all-reduced across replicas. Arbitrarily take `is_finite` from the first
# replica.
is_finite = (
distribution.experimental_local_results(is_finite_per_replica)[0])
else:
is_finite = _is_all_finite(grads)
def update_if_finite_grads():
"""Update assuming the gradients are finite."""
def incr_loss_scale():
new_loss_scale = self.current_loss_scale * self.multiplier
return control_flow_ops.group(
_assign_if_finite(self.current_loss_scale, new_loss_scale),
self.counter.assign(0))
return cond.cond(
self.counter + 1 >= self.growth_steps,
incr_loss_scale,
lambda: _op_in_graph_mode(self.counter.assign_add(1)))
def update_if_not_finite_grads():
"""Update assuming the gradients are nonfinite."""
new_loss_scale = math_ops.maximum(
self.current_loss_scale / self.multiplier, 1)
return control_flow_ops.group(
self.counter.assign(0),
self.current_loss_scale.assign(new_loss_scale))
update_op = cond.cond(is_finite, update_if_finite_grads,
update_if_not_finite_grads)
should_apply_gradients = is_finite
return update_op, should_apply_gradients
# See LossScaleOptimizer docstring for why this is so big
_DEFAULT_INITIAL_SCALE = 2 ** 15
_DEFAULT_GROWTH_STEPS = 2000
# pylint: disable=g-classes-have-attributes
| _DynamicLossScaleState |
python | donnemartin__system-design-primer | solutions/object_oriented_design/online_chat/online_chat.py | {
"start": 1904,
"end": 2089
} | class ____(object):
def __init__(self, message_id, message, timestamp):
self.message_id = message_id
self.message = message
self.timestamp = timestamp
| Message |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.