language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 40578,
"end": 40701
} | class ____:
cache_key: str
start_time_ns: int
forward_symints: list[torch.SymInt]
@dataclass
| AOTAutogradCacheInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/unions1.py | {
"start": 852,
"end": 1245
} | class ____:
class ClassA_A:
pass
@decorator1
class ClassA_B:
pass
@decorator2
class ClassA_C:
pass
a_or_str: "ClassA.ClassA_A | str"
b_or_str: "ClassA.ClassA_B | str"
b_or_str_Union: Union[ClassA.ClassA_B, str]
c_or_str: "ClassA.ClassA_C | str"
Alias1 = None | str
Alias2 = str | None
_T = TypeVar("_T")
Alias3 = _T | str
Alias4 = str | _T
| ClassA |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/exception.py | {
"start": 748,
"end": 872
} | class ____(UnityException):
"""
Related to errors with communication timeouts.
"""
pass
| UnityTimeOutException |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 54328,
"end": 54466
} | class ____(_PrintableStructure):
_fields_ = [
('cause', c_char * 256),
('color', _nvmlLedColor_t),
]
| c_nvmlLedState_t |
python | python-poetry__poetry | src/poetry/utils/env/python/exceptions.py | {
"start": 85,
"end": 265
} | class ____(PythonVersionError):
def __init__(self, expected: str) -> None:
super().__init__(f"Could not find the python executable {expected}")
| PythonVersionNotFoundError |
python | pytorch__pytorch | torch/_dynamo/codegen.py | {
"start": 2043,
"end": 29005
} | class ____:
"""
Helper class uses for constructing Python bytecode
"""
def __init__(
self,
tx: "InstructionTranslatorBase",
root: Optional[torch.nn.Module] = None,
graph_output_var: Optional[str] = None,
tempvars: Optional[dict[Union[VariableTracker, Source], Any]] = None,
overridden_sources: Optional[dict[Source, Source]] = None,
) -> None:
self.root = root
self.top_of_stack: Optional[Union[VariableTracker, Source]] = None
self.uses: Counter[Union[VariableTracker, Source]] = collections.Counter()
self.graph_outputs: dict[int, GraphOutputEntry] = {}
self._output: list[Instruction] = []
# This determines which VariableTracker/Source should be stored as
# locals, and maps the VariableTracker/Source to the local variable
# name. Note that it could map to None initially, in which case we'll
# overwrite it to map to real temporary names via `add_cache`.
self.tempvars: dict[Union[VariableTracker, Source], Any] = tempvars or {}
self.tx = tx
self.graph_output_var = graph_output_var
self.code_options = self.tx.output.code_options
self.cell_and_freevars = self.tx.cell_and_freevars
self.new_var = self.tx.output.new_var
self.value_from_source: bool = True
# This serves as a way for codegen to use a different source; we need
# this because sometimes we can't easily modify the original source
# without affecting other components, e.g., guards.
self.overridden_sources: dict[Source, Source] = overridden_sources or {}
def restore_stack(
self, stack_values: list[Any], *, value_from_source: bool = True
) -> None:
prev = self.value_from_source
self.value_from_source &= value_from_source
try:
self.foreach(stack_values)
finally:
self.value_from_source = prev
def graph_output_vars(self) -> list[VariableTracker]:
return [x.variable for x in self.graph_outputs.values()]
def call_reconstruct(
self, value: Union[VariableTracker, Source, "GraphArg"]
) -> None:
res = value.reconstruct(self)
assert res is None, f"reconstruct!=None {value}"
def add_push_null(
self, gen_fn: Callable[[], None], call_function_ex: bool = False
) -> None:
"""
`gen_fn` generates instructions via PyCodegen methods
that push a single callable to the stack.
`add_push_null` pushes a NULL to the stack before or after the
instructions generated by `gen_fn`, depending on Python version.
Will attempt to use the NULL push bit for instructions
with such bits (LOAD_GLOBAL 3.11+, LOAD_ATTR 3.12+, LOAD_SUPER_ATTR).
"""
old_len = len(self._output)
if sys.version_info < (3, 13):
# gen_fn may DUP_TOP instead if TOS is not cleared.
# Will cause problems since NULL will be pushed right
# before the generated instructions in <= 3.12
self.clear_tos()
gen_fn()
# inplace modify self._output
added_insts = self._output[old_len:]
del self._output[old_len:]
if call_function_ex:
self._output.extend(add_push_null_call_function_ex(added_insts))
else:
self._output.extend(add_push_null(added_insts))
if sys.version_info >= (3, 13):
# NULL will be at top of stack
self.clear_tos()
def __call__(
self, value: Union[VariableTracker, Source, None], allow_cache: bool = True
) -> None:
"""
Generate code such that top-of-stack (TOS) is set to value.
`allow_cache` controls the behavior in the following manner. `value` can
either be a VariableTracker or a Source.
If `value` is a `Source`, `allow_cache` must be True (invariant asserted
below). If the source was reconstructed earlier, we will reuse the
generated code by loading from top of stack or tempvars.
If `value` is a `VariableTracker`, we have the following cases:
1) `allow_cache=True`
a) If the value.source is not None, we will emit the code based on
`value.source` to handle aliasing.
b) If value.source is None (example reconstructing a local list
returned by the compiled function), we will reconstruct the variable
tracker (w/o any source) to emit bytecode that generates a new
python object.
In both cases of value.source being None or not, if the value was
reconstructed earlier, we will reuse the generated code by loading from
top of stack or tempvars.
2) `allow_cache=False` - This is a special case (allow_cache defaults to
True).
a) If the value.source is not None, we reconstruct the variable
tracker and emit a new python object. You might wonder what about
aliasing? The place where we use this config also has the followup
code where the original python object is assigned to this new python
value to handle aliasing (check side_effects.py and search for
allow_cache=False).
b) If value.source is None, this is not allowed
Notable effects:
1. `self.top_of_stack` will be set to `value`, if we don't codegen
`value` based on source.
2. `self.uses[value]` will increment, unless (a). we codegen via
`top_of_stack` or cached `tempvars`, or (b). `value` has special VT
types like `NNModuleVariable`, etc.
"""
assert value is not None
if isinstance(value, Source):
# If the source needs to be overridden, use the new one.
source = self.overridden_sources.get(value, value)
assert allow_cache is True, "allow_cache must be True for Source"
if self.top_of_stack is value:
self._output.append(create_dup_top())
return
if self.tempvars.get(source) is not None:
self._output.append(self.create_load(self.tempvars[source]))
self.top_of_stack = source
return
self.uses[source] += 1
try:
self.call_reconstruct(source)
except NotImplementedError:
unimplemented(
gb_type="Reconstruction failure: source.reconstruct not implemented",
context=str(source),
explanation=f"Dynamo has no bytecode reconstruction implemented for {type(source)} variable {source}.",
hints=[*graph_break_hints.DYNAMO_BUG],
)
if source in self.tempvars:
self._output.append(create_dup_top())
self.add_cache(source)
self.top_of_stack = source
return
assert isinstance(value, VariableTracker)
output = self._output
graph_outputs = self.graph_outputs
if allow_cache:
if self.top_of_stack is value:
output.append(create_dup_top())
return
if self.tempvars.get(value) is not None:
output.append(self.create_load(self.tempvars[value]))
self.top_of_stack = value
return
if value.is_realized() and isinstance(
value, ContextlibContextManagerLocalGeneratorObjectVariable
):
raise IncorrectUsage(
"NYI: Returning a @contextmanager object from a torch.compile function"
)
# Dynamo normally prefers codegen from source to account for aliasing.
if (
value.source is not None
and allow_cache
and not (
value.is_realized() and isinstance(value, LocalGeneratorObjectVariable)
)
):
# There's a corner case for export: for instance, if the computation
# graph is just identity on an input tensor, Dynamo would just emit
# a `LOAD_FAST` from the input source, rather than generating an
# identity FX graph.
#
# However, export wants to maximize graph capture; in the case
# above, export _wants to_ obtain an identity FX graph (despite it
# appears unnecessarily expensive for `torch.compile`), so we have
# the following option to override Dynamo's preference for codegen
# from source. Moreover, this option applies recursively, for cases
# like input tensor being returned in a new dictionary.
#
# And why the `ValueMutationExisting` check? Not sure, so leaving it
# to keep the old behavior, as when `value_from_source` was
# introduced. TODO sort out the invariants among side effect,
# codegen and export.
if (
isinstance(value.mutation_type, ValueMutationExisting)
or self.value_from_source
):
return self(value.source)
if value.is_python_constant() and is_safe_constant(value.as_python_constant()):
output.append(self.create_load_const(value.as_python_constant()))
elif isinstance(value, TensorWithTFOverrideVariable):
graph_outputs_key = self.add_graph_output(value)
self.add_push_null(
lambda: self.load_import_from(utils.__name__, "to_subclass")
)
self.load_graph_output(graph_outputs[graph_outputs_key].index)
output.append(
self.create_load_global(
value.global_mangled_class_name(self.tx), # type: ignore[arg-type]
add=True,
)
)
output.extend(create_call_function(2, False))
elif (
isinstance(value, SymNodeVariable)
and value.python_type() is float
and not self.tx.export
):
# This is a little unusual; force the output convention to be a
# Tensor here. Don't do this for export because this is
# apparently load bearing for export tests (but I am a bit
# doubtful it actually works in the real world)
# NB: It works to add_graph_output on a computed expression
# as_tensor here, because we memoize as_tensor calls on
# SymNodeVariable!
graph_outputs_key = self.add_graph_output(
value.as_tensor(self.tx, torch.float64)
)
def gen_fn() -> None:
self.load_graph_output(graph_outputs[graph_outputs_key].index)
output.append(self.create_load_attr("item"))
self.add_push_null(gen_fn)
output.extend(create_call_function(0, False))
elif isinstance(
value,
(
TensorVariable,
SymNodeVariable,
UnspecializedPythonVariable,
NumpyNdarrayVariable,
),
):
graph_outputs_key = self.add_graph_output(value)
if isinstance(value, NumpyNdarrayVariable):
self.add_push_null(
lambda: self.load_import_from(utils.__name__, "to_numpy_helper")
)
self.load_graph_output(graph_outputs[graph_outputs_key].index)
output.extend(create_call_function(1, False))
elif isinstance(value, UnspecializedPythonVariable) and value.need_unwrap:
def gen_fn() -> None:
self.load_graph_output(graph_outputs[graph_outputs_key].index)
output.append(self.create_load_attr("item"))
self.add_push_null(gen_fn)
output.extend(create_call_function(0, False))
else:
self.load_graph_output(graph_outputs[graph_outputs_key].index)
elif isinstance(value, NNModuleVariable):
parts = value.module_key.split(".")
if parts[0] in self.code_options["co_varnames"]:
output.append(self.create_load(parts[0]))
parts = parts[1:]
else:
assert self.root is not None
output.append(self.create_load_const_unchecked(self.root))
for part in parts:
output.append(self.create_load_attr(part))
else:
self.uses[value] += 1
try:
self.call_reconstruct(value)
except NotImplementedError:
unimplemented(
gb_type="Reconstruction failure",
context=str(value),
explanation=f"Dynamo has no bytecode reconstruction implemented for sourceless variable {value}.",
hints=[
"If Dynamo is attempting to trace a return statement and your code is attempting to return a variable "
"that Dynamo cannot reconstruct, then remove it from the return statement.",
*graph_break_hints.CAUSED_BY_EARLIER_GRAPH_BREAK,
"Report an issue to PyTorch if you need reconstrtuction support. Note that objects that don't have "
"reconstruction rules may be fundamentally unreconstructable.",
],
)
if allow_cache and value in self.tempvars:
self._output.append(create_dup_top())
self.add_cache(value)
self.top_of_stack = value
def add_graph_output(self, value: VariableTracker) -> int:
graph_outputs_key = id(value.as_proxy())
if graph_outputs_key not in self.graph_outputs:
self.graph_outputs[graph_outputs_key] = GraphOutputEntry(
len(self.graph_outputs), value
)
return graph_outputs_key
def load_graph_output(self, index: int) -> None:
output = self._output
assert self.graph_output_var is not None
output.append(self.create_load(self.graph_output_var))
output.append(self.create_load_const(index))
output.append(self.create_binary_subscr())
def add_cache(self, value: Union[VariableTracker, Source]) -> None:
var = self.new_var()
self.tempvars[value] = var
self._output.append(self.create_store(var))
def foreach(self, items: Iterable[Union[VariableTracker, Source]]) -> None:
for i in items:
self(i)
def create_binary_subscr(self) -> Instruction:
return create_binary_subscr()
def setup_globally_cached(self, name: str, value: Any) -> list[Instruction]:
"""Store value in a new global"""
name = re.sub(r"[^a-zA-Z0-9_]+", "_", name)
f_globals = self.tx.f_globals
if name in f_globals:
assert id(f_globals[name]) == id(value)
else:
f_globals[name] = value
return [self.create_load_global(name, add=True)]
def clear_tos(self) -> None:
self.top_of_stack = None
def append_output(self, inst: Instruction) -> None:
assert isinstance(inst, Instruction)
self._output.append(inst)
self.clear_tos()
def extend_output(self, insts: list[Instruction]) -> None:
assert all(isinstance(x, Instruction) for x in insts)
self._output.extend(insts)
self.clear_tos()
def get_instructions(self) -> list[Instruction]:
return self._output
def create_load(self, name: str) -> Instruction:
assert name in self.code_options["co_varnames"], f"{name} missing"
return create_instruction("LOAD_FAST", argval=name)
def create_load_closure(self, name: str) -> Instruction:
assert name in self.cell_and_freevars()
inst_name = "LOAD_FAST" if sys.version_info >= (3, 13) else "LOAD_CLOSURE"
return create_instruction(inst_name, argval=name)
def create_load_deref(self, name: str) -> Instruction:
assert name in self.cell_and_freevars()
return create_instruction("LOAD_DEREF", argval=name)
def create_store(self, name: str) -> Instruction:
assert name in self.code_options["co_varnames"], f"{name} missing"
return create_instruction("STORE_FAST", argval=name)
def create_store_deref(self, name: str) -> Instruction:
assert name in self.cell_and_freevars()
return create_instruction("STORE_DEREF", argval=name)
def create_load_global(self, name: str, add: bool = False) -> Instruction:
if add:
self.tx.output.update_co_names(name)
assert name in self.code_options["co_names"], f"{name} not in co_names"
return create_instruction("LOAD_GLOBAL", argval=name)
def create_load_const(self, value: Any) -> Instruction:
return create_load_const(value)
def create_load_const_unchecked(self, value: Any) -> Instruction:
return create_load_const(value, checked=False)
def load_method(self, name: str) -> None:
self.tx.output.update_co_names(name)
self.append_output(create_load_method(name))
def call_method(self, nargs: int) -> None:
self.extend_output(create_call_method(nargs))
def create_load_attr(self, name: str) -> Instruction:
if name not in self.code_options["co_names"]:
self.code_options["co_names"] += (name,)
return create_instruction("LOAD_ATTR", argval=name)
def load_attr(self, name: str) -> None:
self.append_output(self.create_load_attr(name))
def create_load_attrs(self, names: str) -> list[Instruction]:
return [self.create_load_attr(name) for name in names.split(".")]
def create_store_attr(self, name: str) -> Instruction:
if name not in self.code_options["co_names"]:
self.code_options["co_names"] += (name,)
return create_instruction("STORE_ATTR", argval=name)
def store_attr(self, name: str) -> None:
self.append_output(self.create_store_attr(name))
def load_function_name(
self, fn_name: str, push_null: bool, num_on_stack: int = 0
) -> list[Instruction]:
"""Load the global fn_name on the stack num_on_stack down"""
output = []
if push_null and sys.version_info >= (3, 11):
output.extend(add_push_null(self.create_load_global(fn_name, add=True)))
if num_on_stack > 0:
output.extend(
[
*self.rot_n(num_on_stack + 2),
*self.rot_n(num_on_stack + 2),
]
)
else:
output.extend(
[
self.create_load_global(fn_name, add=True),
*self.rot_n(num_on_stack + 1),
]
)
return output
def rot_n(self, n: int) -> list[Instruction]:
try:
return create_rot_n(n)
except AttributeError:
# desired rotate bytecode doesn't exist, generate equivalent bytecode
return [
create_build_tuple(n),
self.create_load_const_unchecked(rot_n_helper(n)),
*create_rot_n(2),
*create_call_function_ex(False, False),
create_instruction("UNPACK_SEQUENCE", arg=n),
]
def pop_top(self) -> None:
self.append_output(create_instruction("POP_TOP"))
def call_function(self, nargs: int, push_null: bool) -> None:
self.extend_output(create_call_function(nargs, push_null=push_null))
def dup_top(self) -> None:
self.append_output(create_dup_top())
def store(self, varname: str) -> None:
self.append_output(self.create_store(varname))
def load_deref(self, varname: str) -> None:
self.append_output(self.create_load_deref(varname))
def make_function_with_closure(
self,
fn_name: str,
code: types.CodeType,
) -> None:
"""Creates a closure with code object `code`.
Expects the TOS to be the tuple of cells to use for this closure.
TOS will be popped to create the closure.
Args:
- fn_name: name of the function
- code: code object of the function
(does not include the tuple of cells on the TOS)
"""
output = self._output
output.append(self.create_load_const(code))
if sys.version_info < (3, 11):
output.append(self.create_load_const(fn_name))
if sys.version_info >= (3, 13):
output.extend(
[
create_instruction("MAKE_FUNCTION"),
create_instruction("SET_FUNCTION_ATTRIBUTE", arg=0x08),
]
)
else:
output.append(create_instruction("MAKE_FUNCTION", arg=0x08))
self.clear_tos()
def create_load_python_module(self, mod: types.ModuleType) -> Instruction:
"""
Generate a LOAD_GLOBAL instruction to fetch a given python module.
"""
output = self.tx.output
global_scope = output.global_scope
name = re.sub(r"^.*[.]", "", mod.__name__)
if global_scope.get(name, None) is mod:
return self.create_load_global(name, add=True)
prefix = f"___module_{name}"
global_name = self.tx.output.install_global_by_id(prefix, mod)
return self.create_load_global(global_name, add=True)
def mark_source_temp(self, source: Source) -> None:
"""
Mark a source as a temp variable, so that it can be reused.
"""
if source not in self.tempvars:
self.tempvars[source] = None
def make_call_generated_code(self, fn_name: str) -> None:
"""Call the generated code function stored in fn_name"""
self.extend_output(self.load_function_name(fn_name, True))
graphargs = self.tx.output.graphargs
def extract_nested_sources(source: Source) -> list[Source]:
nested_sources: list[Source] = []
if isinstance(source, ChainedSource):
nested_sources.append(source.base)
if isinstance(source, DictGetItemSource) and isinstance(
source.index, Source
):
nested_sources.append(source.index)
return nested_sources
def collect_temp_sources(sources: deque[Source], codegen: PyCodegen) -> None:
seen_sources: OrderedSet[Source] = OrderedSet()
while sources:
current_source = sources.popleft()
if current_source in seen_sources:
# This source is used at least twice, so it can be reused
codegen.mark_source_temp(current_source)
# Dont trace source further. This prevents us from marking too
# many nodes as temp sources.
continue
seen_sources.add(current_source)
sources.extend(extract_nested_sources(current_source))
# Collect all the sources that are used more than once, so that we can
# generate tmp variables in the generated pre-graph bytecode. This
# essentially implements CSE.
collect_temp_sources(
deque([arg.source for arg in graphargs if arg.source is not None]), self
)
cm_var = None
if config.record_runtime_overhead:
# Record the pregraph bytecode start
self.add_push_null(
lambda: self.load_import_from(
utils.__name__, "record_pregraph_bytecode_enter"
)
)
self.extend_output(create_call_function(0, False))
cm_var = self.new_var()
self.store(cm_var)
for arg in graphargs:
if arg.pass_arg_as_tensor:
self.add_push_null(
lambda: self.extend_output(
[
self.create_load_python_module(torch),
self.create_load_attr("_as_tensor_fullprec"),
]
)
)
self.call_reconstruct(arg)
self.extend_output(create_call_function(1, False))
else:
self.call_reconstruct(arg)
if config.record_runtime_overhead:
# Record the pregraph bytecode end
self.add_push_null(
lambda: self.load_import_from(
utils.__name__, "record_pregraph_bytecode_exit"
)
)
assert cm_var is not None
self.extend_output([self.create_load(cm_var)])
self.extend_output(create_call_function(1, False))
self.pop_top()
self.extend_output(create_call_function(len(graphargs), False))
def create_import_name(self, module_name: str) -> Instruction:
return create_instruction("IMPORT_NAME", argval=module_name)
def load_import_from(self, module_name: str, object_name: str) -> None:
source = AttrSource(self.tx.import_source(module_name), object_name)
# Note: This approach is somewhat aggressive because typically, a source is marked
# as a tempvar only when it is used more than once. In this case, we're marking it
# as a tempvar without performing that analysis. However, this is a simple solution,
# and in many cases, load imports are reused multiple times.
self.mark_source_temp(source)
self(source)
def create_call_function_kw(
self, nargs: int, kw_names: Iterable[str], push_null: bool
) -> list[Instruction]:
if sys.version_info >= (3, 13):
output = create_call_function(nargs, push_null)
assert output[-1].opname == "CALL"
output.insert(-1, self.create_load_const(kw_names))
output[-1] = create_instruction("CALL_KW", arg=nargs)
return output
elif sys.version_info >= (3, 11):
output = create_call_function(nargs, push_null)
if sys.version_info >= (3, 12):
idx = -1
expected_inst = "CALL"
else:
idx = -2
expected_inst = "PRECALL"
assert output[idx].opname == expected_inst
kw_names_inst = create_instruction("KW_NAMES", argval=kw_names)
output.insert(idx, kw_names_inst)
return output
return [
self.create_load_const(kw_names),
create_instruction("CALL_FUNCTION_KW", arg=nargs),
]
def create_delete(self, value: object) -> Instruction:
return create_instruction("DELETE_FAST", argval=value)
| PyCodegen |
python | django__django | tests/lookup/test_timefield.py | {
"start": 62,
"end": 844
} | class ____(TestCase):
@classmethod
def setUpTestData(self):
# Create a few Alarms
self.al1 = Alarm.objects.create(desc="Early", time="05:30")
self.al2 = Alarm.objects.create(desc="Late", time="10:00")
self.al3 = Alarm.objects.create(desc="Precise", time="12:34:56")
def test_hour_lookups(self):
self.assertSequenceEqual(
Alarm.objects.filter(time__hour=5),
[self.al1],
)
def test_minute_lookups(self):
self.assertSequenceEqual(
Alarm.objects.filter(time__minute=30),
[self.al1],
)
def test_second_lookups(self):
self.assertSequenceEqual(
Alarm.objects.filter(time__second=56),
[self.al3],
)
| TimeFieldLookupTests |
python | ray-project__ray | python/ray/llm/tests/common/cloud/test_pyarrow_filesystem.py | {
"start": 7225,
"end": 12994
} | class ____:
"""Tests for the _filter_files method in PyArrowFileSystem."""
def test_filter_files_no_filters(self):
"""Test filtering files with no inclusion or exclusion filters."""
# Setup mock filesystem
mock_fs = MagicMock()
# Create mock file infos
file_info1 = MagicMock()
file_info1.type = pa_fs.FileType.File
file_info1.path = "bucket/model/file1.txt"
file_info2 = MagicMock()
file_info2.type = pa_fs.FileType.File
file_info2.path = "bucket/model/subdir/file2.json"
dir_info = MagicMock()
dir_info.type = pa_fs.FileType.Directory
dir_info.path = "bucket/model/subdir"
mock_fs.get_file_info.return_value = [file_info1, file_info2, dir_info]
# Test filtering with no filters
result = PyArrowFileSystem._filter_files(
fs=mock_fs, source_path="bucket/model", destination_path="/local/dest"
)
# Should include all files, exclude directories
expected = [
("bucket/model/file1.txt", "/local/dest/file1.txt"),
("bucket/model/subdir/file2.json", "/local/dest/subdir/file2.json"),
]
assert sorted(result) == sorted(expected)
# Verify filesystem was called correctly
mock_fs.get_file_info.assert_called_once()
call_args = mock_fs.get_file_info.call_args[0][0]
assert call_args.base_dir == "bucket/model"
assert call_args.recursive is True
def test_filter_files_with_inclusion_substrings(self):
"""Test filtering files with inclusion substrings."""
# Setup mock filesystem
mock_fs = MagicMock()
# Create mock file infos
file_info1 = MagicMock()
file_info1.type = pa_fs.FileType.File
file_info1.path = "bucket/model/config.json"
file_info2 = MagicMock()
file_info2.type = pa_fs.FileType.File
file_info2.path = "bucket/model/weights.bin"
file_info3 = MagicMock()
file_info3.type = pa_fs.FileType.File
file_info3.path = "bucket/model/tokenizer.json"
mock_fs.get_file_info.return_value = [file_info1, file_info2, file_info3]
# Test filtering with inclusion substrings
result = PyArrowFileSystem._filter_files(
fs=mock_fs,
source_path="bucket/model",
destination_path="/local/dest",
substrings_to_include=["config", "tokenizer"],
)
# Should only include files with "config" or "tokenizer" in path
expected = [
("bucket/model/config.json", "/local/dest/config.json"),
("bucket/model/tokenizer.json", "/local/dest/tokenizer.json"),
]
assert sorted(result) == sorted(expected)
def test_filter_files_with_exclusion_suffixes(self):
"""Test filtering files with exclusion suffixes."""
# Setup mock filesystem
mock_fs = MagicMock()
# Create mock file infos
file_info1 = MagicMock()
file_info1.type = pa_fs.FileType.File
file_info1.path = "bucket/model/model.bin"
file_info2 = MagicMock()
file_info2.type = pa_fs.FileType.File
file_info2.path = "bucket/model/config.json"
file_info3 = MagicMock()
file_info3.type = pa_fs.FileType.File
file_info3.path = "bucket/model/temp.tmp"
file_info4 = MagicMock()
file_info4.type = pa_fs.FileType.File
file_info4.path = "bucket/model/log.txt"
mock_fs.get_file_info.return_value = [
file_info1,
file_info2,
file_info3,
file_info4,
]
# Test filtering with exclusion suffixes
result = PyArrowFileSystem._filter_files(
fs=mock_fs,
source_path="bucket/model",
destination_path="/local/dest",
suffixes_to_exclude=[".tmp", ".txt"],
)
# Should exclude files ending with .tmp or .txt
expected = [
("bucket/model/model.bin", "/local/dest/model.bin"),
("bucket/model/config.json", "/local/dest/config.json"),
]
assert sorted(result) == sorted(expected)
def test_filter_files_with_both_filters(self):
"""Test filtering files with both inclusion and exclusion filters."""
# Setup mock filesystem
mock_fs = MagicMock()
# Create mock file infos
file_info1 = MagicMock()
file_info1.type = pa_fs.FileType.File
file_info1.path = "bucket/model/config.json"
file_info2 = MagicMock()
file_info2.type = pa_fs.FileType.File
file_info2.path = "bucket/model/config.tmp"
file_info3 = MagicMock()
file_info3.type = pa_fs.FileType.File
file_info3.path = "bucket/model/weights.bin"
file_info4 = MagicMock()
file_info4.type = pa_fs.FileType.File
file_info4.path = "bucket/model/tokenizer.json"
mock_fs.get_file_info.return_value = [
file_info1,
file_info2,
file_info3,
file_info4,
]
# Test filtering with both inclusion and exclusion
result = PyArrowFileSystem._filter_files(
fs=mock_fs,
source_path="bucket/model",
destination_path="/local/dest",
substrings_to_include=["config", "tokenizer"],
suffixes_to_exclude=[".tmp"],
)
# Should include files with "config" or "tokenizer" but exclude .tmp files
expected = [
("bucket/model/config.json", "/local/dest/config.json"),
("bucket/model/tokenizer.json", "/local/dest/tokenizer.json"),
]
assert sorted(result) == sorted(expected)
| TestFilterFiles |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_capacity_request_policy.py | {
"start": 383,
"end": 6796
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'default': 'str',
'valid_range': 'V1beta2CapacityRequestPolicyRange',
'valid_values': 'list[str]'
}
attribute_map = {
'default': 'default',
'valid_range': 'validRange',
'valid_values': 'validValues'
}
def __init__(self, default=None, valid_range=None, valid_values=None, local_vars_configuration=None): # noqa: E501
"""V1beta2CapacityRequestPolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._default = None
self._valid_range = None
self._valid_values = None
self.discriminator = None
if default is not None:
self.default = default
if valid_range is not None:
self.valid_range = valid_range
if valid_values is not None:
self.valid_values = valid_values
@property
def default(self):
"""Gets the default of this V1beta2CapacityRequestPolicy. # noqa: E501
Default specifies how much of this capacity is consumed by a request that does not contain an entry for it in DeviceRequest's Capacity. # noqa: E501
:return: The default of this V1beta2CapacityRequestPolicy. # noqa: E501
:rtype: str
"""
return self._default
@default.setter
def default(self, default):
"""Sets the default of this V1beta2CapacityRequestPolicy.
Default specifies how much of this capacity is consumed by a request that does not contain an entry for it in DeviceRequest's Capacity. # noqa: E501
:param default: The default of this V1beta2CapacityRequestPolicy. # noqa: E501
:type: str
"""
self._default = default
@property
def valid_range(self):
"""Gets the valid_range of this V1beta2CapacityRequestPolicy. # noqa: E501
:return: The valid_range of this V1beta2CapacityRequestPolicy. # noqa: E501
:rtype: V1beta2CapacityRequestPolicyRange
"""
return self._valid_range
@valid_range.setter
def valid_range(self, valid_range):
"""Sets the valid_range of this V1beta2CapacityRequestPolicy.
:param valid_range: The valid_range of this V1beta2CapacityRequestPolicy. # noqa: E501
:type: V1beta2CapacityRequestPolicyRange
"""
self._valid_range = valid_range
@property
def valid_values(self):
"""Gets the valid_values of this V1beta2CapacityRequestPolicy. # noqa: E501
ValidValues defines a set of acceptable quantity values in consuming requests. Must not contain more than 10 entries. Must be sorted in ascending order. If this field is set, Default must be defined and it must be included in ValidValues list. If the requested amount does not match any valid value but smaller than some valid values, the scheduler calculates the smallest valid value that is greater than or equal to the request. That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues). If the requested amount exceeds all valid values, the request violates the policy, and this device cannot be allocated. # noqa: E501
:return: The valid_values of this V1beta2CapacityRequestPolicy. # noqa: E501
:rtype: list[str]
"""
return self._valid_values
@valid_values.setter
def valid_values(self, valid_values):
"""Sets the valid_values of this V1beta2CapacityRequestPolicy.
ValidValues defines a set of acceptable quantity values in consuming requests. Must not contain more than 10 entries. Must be sorted in ascending order. If this field is set, Default must be defined and it must be included in ValidValues list. If the requested amount does not match any valid value but smaller than some valid values, the scheduler calculates the smallest valid value that is greater than or equal to the request. That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues). If the requested amount exceeds all valid values, the request violates the policy, and this device cannot be allocated. # noqa: E501
:param valid_values: The valid_values of this V1beta2CapacityRequestPolicy. # noqa: E501
:type: list[str]
"""
self._valid_values = valid_values
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2CapacityRequestPolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2CapacityRequestPolicy):
return True
return self.to_dict() != other.to_dict()
| V1beta2CapacityRequestPolicy |
python | getsentry__sentry | tests/sentry/manager/test_external_issue_manager.py | {
"start": 332,
"end": 4772
} | class ____(TestCase):
def setUp(self) -> None:
self.project = self.create_project(organization=self.organization)
self.integration1 = self.create_integration(
organization=self.organization, external_id="example:1", provider="example"
)
self.api_integration1 = serialize_integration(self.integration1)
self.integration2 = self.create_integration(
organization=self.organization, external_id="example:2", provider="example"
)
self.api_integration2 = serialize_integration(self.integration2)
self.integration3 = self.create_integration(
organization=self.create_organization(), external_id="example:3", provider="example"
)
self.api_integration3 = serialize_integration(self.integration3)
self.event1 = self.store_event(
data={"event_id": "a" * 32, "message": "ooop"},
project_id=self.project.id,
)
self.event2 = self.store_event(
data={"event_id": "b" * 32, "message": "boop"},
project_id=self.project.id,
)
assert self.event1.group is not None
self.group_event1 = GroupEvent.from_event(self.event1, self.event1.group)
assert self.event2.group is not None
self.group_event2 = GroupEvent.from_event(self.event2, self.event2.group)
self.external_issue1 = self.create_integration_external_issue(
group=self.event1.group, integration=self.integration1, key="ABC-123"
)
self.external_issue2 = self.create_integration_external_issue(
group=self.event2.group, integration=self.integration1, key="DEF-456"
)
self.external_issue3 = self.create_integration_external_issue(
group=self.event1.group, integration=self.integration2, key="GHI-789"
)
def test_get_for_integration(self) -> None:
# Base case
result = ExternalIssue.objects.get_for_integration(integration=self.api_integration1)
assert len(result) == 2
for ei in [self.external_issue1, self.external_issue2]:
assert ei in result
# Empty case
result = ExternalIssue.objects.get_for_integration(integration=self.api_integration3)
assert len(result) == 0
# Key provided case
result = ExternalIssue.objects.get_for_integration(
integration=self.api_integration1, external_issue_key=self.external_issue2.key
)
assert len(result) == 1
assert self.external_issue2 in result
def test_get_linked_issues(self) -> None:
# Base case
result = ExternalIssue.objects.get_linked_issues(
event=self.group_event1, integration=self.api_integration1
)
assert len(result) == 1
assert self.external_issue1 in result
external_issue4 = self.create_integration_external_issue(
group=self.event1.group, integration=self.integration1, key="JKL-000"
)
result = ExternalIssue.objects.get_linked_issues(
event=self.group_event1, integration=self.api_integration1
)
assert len(result) == 2
for ei in [self.external_issue1, external_issue4]:
assert ei in result
# Empty case
result = ExternalIssue.objects.get_linked_issues(
event=self.group_event2, integration=self.api_integration2
)
assert len(result) == 0
def test_has_linked_issue(self) -> None:
# Base case
result = ExternalIssue.objects.has_linked_issue(
event=self.group_event1, integration=self.api_integration1
)
assert result
event = self.store_event(
data={"event_id": "a" * 32, "message": "new event"},
project_id=self.project.id,
)
assert event.group is not None
group_event = GroupEvent.from_event(event, event.group)
# Empty case
result = ExternalIssue.objects.has_linked_issue(
event=group_event, integration=self.api_integration1
)
assert not result
# Update case
self.create_integration_external_issue(
group=event.group, integration=self.integration1, key="JKL-000"
)
result = ExternalIssue.objects.has_linked_issue(
event=group_event, integration=self.api_integration1
)
assert result
| ExternalIssueManagerTest |
python | sqlalchemy__sqlalchemy | test/orm/test_relationship_criteria.py | {
"start": 6816,
"end": 55376
} | class ____(_Fixtures, testing.AssertsCompiledSQL):
"""
combinations:
with_loader_criteria
# for these we have mapper_criteria
select(mapper) # select_mapper
select(mapper.col, mapper.col) # select_mapper_col
select(func.count()).select_from(mapper) # select_from_mapper
select(a).join(mapper, a.target) # select_join_mapper
select(a).options(joinedload(a.target)) # select_joinedload_mapper
# for these we have aliased_criteria, inclaliased_criteria
select(aliased) # select_aliased
select(aliased.col, aliased.col) # select_aliased_col
select(func.count()).select_from(aliased) # select_from_aliased
select(a).join(aliased, a.target) # select_join_aliased
select(a).options(joinedload(a.target.of_type(aliased))
# select_joinedload_aliased
"""
__dialect__ = "default"
def test_select_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User).options(
with_loader_criteria(User, User.name != "name")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name "
"FROM users WHERE users.name != :name_1",
)
def test_err_given_in_pathed(self, user_address_fixture):
User, Address = user_address_fixture
with expect_raises_message(
sa_exc.ArgumentError,
r"Loader option <.*LoaderCriteriaOption.*> is not compatible "
r"with the Load.options\(\) method.",
):
select(User).options(
selectinload(User.addresses).options(
with_loader_criteria(
Address, Address.email_address != "foo"
)
)
)
def test_criteria_post_replace(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(User)
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
.with_only_columns(func.count())
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
@testing.combinations(
(
lambda User, Address: select(Address)
.select_from(User)
.join(User.addresses)
.options(with_loader_criteria(User, User.name != "name")),
),
(
# issue #10365
lambda User, Address: select(Address)
.select_from(User)
.join(Address, User.id == Address.user_id)
.options(with_loader_criteria(User, User.name != "name")),
),
(
lambda User, Address: select(Address)
.select_from(orm_join(User, Address, User.addresses))
.options(with_loader_criteria(User, User.name != "name")),
),
(
lambda User, Address: select(Address)
.join_from(User, Address, User.addresses)
.options(with_loader_criteria(User, User.name != "name")),
),
argnames="stmt_fn",
)
@testing.combinations(True, False, argnames="alias_user")
def test_criteria_select_from_w_join_left(
self, user_address_fixture, stmt_fn, alias_user
):
"""test #8721"""
User, Address = user_address_fixture
if alias_user:
User = aliased(User)
stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address)
if alias_user:
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM users AS users_1 "
"JOIN addresses ON users_1.id = addresses.user_id "
"WHERE users_1.name != :name_1",
)
else:
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, "
"addresses.email_address "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name != :name_1",
)
@testing.combinations(
(
lambda User, Address: select(Address.id, User.id)
.select_from(User)
.join(User.addresses)
.options(with_loader_criteria(User, User.name != "name")),
),
(
# issue #10365 - this seems to have already worked
lambda User, Address: select(Address.id, User.id)
.select_from(User)
.join(Address, User.id == Address.user_id)
.options(with_loader_criteria(User, User.name != "name")),
),
(
lambda User, Address: select(Address.id, User.id)
.select_from(orm_join(User, Address, User.addresses))
.options(with_loader_criteria(User, User.name != "name")),
),
(
lambda User, Address: select(Address.id, User.id)
.join_from(User, Address, User.addresses)
.options(with_loader_criteria(User, User.name != "name")),
),
argnames="stmt_fn",
)
@testing.combinations(True, False, argnames="alias_user")
def test_criteria_select_from_w_join_left_including_entity(
self, user_address_fixture, stmt_fn, alias_user
):
"""test #8721"""
User, Address = user_address_fixture
if alias_user:
User = aliased(User)
stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address)
if alias_user:
self.assert_compile(
stmt,
"SELECT addresses.id, users_1.id AS id_1 "
"FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id "
"WHERE users_1.name != :name_1",
)
else:
self.assert_compile(
stmt,
"SELECT addresses.id, users.id AS id_1 "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE users.name != :name_1",
)
@testing.combinations(
(
lambda User, Address: select(Address)
.select_from(User)
.join(User.addresses)
.options(
with_loader_criteria(Address, Address.email_address != "email")
),
),
(
# issue #10365
lambda User, Address: select(Address)
.select_from(User)
.join(Address, User.id == Address.user_id)
.options(
with_loader_criteria(Address, Address.email_address != "email")
),
),
(
# for orm_join(), this is set up before we have the context
# available that allows with_loader_criteria to be set up
# correctly
lambda User, Address: select(Address)
.select_from(orm_join(User, Address, User.addresses))
.options(
with_loader_criteria(Address, Address.email_address != "email")
),
testing.fails("not implemented right now"),
),
(
lambda User, Address: select(Address)
.join_from(User, Address, User.addresses)
.options(
with_loader_criteria(Address, Address.email_address != "email")
),
),
argnames="stmt_fn",
)
def test_criteria_select_from_w_join_right(
self, user_address_fixture, stmt_fn
):
"""test #8721"""
User, Address = user_address_fixture
stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address)
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, addresses.email_address "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
@testing.combinations(
"select",
"joined",
"subquery",
"selectin",
"immediate",
argnames="loader_strategy",
)
def test_loader_strategy_on_refresh(
self, loader_strategy, user_address_custom_strat_fixture
):
User, Address = user_address_custom_strat_fixture(loader_strategy)
sess = fixture_session()
@event.listens_for(sess, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(
Address,
~Address.id.in_([5, 3]),
)
)
u1 = sess.get(User, 7)
u2 = sess.get(User, 8)
eq_(u1.addresses, [Address(id=1)])
eq_(u2.addresses, [Address(id=2), Address(id=4)])
for i in range(3):
sess.expire_all()
eq_(u1.addresses, [Address(id=1)])
eq_(u2.addresses, [Address(id=2), Address(id=4)])
def test_criteria_post_replace_legacy(self, user_address_fixture):
User, Address = user_address_fixture
s = fixture_session()
stmt = (
s.query(User)
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
.with_entities(func.count())
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
def test_criteria_applies_to_column_property(
self, user_address_col_property_fixture
):
"""test related to #8064, added after discussion #9091 which
requested this behavior for with_loader_criteria() where it was
found to be working as of this issue, just not tested"""
User, Address = user_address_col_property_fixture
stmt = select(User)
self.assert_compile(
stmt,
"SELECT (SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = users.id) AS anon_1, "
"users.id, users.name FROM users",
)
stmt = select(User).options(
with_loader_criteria(
Address, Address.email_address != "email_address"
)
)
self.assert_compile(
stmt,
"SELECT (SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = users.id AND "
"addresses.email_address != :email_address_1) AS anon_1, "
"users.id, users.name FROM users",
)
def test_select_from_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(sql.func.count())
.select_from(User)
.options(with_loader_criteria(User, User.name != "name"))
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users "
"WHERE users.name != :name_1",
)
def test_with_loader_criteria_recursion_check_scalar_subq(
self, user_address_fixture
):
"""test #7491"""
User, Address = user_address_fixture
subq = select(Address).where(Address.id == 8).scalar_subquery()
stmt = (
select(User)
.join(Address)
.options(with_loader_criteria(Address, Address.id == subq))
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users JOIN addresses "
"ON users.id = addresses.user_id AND addresses.id = "
"(SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM addresses "
"WHERE addresses.id = :id_1)",
)
def test_with_loader_criteria_recursion_check_from_subq(
self, user_address_fixture
):
"""test #7491"""
User, Address = user_address_fixture
subq = select(Address).where(Address.id == 8).subquery()
stmt = (
select(User)
.join(Address)
.options(with_loader_criteria(Address, Address.id == subq.c.id))
)
# note this query is incorrect SQL right now. This is a current
# artifact of how with_loader_criteria() is used and may be considered
# a bug at some point, in which case if fixed this query can be
# changed. the main thing we are testing at the moment is that
# there is not a recursion overflow.
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users JOIN addresses "
"ON users.id = addresses.user_id AND addresses.id = anon_1.id",
)
def test_select_mapper_columns_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User.id, User.name).options(
with_loader_criteria(User, User.name != "name")
)
self.assert_compile(
stmt,
"SELECT users.id, users.name "
"FROM users WHERE users.name != :name_1",
)
@testing.variation("style", ["direct_union", "from_statement"])
@testing.variation("add_nested_union", [True, False])
def test_select_mapper_columns_w_union_mapper_criteria(
self, multi_mixin_fixture, style: testing.Variation, add_nested_union
):
"""test #9635"""
HasFoob, Order, Item = multi_mixin_fixture
stmt = (
select(Order.id, Order.description)
.where(Order.id > 8)
.union(select(Order.id, Order.description).where(Order.id <= 8))
)
if add_nested_union:
stmt = union(
stmt,
union(
select(Item.id, Item.description).where(Item.id <= 8),
select(Item.id, Item.description).where(Item.id > 8),
),
)
if style.direct_union:
stmt = stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description != "name",
include_aliases=True,
)
)
elif style.from_statement:
stmt = (
select(Order.id, Order.description)
.from_statement(stmt)
.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description != "name",
include_aliases=True,
)
)
)
else:
style.fail()
if add_nested_union:
# the criteria is embedded into all UNIONS regardless of nesting.
self.assert_compile(
stmt,
"(SELECT orders.id, orders.description FROM orders WHERE "
"orders.id > :id_1 AND orders.description != :description_1 "
"UNION SELECT orders.id, orders.description FROM orders WHERE "
"orders.id <= :id_2 AND orders.description != :description_2) "
"UNION (SELECT items.id, items.description FROM items WHERE "
"items.id <= :id_3 AND items.description != :description_3 "
"UNION SELECT items.id, items.description FROM items WHERE "
"items.id > :id_4 AND items.description != :description_4)",
checkparams={
"id_1": 8,
"description_1": "name",
"id_2": 8,
"description_2": "name",
"id_3": 8,
"description_3": "name",
"id_4": 8,
"description_4": "name",
},
)
else:
self.assert_compile(
stmt,
"SELECT orders.id, orders.description FROM orders WHERE "
"orders.id > :id_1 AND orders.description != :description_1 "
"UNION SELECT orders.id, orders.description FROM orders WHERE "
"orders.id <= :id_2 AND orders.description != :description_2",
checkparams={
"description_1": "name",
"description_2": "name",
"id_1": 8,
"id_2": 8,
},
)
def test_select_mapper_columns_w_core_dml_mapper_criteria(
self, multi_mixin_fixture
):
"""test #9635"""
HasFoob, Order, Item = multi_mixin_fixture
stmt = (
insert(Order)
.from_select(
["id", "description"],
select(Order.id, Order.description).where(Order.id > 8),
)
.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description != "name",
include_aliases=True,
)
)
)
self.assert_compile(
stmt,
"INSERT INTO orders (id, description) SELECT orders.id, "
"orders.description FROM orders WHERE orders.id > :id_1 "
"AND orders.description != :description_1",
checkparams={"description_1": "name", "id_1": 8},
)
@testing.variation("update_is_orm", [True, False])
def test_select_mapper_columns_w_core_cte_update_mapper_criteria(
self, multi_mixin_fixture, update_is_orm
):
"""test #9635"""
HasFoob, Order, Item = multi_mixin_fixture
cte = select(Order).cte("pd")
if update_is_orm:
stmt = (
update(Order)
.where(Order.id == cte.c.id)
.values(description="newname")
)
else:
stmt = (
update(Order.__table__)
.where(Order.__table__.c.id == cte.c.id)
.values(description="newname")
)
stmt = stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description != "name",
include_aliases=True,
)
)
if update_is_orm:
self.assert_compile(
stmt,
"WITH pd AS (SELECT orders.id AS id, "
"orders.user_id AS user_id, "
"orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen "
"FROM orders WHERE orders.description != %(description_1)s) "
"UPDATE orders SET description=%(description)s "
"FROM pd WHERE orders.id = pd.id "
"AND orders.description != %(description_2)s",
dialect="postgresql",
checkparams={
"description": "newname",
"description_1": "name",
"description_2": "name",
},
)
else:
# non ORM update, no criteria, but criteria still gets rendered
# inside the SELECT
self.assert_compile(
stmt,
"WITH pd AS (SELECT orders.id AS id, "
"orders.user_id AS user_id, "
"orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen "
"FROM orders WHERE orders.description != %(description_1)s) "
"UPDATE orders SET description=%(description)s "
"FROM pd WHERE orders.id = pd.id",
dialect="postgresql",
checkparams={
"description": "newname",
"description_1": "name",
},
)
@testing.variation("delete_is_orm", [True, False])
def test_select_mapper_columns_w_core_cte_delete_mapper_criteria(
self, multi_mixin_fixture, delete_is_orm
):
"""test #9635"""
HasFoob, Order, Item = multi_mixin_fixture
cte = select(Order).cte("pd")
if delete_is_orm:
stmt = delete(Order).where(Order.id == cte.c.id)
else:
stmt = delete(Order.__table__).where(
Order.__table__.c.id == cte.c.id
)
stmt = stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description != "name",
include_aliases=True,
)
)
if delete_is_orm:
self.assert_compile(
stmt,
"WITH pd AS (SELECT orders.id AS id, orders.user_id AS "
"user_id, orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen "
"FROM orders WHERE orders.description != %(description_1)s) "
"DELETE FROM orders USING pd WHERE orders.id = pd.id "
"AND orders.description != %(description_2)s",
dialect="postgresql",
checkparams={"description_1": "name", "description_2": "name"},
)
else:
# non ORM update, no criteria, but criteria still gets rendered
# inside the SELECT
self.assert_compile(
stmt,
"WITH pd AS (SELECT orders.id AS id, orders.user_id AS "
"user_id, orders.address_id AS address_id, "
"orders.description AS description, orders.isopen AS isopen "
"FROM orders WHERE orders.description != %(description_1)s) "
"DELETE FROM orders USING pd WHERE orders.id = pd.id",
dialect="postgresql",
checkparams={"description_1": "name"},
)
def test_select_join_mapper_mapper_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = (
select(User)
.join(User.addresses)
.options(
with_loader_criteria(Address, Address.email_address != "name")
)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
def test_select_implicit_join_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = (
select(User)
.join(Address)
.options(
with_loader_criteria(Address, Address.email_address != "name")
)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id "
"AND addresses.email_address != :email_address_1",
)
def test_select_joinm2m_mapper_mapper_criteria(self, order_item_fixture):
Order, Item = order_item_fixture
stmt = (
select(Order)
.join(Order.items)
.options(
with_loader_criteria(Item, Item.description != "description")
)
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen FROM orders "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items ON items.id = order_items_1.item_id "
"AND items.description != :description_1",
)
def test_select_joinedload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = select(User).options(
joinedload(User.addresses),
with_loader_criteria(Address, Address.email_address != "name"),
)
self.assert_compile(
stmt,
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address "
"FROM users LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1 "
"ORDER BY addresses_1.id",
)
def test_select_selectinload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = select(User).options(
selectinload(User.addresses),
with_loader_criteria(Address, Address.email_address != "name"),
)
s = Session(testing.db, future=True)
with self.sql_execution_asserter() as asserter:
s.execute(stmt).all()
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users",
[],
),
CompiledSQL(
"SELECT addresses.user_id, addresses.id, "
"addresses.email_address "
"FROM addresses "
"WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"primary_keys": [7, 8, 9, 10], "email_address_1": "name"}],
),
)
def test_select_selectinload_mapper_mapper_closure_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
def get_statement(closure="name"):
stmt = select(User).options(
selectinload(User.addresses),
with_loader_criteria(
Address, lambda cls: cls.email_address != closure
),
)
return stmt
s = Session(testing.db, future=True)
stmt = get_statement(closure="name")
with self.sql_execution_asserter() as asserter:
s.execute(stmt).all()
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users",
[],
),
CompiledSQL(
"SELECT addresses.user_id, addresses.id, "
"addresses.email_address "
"FROM addresses "
"WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"primary_keys": [7, 8, 9, 10], "closure_1": "name"}],
),
)
stmt = get_statement(closure="new name")
with self.sql_execution_asserter() as asserter:
s.execute(stmt).all()
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users",
[],
),
CompiledSQL(
"SELECT addresses.user_id, addresses.id, "
"addresses.email_address "
"FROM addresses "
"WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"primary_keys": [7, 8, 9, 10], "closure_1": "new name"}],
),
)
def test_select_lazyload_mapper_mapper_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
stmt = (
select(User)
.options(
with_loader_criteria(Address, Address.email_address != "name"),
)
.order_by(User.id)
)
s = Session(testing.db, future=True)
with self.sql_execution_asserter() as asserter:
for u in s.execute(stmt).scalars():
u.addresses
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id",
[],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 7, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 8, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 9, "email_address_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :email_address_1 "
"ORDER BY addresses.id",
[{"param_1": 10, "email_address_1": "name"}],
),
)
def test_select_lazyload_mapper_mapper_closure_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
def get_statement(closure="name"):
stmt = (
select(User)
.options(
lazyload(User.addresses),
with_loader_criteria(
Address, lambda cls: cls.email_address != closure
),
)
.order_by(User.id)
)
return stmt
s = Session(testing.db, future=True)
stmt = get_statement(closure="name")
with self.sql_execution_asserter() as asserter:
for obj in s.scalars(stmt).all():
obj.addresses
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id",
[],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 7, "closure_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 8, "closure_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 9, "closure_1": "name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 10, "closure_1": "name"}],
),
)
stmt = get_statement(closure="new name")
with self.sql_execution_asserter() as asserter:
for obj in s.scalars(
stmt, execution_options={"populate_existing": True}
).all():
obj.addresses
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name FROM users ORDER BY users.id",
[],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 7, "closure_1": "new name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 8, "closure_1": "new name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 9, "closure_1": "new name"}],
),
CompiledSQL(
"SELECT addresses.id, "
"addresses.user_id, "
"addresses.email_address "
"FROM addresses WHERE :param_1 = addresses.user_id "
"AND addresses.email_address != :closure_1 "
"ORDER BY addresses.id",
[{"param_1": 10, "closure_1": "new name"}],
),
)
def test_select_aliased_inclaliased_criteria(self, user_address_fixture):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1).options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
@testing.combinations(
(lambda User: [User.id], "users.id"),
(lambda User: [User.id.label("foo")], "users.id AS foo"),
(lambda User: [User.name + "bar"], "users.name || :name_1 AS anon_1"),
(
lambda User: [(User.name + "bar").label("foo")],
"users.name || :name_1 AS foo",
),
(lambda User: [func.count(User.id)], "count(users.id) AS count_1"),
(
lambda User: [func.count(User.id).label("foo")],
"count(users.id) AS foo",
),
argnames="case, expected",
)
def test_select_expr_with_criteria(
self, case, expected, user_address_fixture
):
"""test #7205"""
User, Address = user_address_fixture
stmt = select(*resolve_lambda(case, User=User)).options(
# use non-bound value so that we dont have to accommodate for
# the "anon" counter
with_loader_criteria(
User, User.name != literal_column("some_crit")
)
)
self.assert_compile(
stmt,
"SELECT %s FROM users WHERE users.name != some_crit" % (expected,),
)
def test_select_from_aliased_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = (
select(sql.func.count())
.select_from(u1)
.options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM users AS users_1 "
"WHERE users_1.name != :name_1",
)
def test_select_aliased_columns_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1.id, u1.name).options(
with_loader_criteria(
User, User.name != "name", include_aliases=True
)
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_select_join_aliased_inclaliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
a1 = aliased(Address)
stmt = (
select(User)
.join(User.addresses.of_type(a1))
.options(
with_loader_criteria(
Address,
Address.email_address != "name",
include_aliases=True,
)
)
)
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1",
)
def test_select_joinm2m_aliased_inclaliased_criteria(
self, order_item_fixture
):
Order, Item = order_item_fixture
i1 = aliased(Item)
stmt = (
select(Order)
.join(Order.items.of_type(i1))
.options(
with_loader_criteria(
Item,
Item.description != "description",
include_aliases=True,
)
)
)
self.assert_compile(
stmt,
"SELECT orders.id, orders.user_id, orders.address_id, "
"orders.description, orders.isopen FROM orders "
"JOIN order_items AS order_items_1 "
"ON orders.id = order_items_1.order_id "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
"AND items_1.description != :description_1",
)
def test_select_aliased_aliased_criteria(self, user_address_fixture):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1).options(with_loader_criteria(u1, u1.name != "name"))
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_select_aliased_columns_aliased_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
u1 = aliased(User)
stmt = select(u1.id, u1.name).options(
with_loader_criteria(u1, u1.name != "name")
)
self.assert_compile(
stmt,
"SELECT users_1.id, users_1.name "
"FROM users AS users_1 WHERE users_1.name != :name_1",
)
def test_joinedload_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db, future=True)
stmt = select(User).options(
joinedload(User.addresses),
with_loader_criteria(Address, Address.email_address != "email"),
)
with self.sql_execution_asserter() as asserter:
s.execute(stmt)
asserter.assert_(
CompiledSQL(
"SELECT users.id, users.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"users LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"AND addresses_1.email_address != :email_address_1 "
"ORDER BY addresses_1.id",
[{"email_address_1": "email"}],
),
)
def test_query_count_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
q = s.query(User).options(with_loader_criteria(User, User.id != 8))
with self.sql_execution_asserter() as asserter:
q.count()
asserter.assert_(
CompiledSQL(
"SELECT count(*) AS count_1 FROM (SELECT "
"users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id != :id_1) AS anon_1",
[{"id_1": 8}],
),
)
def test_query_count_after_the_fact_global_criteria(
self, user_address_fixture
):
User, Address = user_address_fixture
s = Session(testing.db)
# this essentially tests that the query.from_self() which takes
# place in count() is one that can still be affected by
# the loader criteria, meaning it has to be an ORM query
q = s.query(User)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
with self.sql_execution_asserter() as asserter:
q.count()
asserter.assert_(
CompiledSQL(
"SELECT count(*) AS count_1 FROM (SELECT "
"users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id != :id_1) AS anon_1",
[{"id_1": 8}],
),
)
def test_select_count_subquery_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
stmt = select(User).subquery()
stmt = (
select(sql.func.count())
.select_from(stmt)
.options(with_loader_criteria(User, User.id != 8))
)
self.assert_compile(
stmt,
"SELECT count(*) AS count_1 FROM (SELECT users.id AS id, "
"users.name AS name FROM users WHERE users.id != :id_1) AS anon_1",
)
def test_query_outerjoin_global_criteria(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
q = (
s.query(User, Address)
.outerjoin(User.addresses)
.options(
with_loader_criteria(
Address,
~Address.email_address.like("ed@%"),
)
)
.order_by(User.id)
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM users LEFT OUTER JOIN addresses "
"ON users.id = addresses.user_id AND "
"addresses.email_address NOT LIKE :email_address_1 "
"ORDER BY users.id",
)
eq_(
q.all(),
[
(User(id=7), Address(id=1)),
(User(id=8), None), # three addresses not here
(User(id=9), Address(id=5)),
(User(id=10), None),
],
)
def test_caching_and_binds_lambda(self, mixin_fixture):
HasFoob, UserWFoob = mixin_fixture
statement = select(UserWFoob).filter(UserWFoob.id < 10)
def go(value):
return statement.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.name == value,
include_aliases=True,
)
)
s = Session(testing.db, future=True)
for i in range(10):
name = random.choice(["ed", "fred", "jack"])
stmt = go(name)
eq_(s.execute(stmt).scalars().all(), [UserWFoob(name=name)])
def test_unnamed_param_dont_fail(self, multi_mixin_fixture):
HasFoob, Order, Item = multi_mixin_fixture
def go(stmt, value):
return stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description == "order 3",
include_aliases=True,
)
)
with Session(testing.db) as sess:
for i in range(10):
name = random.choice(["order 1", "order 3", "order 5"])
statement = select(Order)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Order(description="order 3")],
)
def test_declared_attr_no_warning(self, declattr_mixin_fixture):
HasFoob, UserWFoob = declattr_mixin_fixture
statement = select(UserWFoob).filter(UserWFoob.id < 10)
def go(value):
return statement.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.name == value,
include_aliases=True,
)
)
s = Session(testing.db, future=True)
for i in range(10):
name = random.choice(["ed", "fred", "jack"])
stmt = go(name)
eq_(s.execute(stmt).scalars().all(), [UserWFoob(name=name)])
def test_caching_and_binds_lambda_more_mixins(self, multi_mixin_fixture):
# By including non-mapped mixin HasBat in the middle of the
# hierarchy, we test issue #5766
HasFoob, Order, Item = multi_mixin_fixture
def go(stmt, value):
return stmt.options(
with_loader_criteria(
HasFoob,
lambda cls: cls.description == value,
include_aliases=True,
)
)
with Session(testing.db) as sess:
for i in range(10):
name = random.choice(["order 1", "order 3", "order 5"])
statement = select(Order)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Order(description=name)],
)
name = random.choice(["item 1", "item 3", "item 5"])
statement = select(Item)
stmt = go(statement, name)
eq_(
sess.execute(stmt).scalars().all(),
[Item(description=name)],
)
def test_never_for_refresh(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.get(User, 8)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
s.refresh(u1)
eq_(u1.name, "ed")
def test_never_for_unexpire(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.get(User, 8)
s.expire(u1)
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
eq_(u1.name, "ed")
def test_never_for_undefer(self, user_address_fixture):
User, Address = user_address_fixture
s = Session(testing.db)
u1 = s.execute(
select(User).options(defer(User.name)).filter(User.id == 8)
).scalar_one()
@event.listens_for(s, "do_orm_execute")
def add_criteria(orm_context):
orm_context.statement = orm_context.statement.options(
with_loader_criteria(User, User.id != 8)
)
eq_(u1.name, "ed")
| LoaderCriteriaTest |
python | sympy__sympy | sympy/functions/special/gamma_functions.py | {
"start": 13576,
"end": 19526
} | class ____(DefinedFunction):
r"""
The upper incomplete gamma function.
Explanation
===========
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where $\gamma(s, x)$ is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where ${}_1F_1$ is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
2*(x**2/2 + x + 1)*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*erfc(sqrt(x)) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6,
Section 5, Handbook of Mathematical Functions with Formulas, Graphs,
and Mathematical Tables
.. [3] https://dlmf.nist.gov/8
.. [4] https://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] https://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] https://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy.functions.special.hyper import meijerg
if argindex == 2:
a, z = self.args
return -exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
if all(x.is_number for x in self.args):
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
return self
@classmethod
def eval(cls, a, z):
from sympy.functions.special.error_functions import expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is oo:
return S.Zero
elif z.is_zero:
if re(a).is_positive:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return -2*pi*I*n*S.NegativeOne**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
if a is S.Zero and z.is_positive:
return -Ei(-z)
elif a is S.One:
return exp(-z)
elif a is S.Half:
return sqrt(pi)*erfc(sqrt(z))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
if a.is_integer:
return exp(-z) * factorial(b) * Add(*[z**k / factorial(k)
for k in range(a)])
else:
return (gamma(a) * erfc(sqrt(z)) +
S.NegativeOne**(a - S(3)/2) * exp(-z) * sqrt(z)
* Add(*[gamma(-S.Half - k) * (-z)**k / gamma(1-a)
for k in range(a - S.Half)]))
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (S.NegativeOne**(S.Half - a) * pi*erfc(sqrt(z))/gamma(1-a)
- z**a * exp(-z) * Add(*[z**k * gamma(a) / gamma(a+k+1)
for k in range(S.Half - a)]))
if a.is_zero and z.is_positive:
return -Ei(-z)
if z.is_zero and re(a).is_positive:
return gamma(a)
def _eval_conjugate(self):
z = self.args[1]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_is_meromorphic(self, x, a):
return lowergamma._eval_is_meromorphic(self, x, a)
def _eval_rewrite_as_lowergamma(self, s, x, **kwargs):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_tractable(self, s, x, **kwargs):
return exp(loggamma(s)) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x, **kwargs):
from sympy.functions.special.error_functions import expint
return expint(1 - s, x)*x**s
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
| uppergamma |
python | streamlit__streamlit | lib/tests/streamlit/data_mocks/dask_mocks.py | {
"start": 731,
"end": 1449
} | class ____:
"""This is dummy DataFrame class, which imitates dask.dataframe.core.DataFrame class
for testing purposes. We use this to make sure that our code does a special handling
if it detects a Dask DataFrame.
This allows testing of the functionality without having the library installed,
but it won't capture changes in the API of the library. This requires
integration tests.
"""
__module__ = "dask.dataframe.core"
def __init__(self, data: pd.DataFrame):
self._data: pd.DataFrame = data
def head(self, n: int, compute: bool) -> pd.DataFrame:
"""Returns the top n element of a mock version of Dask DataFrame."""
return self._data.head(n)
| DataFrame |
python | django__django | django/contrib/postgres/fields/citext.py | {
"start": 948,
"end": 1357
} | class ____(TextField):
system_check_removed_details = {
"msg": (
"django.contrib.postgres.fields.CITextField is removed except for support "
"in historical migrations."
),
"hint": (
'Use TextField(db_collation="…") with a case-insensitive non-deterministic '
"collation instead."
),
"id": "fields.E907",
}
| CITextField |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/report_download_response_builder.py | {
"start": 274,
"end": 701
} | class ____(HttpResponseBuilder):
@classmethod
def download_report(cls) -> "ReportDownloadResponseBuilder":
return cls(find_template("download_report_file", __file__), ListTemplatePath(), None)
def build(self) -> HttpResponse:
http_response = super().build()
http_response._body = gzip.compress(http_response._body.encode("iso-8859-1"))
return http_response
| ReportDownloadResponseBuilder |
python | getsentry__sentry | src/sentry/sentry_metrics/use_case_id_registry.py | {
"start": 156,
"end": 305
} | class ____(Enum):
"""
Represents the access levels of a UseCaseID for sentry's APIs.
"""
PUBLIC = 0
PRIVATE = 1
| UseCaseIDAPIAccess |
python | django__django | tests/servers/test_basehttp.py | {
"start": 490,
"end": 687
} | class ____(BytesIO):
def close(self):
# WSGIRequestHandler closes the output file; we need to make this a
# no-op so we can still read its contents.
pass
| UnclosableBytesIO |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 9096,
"end": 9323
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
return np.sum([value == "numerical" for value in feat_type.values()])
@metafeatures.define("NumberOfCategoricalFeatures")
| NumberOfNumericFeatures |
python | spyder-ide__spyder | spyder/plugins/completion/providers/snippets/trie.py | {
"start": 212,
"end": 2009
} | class ____:
def __init__(self, key=None, value=None):
self.children = {}
self.key = key
self.value = value
def __setitem__(self, sequence, value):
elem = sequence[0]
if self.key is None:
self.key = elem
if len(self.key) > 0:
sequence = sequence[1:]
if sequence:
elem = sequence[0]
node = self.children.get(elem, None)
if node is None:
node = TrieNode()
self.children[elem] = node
node[sequence] = value
else:
self.value = value
def __getitem__(self, sequence):
node = None
if sequence[0] == self.key:
sequence = sequence[1:]
if sequence:
if sequence[0] in self.children:
next_children = self.children[sequence[0]]
node = next_children[sequence]
else:
node = self
return node
def __iter__(self):
queue = [self]
while queue != []:
node = queue.pop(0)
queue += list(node.children.values())
if node.value is not None:
yield node
def __contains__(self, sequence):
if len(sequence) + len(self.key) == 0:
return True
elem = sequence[0]
if elem == self.key:
sequence = sequence[1:]
if not sequence:
if self.value is not None:
return True
else:
return False
elem = sequence[0]
found = elem in self.children
if found:
next_children = self.children[elem]
found = sequence in next_children
return found
| TrieNode |
python | numba__numba | numba/misc/llvm_pass_timings.py | {
"start": 8983,
"end": 13362
} | class ____(Sequence):
"""A collection of pass timings.
This class implements the ``Sequence`` protocol for accessing the
individual timing records.
"""
def __init__(self, name):
self._name = name
self._records = []
@contextmanager
def record_legacy(self, name):
"""Record new timings and append to this collection.
Note: this is mainly for internal use inside the compiler pipeline.
See also ``RecordLLVMPassTimingsLegacy``
Parameters
----------
name: str
Name for the records.
"""
if config.LLVM_PASS_TIMINGS:
# Recording of pass timings is enabled
with RecordLLVMPassTimingsLegacy() as timings:
yield
rec = timings.get()
# Only keep non-empty records
if rec:
self._append(name, rec)
else:
# Do nothing. Recording of pass timings is disabled.
yield
@contextmanager
def record(self, name, pb):
"""Record new timings and append to this collection.
Note: this is mainly for internal use inside the compiler pipeline.
See also ``RecordLLVMPassTimings``
Parameters
----------
name: str
Name for the records.
"""
if config.LLVM_PASS_TIMINGS:
# Recording of pass timings is enabled
with RecordLLVMPassTimings(pb) as timings:
yield
rec = timings.get()
# Only keep non-empty records
if rec:
self._append(name, rec)
else:
# Do nothing. Recording of pass timings is disabled.
yield
def _append(self, name, timings):
"""Append timing records
Parameters
----------
name: str
Name for the records.
timings: ProcessedPassTimings
the timing records.
"""
self._records.append(NamedTimings(name, timings))
def get_total_time(self):
"""Computes the sum of the total time across all contained timings.
Returns
-------
res: float or None
Returns the total number of seconds or None if no timings were
recorded
"""
if self._records:
return sum(r.timings.get_total_time() for r in self._records)
else:
return None
def list_longest_first(self):
"""Returns the timings in descending order of total time duration.
Returns
-------
res: List[ProcessedPassTimings]
"""
return sorted(self._records,
key=lambda x: x.timings.get_total_time(),
reverse=True)
@property
def is_empty(self):
"""
"""
return not self._records
def summary(self, topn=5):
"""Return a string representing the summary of the timings.
Parameters
----------
topn: int; optional, default=5.
This limits the maximum number of items to show.
This function will show the ``topn`` most time-consuming passes.
Returns
-------
res: str
See also ``ProcessedPassTimings.summary()``
"""
if self.is_empty:
return "No pass timings were recorded"
else:
buf = []
ap = buf.append
ap(f"Printing pass timings for {self._name}")
overall_time = self.get_total_time()
ap(f"Total time: {overall_time:.4f}")
for i, r in enumerate(self._records):
ap(f"== #{i} {r.name}")
percent = r.timings.get_total_time() / overall_time * 100
ap(f" Percent: {percent:.1f}%")
ap(r.timings.summary(topn=topn, indent=1))
return "\n".join(buf)
def __getitem__(self, i):
"""Get the i-th timing record.
Returns
-------
res: (name, timings)
A named tuple with two fields:
- name: str
- timings: ProcessedPassTimings
"""
return self._records[i]
def __len__(self):
"""Length of this collection.
"""
return len(self._records)
def __str__(self):
return self.summary()
| PassTimingsCollection |
python | h5py__h5py | h5py/tests/test_h5d_direct_chunk.py | {
"start": 107,
"end": 1161
} | class ____(TestCase):
def test_write_direct_chunk(self):
filename = self.mktemp().encode()
with h5py.File(filename, "w") as filehandle:
dataset = filehandle.create_dataset("data", (100, 100, 100),
maxshape=(None, 100, 100),
chunks=(1, 100, 100),
dtype='float32')
# writing
array = numpy.zeros((10, 100, 100))
for index in range(10):
a = numpy.random.rand(100, 100).astype('float32')
dataset.id.write_direct_chunk((index, 0, 0), a.tobytes(), filter_mask=1)
array[index] = a
# checking
with h5py.File(filename, "r") as filehandle:
for i in range(10):
read_data = filehandle["data"][i]
numpy.testing.assert_array_equal(array[i], read_data)
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
| TestWriteDirectChunk |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/traversal_context.py | {
"start": 309,
"end": 550
} | class ____:
config_schema_snapshot: ConfigSchemaSnapshot
config_type_snap: ConfigTypeSnap
stack: EvaluationStackEntry
@property
def config_type_key(self) -> str:
return self.config_type_snap.key
@record
| ContextData |
python | ethereum__web3.py | web3/middleware/filter.py | {
"start": 8929,
"end": 13051
} | class ____:
def __init__(self, w3: "Web3") -> None:
self.w3 = w3
self.start_block = BlockNumber(w3.eth.block_number + 1)
@property
def filter_changes(self) -> Iterator[list[Hash32]]:
return self.get_filter_changes()
def get_filter_changes(self) -> Iterator[list[Hash32]]:
block_range_iter = iter_latest_block_ranges(self.w3, self.start_block, None)
for block_range in block_range_iter:
yield (block_hashes_in_range(self.w3, block_range))
@to_list
def block_hashes_in_range(
w3: "Web3", block_range: tuple[BlockNumber, BlockNumber]
) -> Iterable[Hash32]:
from_block, to_block = block_range
if from_block is None or to_block is None:
return
for block_number in range(from_block, to_block + 1):
yield getattr(w3.eth.get_block(BlockNumber(block_number)), "hash", None)
# --- async --- #
async def async_iter_latest_block(
w3: "AsyncWeb3[Any]",
to_block: BlockNumber | LatestBlockParam | None = None,
) -> AsyncIterable[BlockNumber]:
"""
Returns a generator that dispenses the latest block, if
any new blocks have been mined since last iteration.
If there are no new blocks or the latest block is greater than
the ``to_block`` None is returned.
>>> new_blocks = iter_latest_block(w3, 0, 10)
>>> next(new_blocks) # Latest block = 0
0
>>> next(new_blocks) # No new blocks
>>> next(new_blocks) # Latest block = 1
1
>>> next(new_blocks) # Latest block = 10
10
>>> next(new_blocks) # latest block > to block
"""
_last = None
is_bounded_range = to_block is not None and to_block != "latest"
while True:
latest_block = await w3.eth.block_number
# type ignored b/c is_bounded_range prevents unsupported comparison
if is_bounded_range and latest_block > cast(int, to_block):
yield None
# No new blocks since last iteration.
if _last is not None and _last == latest_block:
yield None
else:
yield latest_block
_last = latest_block
async def async_iter_latest_block_ranges(
w3: "AsyncWeb3[Any]",
from_block: BlockNumber,
to_block: BlockNumber | LatestBlockParam | None = None,
) -> AsyncIterable[tuple[BlockNumber | None, BlockNumber | None]]:
"""
Returns an iterator unloading ranges of available blocks
starting from `from_block` to the latest mined block,
until reaching to_block. e.g.:
>>> blocks_to_filter = iter_latest_block_ranges(w3, 0, 50)
>>> next(blocks_to_filter) # latest block number = 11
(0, 11)
>>> next(blocks_to_filter) # latest block number = 45
(12, 45)
>>> next(blocks_to_filter) # latest block number = 50
(46, 50)
"""
latest_block_iterator = async_iter_latest_block(w3, to_block)
async for latest_block in latest_block_iterator:
if latest_block is None:
yield (None, None)
elif from_block > latest_block:
yield (None, None)
else:
yield (from_block, latest_block)
from_block = BlockNumber(latest_block + 1)
async def async_get_logs_multipart(
w3: "AsyncWeb3[Any]",
start_block: BlockNumber,
stop_block: BlockNumber,
address: Address | ChecksumAddress | list[Address | ChecksumAddress],
topics: list[_Hash32 | list[_Hash32] | None],
max_blocks: int,
) -> AsyncIterable[list[LogReceipt]]:
"""
Used to break up requests to ``eth_getLogs``
The getLog request is partitioned into multiple calls of the max number of blocks
``max_blocks``.
"""
_block_ranges = block_ranges(start_block, stop_block, max_blocks)
for from_block, to_block in _block_ranges:
params = {
"fromBlock": from_block,
"toBlock": to_block,
"address": address,
"topics": topics,
}
params_with_none_dropped = cast(
FilterParams, drop_items_with_none_value(params)
)
next_logs = await w3.eth.get_logs(params_with_none_dropped)
yield next_logs
| RequestBlocks |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataform.py | {
"start": 9936,
"end": 10667
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(WORKSPACE_STR)
def test_execute(self, _, hook_mock):
op = DataformCreateWorkspaceOperator(
task_id="create-workspace",
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
workspace_id=WORKSPACE_ID,
)
op.execute(context=mock.MagicMock())
hook_mock.return_value.create_workspace.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
workspace_id=WORKSPACE_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataformCreateWorkspaceOperator |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_event_logs.py | {
"start": 7167,
"end": 11930
} | class ____(TestEventLogsEndpoint):
@pytest.mark.parametrize(
("query_params", "expected_status_code", "expected_total_entries", "expected_events"),
[
(
{},
200,
4,
[EVENT_NORMAL, EVENT_WITH_OWNER, TASK_INSTANCE_EVENT, EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
# offset, limit
(
{"offset": 1, "limit": 2},
200,
4,
[EVENT_WITH_OWNER, TASK_INSTANCE_EVENT],
),
# equal filter
(
{"event": EVENT_NORMAL},
200,
1,
[EVENT_NORMAL],
),
(
{"event": EVENT_WITH_OWNER},
200,
1,
[EVENT_WITH_OWNER],
),
(
{"task_id": TASK_ID},
200,
2,
[TASK_INSTANCE_EVENT, EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
# multiple equal filters
(
{"event": EVENT_WITH_OWNER, "owner": OWNER},
200,
1,
[EVENT_WITH_OWNER],
),
(
{"event": EVENT_WITH_OWNER_AND_TASK_INSTANCE, "task_id": TASK_ID, "run_id": DAG_RUN_ID},
200,
1,
[EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
# list filter
(
{"excluded_events": [EVENT_NORMAL, EVENT_WITH_OWNER]},
200,
2,
[TASK_INSTANCE_EVENT, EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
(
{"included_events": [EVENT_NORMAL, EVENT_WITH_OWNER]},
200,
2,
[EVENT_NORMAL, EVENT_WITH_OWNER],
),
# multiple list filters
(
{"excluded_events": [EVENT_NORMAL], "included_events": [EVENT_WITH_OWNER]},
200,
1,
[EVENT_WITH_OWNER],
),
# before, after filters
(
{"before": "2024-06-15T00:00:00Z"},
200,
0,
[],
),
(
{"after": "2024-06-15T00:00:00Z"},
200,
4,
[EVENT_NORMAL, EVENT_WITH_OWNER, TASK_INSTANCE_EVENT, EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
# order_by
(
{"order_by": "-id"},
200,
4,
[EVENT_WITH_OWNER_AND_TASK_INSTANCE, TASK_INSTANCE_EVENT, EVENT_WITH_OWNER, EVENT_NORMAL],
),
(
{"order_by": "logical_date"},
200,
4,
[TASK_INSTANCE_EVENT, EVENT_WITH_OWNER_AND_TASK_INSTANCE, EVENT_NORMAL, EVENT_WITH_OWNER],
),
# combination of query parameters
(
{"offset": 1, "excluded_events": ["non_existed_event"], "order_by": "event"},
200,
4,
[EVENT_WITH_OWNER_AND_TASK_INSTANCE, EVENT_NORMAL, TASK_INSTANCE_EVENT],
),
(
{"excluded_events": [EVENT_NORMAL], "included_events": [EVENT_WITH_OWNER], "order_by": "-id"},
200,
1,
[EVENT_WITH_OWNER],
),
(
{"map_index": -1, "try_number": 0, "order_by": "event", "limit": 1},
200,
2,
[EVENT_WITH_OWNER_AND_TASK_INSTANCE],
),
],
)
def test_get_event_logs(
self, test_client, query_params, expected_status_code, expected_total_entries, expected_events
):
with assert_queries_count(2):
response = test_client.get("/eventLogs", params=query_params)
assert response.status_code == expected_status_code
if expected_status_code != 200:
return
resp_json = response.json()
assert resp_json["total_entries"] == expected_total_entries
for event_log, expected_event in zip(resp_json["event_logs"], expected_events):
assert event_log["event"] == expected_event
def test_should_raises_401_unauthenticated(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/eventLogs")
assert response.status_code == 401
def test_should_raises_403_forbidden(self, unauthorized_test_client):
response = unauthorized_test_client.get("/eventLogs")
assert response.status_code == 403
| TestGetEventLogs |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_indexing.py | {
"start": 6001,
"end": 9827
} | class ____:
def test_take(self):
# GH 10295
idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == Timedelta("1 day")
result = idx.take([-1])
assert result == Timedelta("31 day")
result = idx.take([0, 1, 2])
expected = timedelta_range("1 day", "3 day", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = timedelta_range("1 day", "5 day", freq="2D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = timedelta_range("8 day", "2 day", freq="-3D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(["4 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(["29 day", "3 day", "6 day"], name="idx")
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_take_equiv_getitem(self):
tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"]
idx = timedelta_range(start="1D", end="2D", freq="h", name="idx")
expected = TimedeltaIndex(tds, freq=None, name="idx")
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = TimedeltaIndex(["1 days", "2 days", "3 days"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for (axis 0 with )?size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
| TestTake |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 20515,
"end": 22128
} | class ____(_ColumnCoercions, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, *, name=None, type_=None, is_crud=False, **kw
):
if (
element is None
and not is_crud
and (type_ is None or not type_.should_evaluate_none)
):
# TODO: there's no test coverage now for the
# "should_evaluate_none" part of this, as outside of "crud" this
# codepath is not normally used except in some special cases
return elements.Null()
else:
try:
return elements.BindParameter(
name, element, type_, unique=True, _is_crud=is_crud
)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
# select uses implicit coercion with warning instead of raising
if isinstance(element, selectable.Values):
advice = (
"To create a column expression from a VALUES clause, "
"use the .scalar_values() method."
)
elif isinstance(element, roles.AnonymizedFromClauseRole):
advice = (
"To create a column expression from a FROM clause row "
"as a whole, use the .table_valued() method."
)
else:
advice = None
return super()._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
| ExpressionElementImpl |
python | redis__redis-py | redis/asyncio/multidb/healthcheck.py | {
"start": 1325,
"end": 1988
} | class ____(HealthCheckPolicy):
def __init__(self, health_check_probes: int, health_check_delay: float):
if health_check_probes < 1:
raise ValueError("health_check_probes must be greater than 0")
self._health_check_probes = health_check_probes
self._health_check_delay = health_check_delay
@property
def health_check_probes(self) -> int:
return self._health_check_probes
@property
def health_check_delay(self) -> float:
return self._health_check_delay
@abstractmethod
async def execute(self, health_checks: List[HealthCheck], database) -> bool:
pass
| AbstractHealthCheckPolicy |
python | django__django | django/forms/fields.py | {
"start": 9281,
"end": 10674
} | class ____(Field):
def __init__(
self, *, max_length=None, min_length=None, strip=True, empty_value="", **kwargs
):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
self.empty_value = empty_value
super().__init__(**kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
self.validators.append(validators.ProhibitNullCharactersValidator())
def to_python(self, value):
"""Return a string."""
if value not in self.empty_values:
value = str(value)
if self.strip:
value = value.strip()
if value in self.empty_values:
return self.empty_value
return value
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if self.max_length is not None and not widget.is_hidden:
# The HTML attribute is maxlength, not max_length.
attrs["maxlength"] = str(self.max_length)
if self.min_length is not None and not widget.is_hidden:
# The HTML attribute is minlength, not min_length.
attrs["minlength"] = str(self.min_length)
return attrs
| CharField |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 45794,
"end": 48080
} | class ____(_LocalFixture):
__sparse_driver_backend__ = True
def test_pk_violation(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
a1 = Address(email_address="foo")
u1 = User(id=1, name="ed", addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address="bar")
u2 = User(id=1, name="jack", addresses=[a2])
u1.name = "edward"
a1.email_address = "foober"
s.add(u2)
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == "ed"
assert a1.email_address == "foo"
u1.name = "edward"
a1.email_address = "foober"
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1,
name="edward",
addresses=[Address(email_address="foober")],
)
],
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
a1 = Address(email_address="foo")
u1 = User(id=1, name="ed", addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address="bar")
u2 = User(id=1, name="jack", addresses=[a2])
u1.name = "edward"
a1.email_address = "foober"
nt1 = s.begin_nested()
s.add(u2)
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
nt1.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1,
name="edward",
addresses=[Address(email_address="foober")],
)
],
)
| RollbackRecoverTest |
python | streamlit__streamlit | lib/streamlit/runtime/state/safe_session_state.py | {
"start": 1202,
"end": 5604
} | class ____:
"""Thread-safe wrapper around SessionState.
When AppSession gets a re-run request, it can interrupt its existing
ScriptRunner and spin up a new ScriptRunner to handle the request.
When this happens, the existing ScriptRunner will continue executing
its script until it reaches a yield point - but during this time, it
must not mutate its SessionState.
"""
_state: SessionState
_lock: threading.RLock
_yield_callback: Callable[[], None]
def __init__(self, state: SessionState, yield_callback: Callable[[], None]) -> None:
# Fields must be set using the object's setattr method to avoid
# infinite recursion from trying to look up the fields we're setting.
object.__setattr__(self, "_state", state)
# TODO: we'd prefer this be a threading.Lock instead of RLock -
# but `call_callbacks` first needs to be rewritten.
object.__setattr__(self, "_lock", threading.RLock())
object.__setattr__(self, "_yield_callback", yield_callback)
def register_widget(
self, metadata: WidgetMetadata[T], user_key: str | None
) -> RegisterWidgetResult[T]:
self._yield_callback()
with self._lock:
return self._state.register_widget(metadata, user_key)
def on_script_will_rerun(self, latest_widget_states: WidgetStatesProto) -> None:
self._yield_callback()
with self._lock:
# TODO: rewrite this to copy the callbacks list into a local
# variable so that we don't need to hold our lock for the
# duration. (This will also allow us to downgrade our RLock
# to a Lock.)
self._state.on_script_will_rerun(latest_widget_states)
def on_script_finished(self, widget_ids_this_run: set[str]) -> None:
with self._lock:
self._state.on_script_finished(widget_ids_this_run)
def maybe_check_serializable(self) -> None:
with self._lock:
self._state.maybe_check_serializable()
def get_widget_states(self) -> list[WidgetStateProto]:
"""Return a list of serialized widget values for each widget with a value."""
with self._lock:
return self._state.get_widget_states()
def is_new_state_value(self, user_key: str) -> bool:
with self._lock:
return self._state.is_new_state_value(user_key)
def reset_state_value(self, user_key: str, value: Any | None) -> None:
"""Reset a new session state value to a given value
without triggering the "state value cannot be modified" error.
"""
self._yield_callback()
with self._lock:
self._state.reset_state_value(user_key, value)
@property
def filtered_state(self) -> dict[str, Any]:
"""The combined session and widget state, excluding keyless widgets."""
with self._lock:
return self._state.filtered_state
def __getitem__(self, key: str) -> Any:
self._yield_callback()
with self._lock:
return self._state[key]
def __setitem__(self, key: str, value: Any) -> None:
self._yield_callback()
with self._lock:
self._state[key] = value
def __delitem__(self, key: str) -> None:
self._yield_callback()
with self._lock:
del self._state[key]
def __contains__(self, key: str) -> bool:
self._yield_callback()
with self._lock:
return key in self._state
def __getattr__(self, key: str) -> Any:
try:
return self[key]
except KeyError:
raise AttributeError(f"{key} not found in session_state.")
def __setattr__(self, key: str, value: Any) -> None:
self[key] = value
def __delattr__(self, key: str) -> None:
try:
del self[key]
except KeyError:
raise AttributeError(f"{key} not found in session_state.")
def __repr__(self) -> str:
"""Presents itself as a simple dict of the underlying SessionState instance."""
kv = ((k, self._state[k]) for k in self._state._keys())
s = ", ".join(f"{k}: {v!r}" for k, v in kv)
return f"{{{s}}}"
@contextmanager
def query_params(self) -> Iterator[QueryParams]:
self._yield_callback()
with self._lock:
yield self._state.query_params
| SafeSessionState |
python | scrapy__scrapy | tests/test_spidermiddleware.py | {
"start": 12280,
"end": 12438
} | class ____:
async def process_start(self, start):
async for item_or_request in start:
yield item_or_request
| ProcessStartSimpleMiddleware |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_client.py | {
"start": 1175,
"end": 4492
} | class ____:
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config")
def test_load_cluster_config(self, config):
get_kube_client(in_cluster=True)
config.load_incluster_config.assert_called()
config.load_kube_config.assert_not_called()
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config")
def test_load_file_config(self, config):
get_kube_client(in_cluster=False)
config.load_incluster_config.assert_not_called()
config.load_kube_config.assert_called()
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config")
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.conf")
def test_load_config_disable_ssl(self, conf, config):
conf.getboolean.return_value = False
conf.getjson.return_value = {"total": 3, "backoff_factor": 0.5}
client = get_kube_client(in_cluster=False)
conf.getboolean.assert_called_with("kubernetes_executor", "verify_ssl")
assert not client.api_client.configuration.verify_ssl
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config")
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.conf")
def test_load_config_ssl_ca_cert(self, conf, config):
conf.get.return_value = "/path/to/ca.crt"
conf.getjson.return_value = {"total": 3, "backoff_factor": 0.5}
client = get_kube_client(in_cluster=False)
conf.get.assert_called_with("kubernetes_executor", "ssl_ca_cert")
assert client.api_client.configuration.ssl_ca_cert == "/path/to/ca.crt"
@pytest.mark.platform("linux")
def test_enable_tcp_keepalive(self):
socket_options = [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 120),
(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 30),
(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 6),
]
expected_http_connection_options = HTTPConnection.default_socket_options + socket_options
expected_https_connection_options = HTTPSConnection.default_socket_options + socket_options
_enable_tcp_keepalive()
assert HTTPConnection.default_socket_options == expected_http_connection_options
assert HTTPSConnection.default_socket_options == expected_https_connection_options
def test_disable_verify_ssl(self):
configuration = Configuration()
assert configuration.verify_ssl
_disable_verify_ssl()
# Support wide range of kube client libraries
if hasattr(Configuration, "get_default_copy"):
configuration = Configuration.get_default_copy()
else:
configuration = Configuration()
assert not configuration.verify_ssl
@mock.patch("kubernetes.config.incluster_config.InClusterConfigLoader")
@conf_vars(
{("kubernetes_executor", "api_client_retry_configuration"): '{"total": 3, "backoff_factor": 0.5}'}
)
def test_api_client_retry_configuration_correct_values(self, mock_in_cluster_loader):
get_kube_client(in_cluster=True)
client_configuration = mock_in_cluster_loader().load_and_set.call_args.args[0]
assert client_configuration.retries.total == 3
assert client_configuration.retries.backoff_factor == 0.5
| TestClient |
python | huggingface__transformers | src/transformers/models/align/configuration_align.py | {
"start": 782,
"end": 5667
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AlignTextModel`]. It is used to instantiate a
ALIGN text encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the text encoder of the ALIGN
[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture. The default values here are
copied from BERT.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Align Text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`AlignTextModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`AlignTextModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
Example:
```python
>>> from transformers import AlignTextConfig, AlignTextModel
>>> # Initializing a AlignTextConfig with kakaobrain/align-base style configuration
>>> configuration = AlignTextConfig()
>>> # Initializing a AlignTextModel (with random weights) from the kakaobrain/align-base style configuration
>>> model = AlignTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "align_text_model"
base_config_key = "text_config"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
use_cache=True,
**kwargs,
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.pad_token_id = pad_token_id
| AlignTextConfig |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/workflow_events.py | {
"start": 758,
"end": 1053
} | class ____(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection] = Field(default_factory=list)
raw: Optional[Any] = Field(default=None, exclude=True)
thinking_delta: Optional[str] = Field(default=None)
| AgentStream |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/RemoteGraphicsView.py | {
"start": 4642,
"end": 9394
} | class ____(QtWidgets.QWidget):
"""
Replacement for GraphicsView that does all scene management and rendering on a remote process,
while displaying on the local widget.
GraphicsItems must be created by proxy to the remote process.
"""
def __init__(self, parent=None, *args, **kwds):
"""
The keyword arguments 'useOpenGL' and 'backgound', if specified, are passed to the remote
GraphicsView.__init__(). All other keyword arguments are passed to multiprocess.QtProcess.__init__().
"""
self._img = None
self._imgReq = None
self._sizeHint = (640,480) ## no clue why this is needed, but it seems to be the default sizeHint for GraphicsView.
## without it, the widget will not compete for space against another GraphicsView.
QtWidgets.QWidget.__init__(self)
# separate local keyword arguments from remote.
remoteKwds = {}
for kwd in ['useOpenGL', 'background']:
if kwd in kwds:
remoteKwds[kwd] = kwds.pop(kwd)
self._proc = mp.QtProcess(**kwds)
self.pg = self._proc._import('pyqtgraph')
self.pg.setConfigOptions(**CONFIG_OPTIONS)
rpgRemote = self._proc._import('pyqtgraph.widgets.RemoteGraphicsView')
self._view = rpgRemote.Renderer(*args, **remoteKwds)
self._view._setProxyOptions(deferGetattr=True)
self.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
self.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)
self.setMouseTracking(True)
self.shm = None
shmFileName = self._view.shmFileName()
if sys.platform == 'win32':
opener = lambda path, flags: os.open(path, flags | os.O_TEMPORARY)
else:
opener = None
self.shmFile = open(shmFileName, 'rb', opener=opener)
self._view.sceneRendered.connect(mp.proxy(self.remoteSceneChanged)) #, callSync='off'))
## Note: we need synchronous signals
## even though there is no return value--
## this informs the renderer that it is
## safe to begin rendering again.
for method in ['scene', 'setCentralItem']:
setattr(self, method, getattr(self._view, method))
def resizeEvent(self, ev):
ret = super().resizeEvent(ev)
self._view.resize(self.size(), _callSync='off')
return ret
def sizeHint(self):
return QtCore.QSize(*self._sizeHint)
def remoteSceneChanged(self, data):
w, h, size = data
if self.shm is None or self.shm.size != size:
if self.shm is not None:
self.shm.close()
self.shm = mmap.mmap(self.shmFile.fileno(), size, access=mmap.ACCESS_READ)
self._img = QtGui.QImage(self.shm, w, h, QtGui.QImage.Format.Format_RGB32).copy()
self.update()
def paintEvent(self, ev):
if self._img is None:
return
p = QtGui.QPainter(self)
p.drawImage(self.rect(), self._img, self._img.rect())
p.end()
def mousePressEvent(self, ev):
self._view.mousePressEvent(MouseEvent(ev), _callSync='off')
ev.accept()
return super().mousePressEvent(ev)
def mouseReleaseEvent(self, ev):
self._view.mouseReleaseEvent(MouseEvent(ev), _callSync='off')
ev.accept()
return super().mouseReleaseEvent(ev)
def mouseMoveEvent(self, ev):
self._view.mouseMoveEvent(MouseEvent(ev), _callSync='off')
ev.accept()
return super().mouseMoveEvent(ev)
def wheelEvent(self, ev):
self._view.wheelEvent(WheelEvent(ev), _callSync='off')
ev.accept()
return super().wheelEvent(ev)
def enterEvent(self, ev):
self._view.enterEvent(EnterEvent(ev), _callSync='off')
return super().enterEvent(ev)
def leaveEvent(self, ev):
self._view.leaveEvent(LeaveEvent(ev), _callSync='off')
return super().leaveEvent(ev)
def remoteProcess(self):
"""Return the remote process handle. (see multiprocess.remoteproxy.RemoteEventHandler)"""
return self._proc
def close(self):
"""Close the remote process. After this call, the widget will no longer be updated."""
self._view.sceneRendered.disconnect()
self._proc.close()
| RemoteGraphicsView |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 54914,
"end": 55868
} | class ____(UserMethodVariable):
def __init__(
self,
wrapped: UserMethodVariable,
context: "ContextWrappingVariable",
**kwargs: Any,
) -> None:
kwargs.pop("fn", None)
kwargs.pop("obj", None)
super().__init__(wrapped.fn, wrapped.obj, **kwargs)
self.wrapped = wrapped
self.context = context
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
self.context.enter(tx)
result = super().call_function(tx, args, kwargs)
self.context.exit(tx)
return result
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(lambda: codegen(self.context)) # type: ignore[arg-type]
codegen(self.wrapped)
codegen.extend_output(create_call_function(1, False))
| WrappedUserMethodVariable |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_tabbed.py | {
"start": 86,
"end": 15681
} | class ____(util.MdCase):
"""Test legacy tab cases."""
extension = ['pymdownx.tabbed', 'pymdownx.superfences', 'markdown.extensions.def_list', 'pymdownx.details']
extension_configs = {}
def test_with_preceding_text(self):
"""Test content directly before tabs."""
expected = r'''
<p>foo
<strong>foo</strong></p>
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content"></div>
</div>
''' # noqa: E501
self.check_markdown(
r'''
foo
**foo**
=== "Tab"
''',
expected,
True
)
def test_tabbed(self):
"""Test tabbed."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
=== "Another Tab"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
<input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><label for="__tabbed_1_2">Another Tab</label><div class="tabbed-content">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_markdown_title(self):
"""Test tabbed."""
self.check_markdown(
R'''
=== "**Tab**"
Some *content*
And more `content`.
=== "_Another Tab_"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1"><strong>Tab</strong></label><div class="tabbed-content">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
<input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><label for="__tabbed_1_2"><em>Another Tab</em></label><div class="tabbed-content">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
''', # noqa: E501
True
)
def test_nested_tabbed(self):
"""Test nested tabbed."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
=== "Tab A"
- item 1
- item 2
=== "Tab B"
- item A
- item B
=== "Another Tab"
Some more content.
''',
r'''
<div class="tabbed-set" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<p>Some <em>content</em></p>
<div class="tabbed-set" data-tabs="2:2"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><label for="__tabbed_2_1">Tab A</label><div class="tabbed-content">
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
<input id="__tabbed_2_2" name="__tabbed_2" type="radio" /><label for="__tabbed_2_2">Tab B</label><div class="tabbed-content">
<ul>
<li>
<p>item A</p>
</li>
<li>
<p>item B</p>
</li>
</ul>
</div>
</div>
</div>
<input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><label for="__tabbed_1_2">Another Tab</label><div class="tabbed-content">
<p>Some more content.</p>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_split(self):
"""Force a split of tab sets."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
===! "Another Tab"
Some more content.
```
code
```
''',
r'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
</div>
<div class="tabbed-set" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><label for="__tabbed_2_1">Another Tab</label><div class="tabbed-content">
<p>Some more content.</p>
<div class="highlight"><pre><span></span><code>code
</code></pre></div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_break(self):
"""Test that tabs are properly terminated on blocks that are not under the tab."""
self.check_markdown(
r'''
=== "Tab"
Some *content*
And more `content`.
Content
''',
r'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
</div>
<p>Content</p>
''', # noqa: E501
True
)
def test_tabbed_select(self):
"""Test selecting a tab."""
self.check_markdown(
r'''
=== "Tab 1"
content
===+ "Tab 2"
content
=== "Tab 3"
content
''',
r'''
<div class="tabbed-set" data-tabs="1:3"><input id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab 1</label><div class="tabbed-content">
<p>content</p>
</div>
<input checked="checked" id="__tabbed_1_2" name="__tabbed_1" type="radio" /><label for="__tabbed_1_2">Tab 2</label><div class="tabbed-content">
<p>content</p>
</div>
<input id="__tabbed_1_3" name="__tabbed_1" type="radio" /><label for="__tabbed_1_3">Tab 3</label><div class="tabbed-content">
<p>content</p>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_select_multiple(self):
"""Test selecting multiple tabs."""
self.check_markdown(
r'''
=== "Tab 1"
content
===+ "Tab 2"
content
===+ "Tab 3"
content
''',
r'''
<div class="tabbed-set" data-tabs="1:3"><input id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab 1</label><div class="tabbed-content">
<p>content</p>
</div>
<input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><label for="__tabbed_1_2">Tab 2</label><div class="tabbed-content">
<p>content</p>
</div>
<input checked="checked" id="__tabbed_1_3" name="__tabbed_1" type="radio" /><label for="__tabbed_1_3">Tab 3</label><div class="tabbed-content">
<p>content</p>
</div>
</div>
''', # noqa: E501
True
)
def test_with_lists(self):
"""Test with lists."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
Paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ul>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_big_lists(self):
"""Test details with a longer list."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
Paragraph
- Paragraph
paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
<li>
<p>Paragraph</p>
<p>paragraph</p>
</li>
</ul>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_complex_lists(self):
"""Test details in a complex list scenario."""
self.check_markdown(
'''
- List
=== "Tab"
- Paragraph
=== "Tab"
1. Paragraph
Paragraph
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<ul>
<li>
<p>Paragraph</p>
<div class="tabbed-set" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><label for="__tabbed_2_1">Tab</label><div class="tabbed-content">
<ol>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ol>
</div>
</div>
</li>
</ul>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_definition_list(self):
"""Test with definition list."""
self.check_markdown(
'''
- List
=== "Tab"
Term
: Definition
More text
: Another
definition
Even more text
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab</label><div class="tabbed-content">
<dl>
<dt>Term</dt>
<dd>
<p>Definition</p>
<p>More text</p>
</dd>
<dd>
<p>Another
definition</p>
<p>Even more text</p>
</dd>
</dl>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_details(self):
"""Test with definition list."""
self.check_markdown(
'''
=== "Output"
???+ note "Open styled details"
??? danger "Nested details!"
And more content again.
''',
'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Output</label><div class="tabbed-content">
<details class="note" open="open">
<summary>Open styled details</summary>
<details class="danger">
<summary>Nested details!</summary>
<p>And more content again.</p>
</details>
</details>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list(self):
"""Test tabbed complex list scenario."""
self.check_markdown(
'''
=== "Tab with loose lists"
- Parent 1
- Child 1
- Child 2
''',
'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab with loose lists</label><div class="tabbed-content">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list_unindented_content(self):
"""Test tabbed complex list scenario with un-indented content."""
self.check_markdown(
'''
=== "Tab with loose lists"
- Parent 1
- Child 1
- Child 2
- Parent 2
''',
'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab with loose lists</label><div class="tabbed-content">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
<ul>
<li>Parent 2</li>
</ul>
''', # noqa: E501
True
)
def test_indented_code(self):
"""Test indented code."""
md = """
=== "Tab 1"
code
"""
self.check_markdown(
md,
'''
<div class="tabbed-set" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><label for="__tabbed_1_1">Tab 1</label><div class="tabbed-content">
<pre><code>code
</code></pre>
</div>
</div>
''', # noqa: E501
True
)
| TestLegacyTab |
python | openai__gym | gym/envs/mujoco/pusher_v4.py | {
"start": 109,
"end": 12160
} | class ____(MujocoEnv, utils.EzPickle):
"""
### Description
"Pusher" is a multi-jointed robot arm which is very similar to that of a human.
The goal is to move a target cylinder (called *object*) to a goal position using the robot's end effector (called *fingertip*).
The robot consists of shoulder, elbow, forearm, and wrist joints.
### Action Space
The action space is a `Box(-2, 2, (7,), float32)`. An action `(a, b)` represents the torques applied at the hinge joints.
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit |
|-----|--------------------------------------------------------------------|-------------|-------------|----------------------------------|-------|--------------|
| 0 | Rotation of the panning the shoulder | -2 | 2 | r_shoulder_pan_joint | hinge | torque (N m) |
| 1 | Rotation of the shoulder lifting joint | -2 | 2 | r_shoulder_lift_joint | hinge | torque (N m) |
| 2 | Rotation of the shoulder rolling joint | -2 | 2 | r_upper_arm_roll_joint | hinge | torque (N m) |
| 3 | Rotation of hinge joint that flexed the elbow | -2 | 2 | r_elbow_flex_joint | hinge | torque (N m) |
| 4 | Rotation of hinge that rolls the forearm | -2 | 2 | r_forearm_roll_joint | hinge | torque (N m) |
| 5 | Rotation of flexing the wrist | -2 | 2 | r_wrist_flex_joint | hinge | torque (N m) |
| 6 | Rotation of rolling the wrist | -2 | 2 | r_wrist_roll_joint | hinge | torque (N m) |
### Observation Space
Observations consist of
- Angle of rotational joints on the pusher
- Angular velocities of rotational joints on the pusher
- The coordinates of the fingertip of the pusher
- The coordinates of the object to be moved
- The coordinates of the goal position
The observation is a `ndarray` with shape `(23,)` where the elements correspond to the table below.
An analogy can be drawn to a human arm in order to help understand the state space, with the words flex and roll meaning the
same as human joints.
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit |
| --- | -------------------------------------------------------- | ---- | --- | -------------------------------- | -------- | ------------------------ |
| 0 | Rotation of the panning the shoulder | -Inf | Inf | r_shoulder_pan_joint | hinge | angle (rad) |
| 1 | Rotation of the shoulder lifting joint | -Inf | Inf | r_shoulder_lift_joint | hinge | angle (rad) |
| 2 | Rotation of the shoulder rolling joint | -Inf | Inf | r_upper_arm_roll_joint | hinge | angle (rad) |
| 3 | Rotation of hinge joint that flexed the elbow | -Inf | Inf | r_elbow_flex_joint | hinge | angle (rad) |
| 4 | Rotation of hinge that rolls the forearm | -Inf | Inf | r_forearm_roll_joint | hinge | angle (rad) |
| 5 | Rotation of flexing the wrist | -Inf | Inf | r_wrist_flex_joint | hinge | angle (rad) |
| 6 | Rotation of rolling the wrist | -Inf | Inf | r_wrist_roll_joint | hinge | angle (rad) |
| 7 | Rotational velocity of the panning the shoulder | -Inf | Inf | r_shoulder_pan_joint | hinge | angular velocity (rad/s) |
| 8 | Rotational velocity of the shoulder lifting joint | -Inf | Inf | r_shoulder_lift_joint | hinge | angular velocity (rad/s) |
| 9 | Rotational velocity of the shoulder rolling joint | -Inf | Inf | r_upper_arm_roll_joint | hinge | angular velocity (rad/s) |
| 10 | Rotational velocity of hinge joint that flexed the elbow | -Inf | Inf | r_elbow_flex_joint | hinge | angular velocity (rad/s) |
| 11 | Rotational velocity of hinge that rolls the forearm | -Inf | Inf | r_forearm_roll_joint | hinge | angular velocity (rad/s) |
| 12 | Rotational velocity of flexing the wrist | -Inf | Inf | r_wrist_flex_joint | hinge | angular velocity (rad/s) |
| 13 | Rotational velocity of rolling the wrist | -Inf | Inf | r_wrist_roll_joint | hinge | angular velocity (rad/s) |
| 14 | x-coordinate of the fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) |
| 15 | y-coordinate of the fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) |
| 16 | z-coordinate of the fingertip of the pusher | -Inf | Inf | tips_arm | slide | position (m) |
| 17 | x-coordinate of the object to be moved | -Inf | Inf | object (obj_slidex) | slide | position (m) |
| 18 | y-coordinate of the object to be moved | -Inf | Inf | object (obj_slidey) | slide | position (m) |
| 19 | z-coordinate of the object to be moved | -Inf | Inf | object | cylinder | position (m) |
| 20 | x-coordinate of the goal position of the object | -Inf | Inf | goal (goal_slidex) | slide | position (m) |
| 21 | y-coordinate of the goal position of the object | -Inf | Inf | goal (goal_slidey) | slide | position (m) |
| 22 | z-coordinate of the goal position of the object | -Inf | Inf | goal | sphere | position (m) |
### Rewards
The reward consists of two parts:
- *reward_near *: This reward is a measure of how far the *fingertip*
of the pusher (the unattached end) is from the object, with a more negative
value assigned for when the pusher's *fingertip* is further away from the
target. It is calculated as the negative vector norm of (position of
the fingertip - position of target), or *-norm("fingertip" - "target")*.
- *reward_dist *: This reward is a measure of how far the object is from
the target goal position, with a more negative value assigned for object is
further away from the target. It is calculated as the negative vector norm of
(position of the object - position of goal), or *-norm("object" - "target")*.
- *reward_control*: A negative reward for penalising the pusher if
it takes actions that are too large. It is measured as the negative squared
Euclidean norm of the action, i.e. as *- sum(action<sup>2</sup>)*.
The total reward returned is ***reward*** *=* *reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near*
Unlike other environments, Pusher does not allow you to specify weights for the individual reward terms.
However, `info` does contain the keys *reward_dist* and *reward_ctrl*. Thus, if you'd like to weight the terms,
you should create a wrapper that computes the weighted reward from `info`.
### Starting State
All pusher (not including object and goal) states start in
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0). A uniform noise in the range
[-0.005, 0.005] is added to the velocity attributes only. The velocities of
the object and goal are permanently set to 0. The object's x-position is selected uniformly
between [-0.3, 0] while the y-position is selected uniformly between [-0.2, 0.2], and this
process is repeated until the vector norm between the object's (x,y) position and origin is not greater
than 0.17. The goal always have the same position of (0.45, -0.05, -0.323).
The default framerate is 5 with each frame lasting for 0.01, giving rise to a *dt = 5 * 0.01 = 0.05*
### Episode End
The episode ends when any of the following happens:
1. Truncation: The episode duration reaches a 100 timesteps.
2. Termination: Any of the state space values is no longer finite.
### Arguments
No additional arguments are currently supported (in v2 and lower),
but modifications can be made to the XML file in the assets folder
(or by changing the path to a modified XML file in another folder)..
```
env = gym.make('Pusher-v4')
```
There is no v3 for Pusher, unlike the robot environments where a v3 and
beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc.
### Version History
* v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3
* v2: All continuous control environments now use mujoco_py >= 1.50
* v1: max_time_steps raised to 1000 for robot based tasks (not including reacher, which has a max_time_steps of 50). Added reward_threshold to environments.
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 20,
}
def __init__(self, **kwargs):
utils.EzPickle.__init__(self, **kwargs)
observation_space = Box(low=-np.inf, high=np.inf, shape=(23,), dtype=np.float64)
MujocoEnv.__init__(
self, "pusher.xml", 5, observation_space=observation_space, **kwargs
)
def step(self, a):
vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object") - self.get_body_com("goal")
reward_near = -np.linalg.norm(vec_1)
reward_dist = -np.linalg.norm(vec_2)
reward_ctrl = -np.square(a).sum()
reward = reward_dist + 0.1 * reward_ctrl + 0.5 * reward_near
self.do_simulation(a, self.frame_skip)
if self.render_mode == "human":
self.render()
ob = self._get_obs()
return (
ob,
reward,
False,
False,
dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl),
)
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.trackbodyid = -1
self.viewer.cam.distance = 4.0
def reset_model(self):
qpos = self.init_qpos
self.goal_pos = np.asarray([0, 0])
while True:
self.cylinder_pos = np.concatenate(
[
self.np_random.uniform(low=-0.3, high=0, size=1),
self.np_random.uniform(low=-0.2, high=0.2, size=1),
]
)
if np.linalg.norm(self.cylinder_pos - self.goal_pos) > 0.17:
break
qpos[-4:-2] = self.cylinder_pos
qpos[-2:] = self.goal_pos
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
qvel[-4:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate(
[
self.data.qpos.flat[:7],
self.data.qvel.flat[:7],
self.get_body_com("tips_arm"),
self.get_body_com("object"),
self.get_body_com("goal"),
]
)
| PusherEnv |
python | eventlet__eventlet | eventlet/websocket.py | {
"start": 1555,
"end": 1788
} | class ____(Exception):
def __init__(self, status='400 Bad Request', body=None, headers=None):
super(Exception, self).__init__()
self.status = status
self.body = body
self.headers = headers
| BadRequest |
python | pytorch__pytorch | test/nn/test_module_hooks.py | {
"start": 595,
"end": 979
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.seq1 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
self.seq2 = nn.Sequential(*[nn.Linear(10, 10) for _ in range(2)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.seq2(self.seq1(x))
ToyNamedTuple = namedtuple("ToyNamedTuple", "content")
| Net |
python | kubernetes-client__python | kubernetes/client/models/v1_controller_revision.py | {
"start": 383,
"end": 7835
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'data': 'object',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'revision': 'int'
}
attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'kind': 'kind',
'metadata': 'metadata',
'revision': 'revision'
}
def __init__(self, api_version=None, data=None, kind=None, metadata=None, revision=None, local_vars_configuration=None): # noqa: E501
"""V1ControllerRevision - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._data = None
self._kind = None
self._metadata = None
self._revision = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if data is not None:
self.data = data
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.revision = revision
@property
def api_version(self):
"""Gets the api_version of this V1ControllerRevision. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ControllerRevision. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ControllerRevision.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ControllerRevision. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""Gets the data of this V1ControllerRevision. # noqa: E501
Data is the serialized representation of the state. # noqa: E501
:return: The data of this V1ControllerRevision. # noqa: E501
:rtype: object
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1ControllerRevision.
Data is the serialized representation of the state. # noqa: E501
:param data: The data of this V1ControllerRevision. # noqa: E501
:type: object
"""
self._data = data
@property
def kind(self):
"""Gets the kind of this V1ControllerRevision. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ControllerRevision. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ControllerRevision.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ControllerRevision. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ControllerRevision. # noqa: E501
:return: The metadata of this V1ControllerRevision. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ControllerRevision.
:param metadata: The metadata of this V1ControllerRevision. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def revision(self):
"""Gets the revision of this V1ControllerRevision. # noqa: E501
Revision indicates the revision of the state represented by Data. # noqa: E501
:return: The revision of this V1ControllerRevision. # noqa: E501
:rtype: int
"""
return self._revision
@revision.setter
def revision(self, revision):
"""Sets the revision of this V1ControllerRevision.
Revision indicates the revision of the state represented by Data. # noqa: E501
:param revision: The revision of this V1ControllerRevision. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and revision is None: # noqa: E501
raise ValueError("Invalid value for `revision`, must not be `None`") # noqa: E501
self._revision = revision
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ControllerRevision):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ControllerRevision):
return True
return self.to_dict() != other.to_dict()
| V1ControllerRevision |
python | huggingface__transformers | src/transformers/models/autoformer/modeling_autoformer.py | {
"start": 12407,
"end": 15298
} | class ____(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
def forward(
self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor] = None
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return data, loc, scale
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
"""
Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
Args:
input_tensor (`torch.FloatTensor`):
Input tensor, of which the average must be computed.
weights (`torch.FloatTensor`, *optional*):
Weights tensor, of the same shape as `input_tensor`.
dim (`int`, *optional*):
The dim along which to average `input_tensor`.
Returns:
`torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
"""
if weights is not None:
weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
else:
return input_tensor.mean(dim=dim)
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
"""
Computes the negative log likelihood loss from input distribution with respect to target.
"""
return -input.log_prob(target)
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer
| AutoformerNOPScaler |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 7597,
"end": 7641
} | class ____(ASTExpression):
pass
| ASTLiteral |
python | PrefectHQ__prefect | src/prefect/states.py | {
"start": 21961,
"end": 29681
} | class ____:
def __init__(self, states: list[State]) -> None:
self.states: list[State] = states
self.type_counts: dict[StateType, int] = self._get_type_counts(states)
self.total_count: int = len(states)
self.cancelled_count: int = self.type_counts[StateType.CANCELLED]
self.final_count: int = sum(state.is_final() for state in states)
self.not_final_count: int = self.total_count - self.final_count
self.paused_count: int = self.type_counts[StateType.PAUSED]
@property
def fail_count(self) -> int:
return self.type_counts[StateType.FAILED] + self.type_counts[StateType.CRASHED]
def all_completed(self) -> bool:
return self.type_counts[StateType.COMPLETED] == self.total_count
def any_cancelled(self) -> bool:
return self.cancelled_count > 0
def any_failed(self) -> bool:
return (
self.type_counts[StateType.FAILED] > 0
or self.type_counts[StateType.CRASHED] > 0
)
def any_paused(self) -> bool:
return self.paused_count > 0
def all_final(self) -> bool:
return self.final_count == self.total_count
def counts_message(self) -> str:
count_messages = [f"total={self.total_count}"]
if self.not_final_count:
count_messages.append(f"not_final={self.not_final_count}")
count_messages += [
f"{state_type.value!r}={count}"
for state_type, count in self.type_counts.items()
if count
]
return ", ".join(count_messages)
@staticmethod
def _get_type_counts(states: Iterable[State]) -> Dict[StateType, int]:
return Counter(state.type for state in states)
def __repr__(self) -> str:
return f"StateGroup<{self.counts_message()}>"
def _traced(cls: Type["State[R]"], **kwargs: Any) -> "State[R]":
state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
carrier = {}
propagate.inject(carrier)
state_details.traceparent = carrier.get("traceparent")
return cls(**kwargs, state_details=state_details)
def Scheduled(
cls: Type["State[R]"] = State,
scheduled_time: Optional[datetime.datetime] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `Scheduled` states.
Returns:
State: a Scheduled state
"""
state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
if scheduled_time is None:
scheduled_time = now()
elif state_details.scheduled_time:
raise ValueError("An extra scheduled_time was provided in state_details")
state_details.scheduled_time = scheduled_time
return _traced(cls, type=StateType.SCHEDULED, state_details=state_details, **kwargs)
def Completed(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Completed` states.
Returns:
State: a Completed state
"""
return _traced(cls, type=StateType.COMPLETED, **kwargs)
def Running(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Running` states.
Returns:
State: a Running state
"""
return _traced(cls, type=StateType.RUNNING, **kwargs)
def Failed(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Failed` states.
Returns:
State: a Failed state
"""
return _traced(cls, type=StateType.FAILED, **kwargs)
def Crashed(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Crashed` states.
Returns:
State: a Crashed state
"""
return _traced(cls, type=StateType.CRASHED, **kwargs)
def Cancelling(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Cancelling` states.
Returns:
State: a Cancelling state
"""
return _traced(cls, type=StateType.CANCELLING, **kwargs)
def Cancelled(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Cancelled` states.
Returns:
State: a Cancelled state
"""
return _traced(cls, type=StateType.CANCELLED, **kwargs)
def Pending(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Pending` states.
Returns:
State: a Pending state
"""
return _traced(cls, type=StateType.PENDING, **kwargs)
def Paused(
cls: Type["State[R]"] = State,
timeout_seconds: Optional[int] = None,
pause_expiration_time: Optional[datetime.datetime] = None,
reschedule: bool = False,
pause_key: Optional[str] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `Paused` states.
Returns:
State: a Paused state
"""
state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
if state_details.pause_timeout:
raise ValueError("An extra pause timeout was provided in state_details")
if pause_expiration_time is not None and timeout_seconds is not None:
raise ValueError(
"Cannot supply both a pause_expiration_time and timeout_seconds"
)
if pause_expiration_time is None and timeout_seconds is None:
pass
else:
state_details.pause_timeout = (
pause_expiration_time
if pause_expiration_time
else now() + datetime.timedelta(seconds=timeout_seconds or 0)
)
state_details.pause_reschedule = reschedule
state_details.pause_key = pause_key
return _traced(cls, type=StateType.PAUSED, state_details=state_details, **kwargs)
def Suspended(
cls: Type["State[R]"] = State,
timeout_seconds: Optional[int] = None,
pause_expiration_time: Optional[datetime.datetime] = None,
pause_key: Optional[str] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `Suspended` states.
Returns:
State: a Suspended state
"""
return Paused(
cls=cls,
name="Suspended",
reschedule=True,
timeout_seconds=timeout_seconds,
pause_expiration_time=pause_expiration_time,
pause_key=pause_key,
**kwargs,
)
def AwaitingRetry(
cls: Type["State[R]"] = State,
scheduled_time: Optional[datetime.datetime] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `AwaitingRetry` states.
Returns:
State: an AwaitingRetry state
"""
return Scheduled(
cls=cls, scheduled_time=scheduled_time, name="AwaitingRetry", **kwargs
)
def AwaitingConcurrencySlot(
cls: Type["State[R]"] = State,
scheduled_time: Optional[datetime.datetime] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `AwaitingConcurrencySlot` states.
Returns:
State: an AwaitingConcurrencySlot state
"""
return Scheduled(
cls=cls, scheduled_time=scheduled_time, name="AwaitingConcurrencySlot", **kwargs
)
def Retrying(cls: Type["State[R]"] = State, **kwargs: Any) -> "State[R]":
"""Convenience function for creating `Retrying` states.
Returns:
State: a Retrying state
"""
return _traced(cls, type=StateType.RUNNING, name="Retrying", **kwargs)
def Late(
cls: Type["State[R]"] = State,
scheduled_time: Optional[datetime.datetime] = None,
**kwargs: Any,
) -> "State[R]":
"""Convenience function for creating `Late` states.
Returns:
State: a Late state
"""
return Scheduled(cls=cls, scheduled_time=scheduled_time, name="Late", **kwargs)
| StateGroup |
python | python-excel__xlwt | xlwt/Cell.py | {
"start": 86,
"end": 546
} | class ____(object):
__slots__ = ["rowx", "colx", "xf_idx", "sst_idx"]
def __init__(self, rowx, colx, xf_idx, sst_idx):
self.rowx = rowx
self.colx = colx
self.xf_idx = xf_idx
self.sst_idx = sst_idx
def get_biff_data(self):
# return BIFFRecords.LabelSSTRecord(self.rowx, self.colx, self.xf_idx, self.sst_idx).get()
return pack('<5HL', 0x00FD, 10, self.rowx, self.colx, self.xf_idx, self.sst_idx)
| StrCell |
python | scipy__scipy | scipy/integrate/_quadpack_py.py | {
"start": 297,
"end": 52359
} | class ____(UserWarning):
"""
Warning on issues during integration.
"""
pass
@xp_capabilities(np_only=True)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50, complex_func=False):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : {function, scipy.LowLevelCallable}
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(double x)
double func(double x, void *user_data)
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
In the call forms with ``xx``, ``n`` is the length of the ``xx``
array which contains ``xx[0] == x`` and the rest of the items are
numbers contained in the ``args`` argument of quad.
In addition, certain ctypes call signatures are supported for
backward compatibility, but those should not be used in new code.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
complex_func : bool, optional
Indicate if the function's (`func`) return type is real
(``complex_func=False``: default) or complex (``complex_func=True``).
In both cases, the function's argument is real.
If full_output is also non-zero, the `infodict`, `message`, and
`explain` for the real and complex components are returned in
a dictionary with keys "real output" and "imag output".
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
numerical approximation. See `epsrel` below.
epsrel : float or int, optional
Relative error tolerance. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted. Note that this option cannot be used in conjunction
with ``weight``.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
For valid results, the integral must converge; behavior for divergent
integrals is not guaranteed.
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e., it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions, and these do not support specifying break points. The
possible values of weight and the corresponding weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For weighted integrals with finite integration limits, the integration
is performed using a Clenshaw-Curtis method, which uses Chebyshev moments.
For repeated calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming w neq 0). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslist']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslist']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. The routine called depends on
`weight`, `points` and the integration limits `a` and `b`.
================ ============== ========== =====================
QUADPACK routine `weight` `points` infinite bounds
================ ============== ========== =====================
qagse None No No
qagie None No Yes
qagpe None Yes No
qawoe 'sin', 'cos' No No
qawfe 'sin', 'cos' No either `a` or `b`
qawse 'alg*' No No
qawce 'cauchy' No No
================ ============== ========== =====================
The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types. The integration is performed using a 21-point Gauss-Kronrod
quadrature within each subinterval.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
qagpe
serves the same purposes as QAGS, but also allows the
user to provide explicit information about the location
and type of trouble-spots i.e. the abscissae of internal
singularities, discontinuities and other difficulties of
the integrand function.
qawoe
is an integrator for the evaluation of
:math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
:math:`\\int^b_a \\sin(\\omega x)f(x)dx`
over a finite interval [a,b], where :math:`\\omega` and :math:`f`
are specified by the user. The rule evaluation component is based
on the modified Clenshaw-Curtis technique
An adaptive subdivision scheme is used in connection
with an extrapolation procedure, which is a modification
of that in ``QAGS`` and allows the algorithm to deal with
singularities in :math:`f(x)`.
qawfe
calculates the Fourier transform
:math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
:math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
for user-provided :math:`\\omega` and :math:`f`. The procedure of
``QAWO`` is applied on successive finite intervals, and convergence
acceleration by means of the :math:`\\varepsilon`-algorithm is applied
to the series of integral approximations.
qawse
approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
:math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
:math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
:math:`\\log(x-a)\\log(b-x)`.
The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
function :math:`v`. A globally adaptive subdivision strategy is
applied, with modified Clenshaw-Curtis integration on those
subintervals which contain `a` or `b`.
qawce
compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
interpreted as a Cauchy principal value integral, for user specified
:math:`c` and :math:`f`. The strategy is globally adaptive. Modified
Clenshaw-Curtis integration is used on those intervals containing the
point :math:`x = c`.
**Integration of Complex Function of a Real Variable**
A complex valued function, :math:`f`, of a real variable can be written as
:math:`f = g + ih`. Similarly, the integral of :math:`f` can be
written as
.. math::
\\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx
assuming that the integrals of :math:`g` and :math:`h` exist
over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
complex-valued functions by integrating the real and imaginary components
separately.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
.. [2] McCullough, Thomas; Phillips, Keith (1973).
Foundations of Analysis in the Complex Plane.
Holt Rinehart Winston.
ISBN 0-03-086370-8
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> import numpy as np
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`
>>> f = lambda x, a: a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
Be aware that pulse shapes and other sharp features as compared to the
size of the integration interval may not be integrated correctly using
this method. A simplified example of this limitation is integrating a
y-axis reflected step function with many zero values within the integrals
bounds.
>>> y = lambda x: 1 if x<=0 else 0
>>> integrate.quad(y, -1, 1)
(1.0, 1.1102230246251565e-14)
>>> integrate.quad(y, -1, 100)
(1.0000000002199108, 1.0189464580163188e-08)
>>> integrate.quad(y, -1, 10000)
(0.0, 0.0)
"""
if not isinstance(args, tuple):
args = (args,)
# Shortcut for empty interval, also works for improper integrals.
if a == b:
if full_output == 0:
return (0., 0.)
else:
infodict = {"neval": 0, "last": 0,
"alist": np.full(limit, np.nan, dtype=np.float64),
"blist": np.full(limit, np.nan, dtype=np.float64),
"rlist": np.zeros(limit, dtype=np.float64),
"elist": np.zeros(limit, dtype=np.float64),
"iord" : np.zeros(limit, dtype=np.int32)}
if complex_func:
return (0.+0.j, 0.+0.j, {"real": infodict, "imag": infodict})
else:
return (0., 0., infodict)
# check the limits of integration: \int_a^b, expect a < b
flip, a, b = b < a, min(a, b), max(a, b)
if complex_func:
def imfunc(x, *args):
return func(x, *args).imag
def refunc(x, *args):
return func(x, *args).real
re_retval = quad(refunc, a, b, args, full_output, epsabs,
epsrel, limit, points, weight, wvar, wopts,
maxp1, limlst, complex_func=False)
im_retval = quad(imfunc, a, b, args, full_output, epsabs,
epsrel, limit, points, weight, wvar, wopts,
maxp1, limlst, complex_func=False)
integral = re_retval[0] + 1j*im_retval[0]
error_estimate = re_retval[1] + 1j*im_retval[1]
retval = integral, error_estimate
if full_output:
msgexp = {}
msgexp["real"] = re_retval[2:]
msgexp["imag"] = im_retval[2:]
retval = retval + (msgexp,)
return retval
if weight is None:
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
if points is not None:
msg = ("Break points cannot be specified when using weighted integrand.\n"
"Continuing, ignoring specified points.")
warnings.warn(msg, IntegrationWarning, stacklevel=2)
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
if flip:
retval = (-retval[0],) + retval[1:]
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: f"The maximum number of subdivisions ({limit}) has been achieved.\n "
f"If increasing the limit yields no improvement it is advised to "
f"analyze \n the integrand in order to determine the difficulties. "
f"If the position of a \n local difficulty can be determined "
f"(singularity, discontinuity) one will \n probably gain from "
f"splitting up the interval and calling the integrator \n on the "
f"subranges. Perhaps a special-purpose integrator should be used.",
2: "The occurrence of roundoff error is detected, which prevents \n "
"the requested tolerance from being achieved. "
"The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n "
"integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n "
"in the extrapolation table. It is assumed that the requested "
"tolerance\n cannot be achieved, and that the returned result "
"(if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n "
"and error are less reliable. It is assumed that the requested "
"accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == np.inf or a == -np.inf):
msgs[1] = (
"The maximum number of cycles allowed has been achieved., e.e.\n of "
"subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n "
"*pi/abs(omega), for k = 1, 2, ..., lst. "
"One can allow more cycles by increasing the value of limlst. "
"Look at info['ierlst'] with full_output=1."
)
msgs[4] = (
"The extrapolation table constructed for convergence acceleration\n of "
"the series formed by the integral contributions over the cycles, \n does "
"not converge to within the requested accuracy. "
"Look at \n info['ierlst'] with full_output=1."
)
msgs[7] = (
"Bad integrand behavior occurs within one or more of the cycles.\n "
"Location and type of the difficulty involved can be determined from \n "
"the vector info['ierlist'] obtained with full_output=1."
)
explain = {1: "The maximum number of subdivisions (= limit) has been \n "
"achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n "
"the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n "
"this cycle.",
4: "The integral over this cycle does not converge (to within the "
"required accuracy) due to roundoff in the extrapolation "
"procedure invoked on this cycle. It is assumed that the result "
"on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or "
"slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning, stacklevel=2)
return retval[:-1]
elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6
if epsabs <= 0: # Small error tolerance - applies to all methods
if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
" 5e-29 and 50*(machine epsilon).")
elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf):
msg = ("Sine or cosine weighted integrals with infinite domain"
" must have 'epsabs'>0.")
elif weight is None:
if points is None: # QAGSE/QAGIE
msg = ("Invalid 'limit' argument. There must be"
" at least one subinterval")
else: # QAGPE
if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
msg = ("All break points in 'points' must lie within the"
" integration limits.")
elif len(points) >= limit:
msg = (f"Number of break points ({len(points):d}) "
f"must be less than subinterval limit ({limit:d})")
else:
if maxp1 < 1:
msg = "Chebyshev moment limit maxp1 must be >=1."
elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE
msg = "Cycle limit limlst must be >=3."
elif weight.startswith('alg'): # QAWSE
if min(wvar) < -1:
msg = "wvar parameters (alpha, beta) must both be >= -1."
if b < a:
msg = "Integration limits a, b must satistfy a<b."
elif weight == 'cauchy' and wvar in (a, b):
msg = ("Parameter 'wvar' must not equal"
" integration limits 'a' or 'b'.")
raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != np.inf and a != -np.inf):
pass # standard integration
elif (b == np.inf and a != -np.inf):
infbounds = 1
bound = a
elif (b == np.inf and a == -np.inf):
infbounds = 2
bound = 0 # ignored
elif (b != np.inf and a == -np.inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func, bound, infbounds, args, full_output,
epsabs, epsrel, limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
#Duplicates force function evaluation at singular points
the_points = np.unique(points)
the_points = the_points[a < the_points]
the_points = the_points[the_points < b]
the_points = np.concatenate((the_points, (0., 0.)))
return _quadpack._qagpe(func, a, b, the_points, args, full_output,
epsabs, epsrel, limit)
def _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1,weight, wvar, wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError(f"{weight} not a recognized weighting function.")
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != np.inf and a != -np.inf): # finite limits
if wopts is None: # no precomputed Chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed Chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args,
full_output,epsabs, epsrel, limit, maxp1, 2,
momcom, chebcom)
elif (b == np.inf and a != -np.inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs, limlst, limit, maxp1)
elif (b != np.inf and a == -np.inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-np.inf, np.inf] or b in [-np.inf, np.inf]:
message = "Cannot integrate with this weight over an infinite interval."
raise ValueError(message)
if weight.startswith('alg'):
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
@xp_capabilities(np_only=True)
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : callable or float
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8. ``dblquad`` tries to obtain
an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
to ``hfun(x)``, and ``result`` is the numerical approximation.
See `epsrel` below.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
and ``50 * (machine epsilon)``. See `epsabs` above.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
simpson : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
For valid results, the integral must converge; behavior for divergent
integrals is not guaranteed.
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. For each level of integration, ``qagse``
is used for finite limits or ``qagie`` is used if either limit (or both!)
are infinite. The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types. The integration is is performed using a 21-point Gauss-Kronrod
quadrature within each subinterval.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
Examples
--------
Compute the double integral of ``x * y**2`` over the box
``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \\,dy \\,dx`.
>>> import numpy as np
>>> from scipy import integrate
>>> f = lambda y, x: x*y**2
>>> integrate.dblquad(f, 0, 2, 0, 1)
(0.6666666666666667, 7.401486830834377e-15)
Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
\\,dy \\,dx`.
>>> f = lambda y, x: 1
>>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
(0.41421356237309503, 1.1083280054755938e-14)
Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx`
for :math:`a=1, 3`.
>>> f = lambda y, x, a: a*x*y
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
(0.33333333333333337, 5.551115123125783e-15)
>>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
(0.9999999999999999, 1.6653345369377348e-14)
Compute the two-dimensional Gaussian Integral, which is the integral of the
Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
:math:`(-\\infty,+\\infty)`. That is, compute the integral
:math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`.
>>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
>>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
(3.141592653589777, 2.5173086737433208e-08)
"""
def temp_ranges(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
return nquad(func, [temp_ranges, [a, b]], args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
@xp_capabilities(np_only=True)
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function or float
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result
or a float indicating a constant boundary curve.
hfun : function or float
The upper boundary curve in y (same requirements as `gfun`).
qfun : function or float
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float or a float
indicating a constant boundary surface.
rfun : function or float
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad : Adaptive quadrature using QUADPACK
fixed_quad : Fixed-order Gaussian quadrature
dblquad : Double integrals
nquad : N-dimensional integrals
romb : Integrators for sampled data
simpson : Integrators for sampled data
scipy.special : For coefficients and roots of orthogonal polynomials
Notes
-----
For valid results, the integral must converge; behavior for divergent
integrals is not guaranteed.
**Details of QUADPACK level routines**
`quad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. For each level of integration, ``qagse``
is used for finite limits or ``qagie`` is used, if either limit (or both!)
are infinite. The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types. The integration is is performed using a 21-point Gauss-Kronrod
quadrature within each subinterval.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
Examples
--------
Compute the triple integral of ``x * y * z``, over ``x`` ranging
from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1.
That is, :math:`\\int^{x=2}_{x=1} \\int^{y=3}_{y=2} \\int^{z=1}_{z=0} x y z
\\,dz \\,dy \\,dx`.
>>> import numpy as np
>>> from scipy import integrate
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
(1.8749999999999998, 3.3246447942574074e-14)
Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1-2x}_{y=0}
\\int^{z=1-x-2y}_{z=0} x y z \\,dz \\,dy \\,dx`.
Note: `qfun`/`rfun` takes arguments in the order (x, y), even though ``f``
takes arguments in the order (z, y, x).
>>> f = lambda z, y, x: x*y*z
>>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
(0.05416666666666668, 2.1774196738157757e-14)
Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1}_{y=0} \\int^{z=1}_{z=0}
a x y z \\,dz \\,dy \\,dx` for :math:`a=1, 3`.
>>> f = lambda z, y, x, a: a*x*y*z
>>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(1,))
(0.125, 5.527033708952211e-15)
>>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(3,))
(0.375, 1.6581101126856635e-14)
Compute the three-dimensional Gaussian Integral, which is the integral of
the Gaussian function :math:`f(x,y,z) = e^{-(x^{2} + y^{2} + z^{2})}`, over
:math:`(-\\infty,+\\infty)`. That is, compute the integral
:math:`\\iiint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2} + z^{2})} \\,dz
\\,dy\\,dx`.
>>> f = lambda x, y, z: np.exp(-(x ** 2 + y ** 2 + z ** 2))
>>> integrate.tplquad(f, -np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf)
(5.568327996830833, 4.4619078828029765e-08)
"""
# f(z, y, x)
# qfun/rfun(x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Only qfun / rfun is different API...
def ranges0(*args):
return [qfun(args[1], args[0]) if callable(qfun) else qfun,
rfun(args[1], args[0]) if callable(rfun) else rfun]
def ranges1(*args):
return [gfun(args[0]) if callable(gfun) else gfun,
hfun(args[0]) if callable(hfun) else hfun]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args,
opts={"epsabs": epsabs, "epsrel": epsrel})
@xp_capabilities(np_only=True)
def nquad(func, ranges, args=None, opts=None, full_output=False):
r"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : {callable, scipy.LowLevelCallable}
The function to be integrated. Has arguments of ``x0, ..., xn``,
``t0, ..., tm``, where integration is carried out over ``x0, ..., xn``,
which must be floats. Where ``t0, ..., tm`` are extra arguments
passed in args.
Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.
Integration is carried out in order. That is, integration over ``x0``
is the innermost integral, and ``xn`` is the outermost.
If the user desires improved integration performance, then `f` may
be a `scipy.LowLevelCallable` with one of the signatures::
double func(int n, double *xx)
double func(int n, double *xx, void *user_data)
where ``n`` is the number of variables and args. The ``xx`` array
contains the coordinates and extra arguments. ``user_data`` is the data
contained in the `scipy.LowLevelCallable`.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g., if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ..., tn``, required by ``func``, ``ranges``,
and ``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
options are used for all levels of integraion. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.,
``opts[0]`` corresponds to integration over ``x0``, and so on. If a
callable, the signature must be the same as for ``ranges``. The
available options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-D numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
Notes
-----
For valid results, the integral must converge; behavior for divergent
integrals is not guaranteed.
**Details of QUADPACK level routines**
`nquad` calls routines from the FORTRAN library QUADPACK. This section
provides details on the conditions for each routine to be called and a
short description of each routine. The routine called depends on
`weight`, `points` and the integration limits `a` and `b`.
================ ============== ========== =====================
QUADPACK routine `weight` `points` infinite bounds
================ ============== ========== =====================
qagse None No No
qagie None No Yes
qagpe None Yes No
qawoe 'sin', 'cos' No No
qawfe 'sin', 'cos' No either `a` or `b`
qawse 'alg*' No No
qawce 'cauchy' No No
================ ============== ========== =====================
The following provides a short description from [1]_ for each
routine.
qagse
is an integrator based on globally adaptive interval
subdivision in connection with extrapolation, which will
eliminate the effects of integrand singularities of
several types. The integration is is performed using a 21-point Gauss-Kronrod
quadrature within each subinterval.
qagie
handles integration over infinite intervals. The infinite range is
mapped onto a finite interval and subsequently the same strategy as
in ``QAGS`` is applied.
qagpe
serves the same purposes as QAGS, but also allows the
user to provide explicit information about the location
and type of trouble-spots i.e. the abscissae of internal
singularities, discontinuities and other difficulties of
the integrand function.
qawoe
is an integrator for the evaluation of
:math:`\int^b_a \cos(\omega x)f(x)dx` or
:math:`\int^b_a \sin(\omega x)f(x)dx`
over a finite interval [a,b], where :math:`\omega` and :math:`f`
are specified by the user. The rule evaluation component is based
on the modified Clenshaw-Curtis technique
An adaptive subdivision scheme is used in connection
with an extrapolation procedure, which is a modification
of that in ``QAGS`` and allows the algorithm to deal with
singularities in :math:`f(x)`.
qawfe
calculates the Fourier transform
:math:`\int^\infty_a \cos(\omega x)f(x)dx` or
:math:`\int^\infty_a \sin(\omega x)f(x)dx`
for user-provided :math:`\omega` and :math:`f`. The procedure of
``QAWO`` is applied on successive finite intervals, and convergence
acceleration by means of the :math:`\varepsilon`-algorithm is applied
to the series of integral approximations.
qawse
approximate :math:`\int^b_a w(x)f(x)dx`, with :math:`a < b` where
:math:`w(x) = (x-a)^{\alpha}(b-x)^{\beta}v(x)` with
:math:`\alpha,\beta > -1`, where :math:`v(x)` may be one of the
following functions: :math:`1`, :math:`\log(x-a)`, :math:`\log(b-x)`,
:math:`\log(x-a)\log(b-x)`.
The user specifies :math:`\alpha`, :math:`\beta` and the type of the
function :math:`v`. A globally adaptive subdivision strategy is
applied, with modified Clenshaw-Curtis integration on those
subintervals which contain `a` or `b`.
qawce
compute :math:`\int^b_a f(x) / (x-c)dx` where the integral must be
interpreted as a Cauchy principal value integral, for user specified
:math:`c` and :math:`f`. The strategy is globally adaptive. Modified
Clenshaw-Curtis integration is used on those intervals containing the
point :math:`x = c`.
References
----------
.. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
Überhuber, Christoph W.; Kahaner, David (1983).
QUADPACK: A subroutine package for automatic integration.
Springer-Verlag.
ISBN 978-3-540-12553-2.
Examples
--------
Calculate
.. math::
\int^{1}_{-0.15} \int^{0.8}_{0.13} \int^{1}_{-1} \int^{1}_{0}
f(x_0, x_1, x_2, x_3) \,dx_0 \,dx_1 \,dx_2 \,dx_3 ,
where
.. math::
f(x_0, x_1, x_2, x_3) = \begin{cases}
x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+1 & (x_0-0.2 x_3-0.5-0.25 x_1 > 0) \\
x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+0 & (x_0-0.2 x_3-0.5-0.25 x_1 \leq 0)
\end{cases} .
>>> import numpy as np
>>> from scipy import integrate
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
Calculate
.. math::
\int^{t_0+t_1+1}_{t_0+t_1-1}
\int^{x_2+t_0^2 t_1^3+1}_{x_2+t_0^2 t_1^3-1}
\int^{t_0 x_1+t_1 x_2+1}_{t_0 x_1+t_1 x_2-1}
f(x_0,x_1, x_2,t_0,t_1)
\,dx_0 \,dx_1 \,dx_2,
where
.. math::
f(x_0, x_1, x_2, t_0, t_1) = \begin{cases}
x_0 x_2^2 + \sin{x_1}+2 & (x_0+t_1 x_1-t_0 > 0) \\
x_0 x_2^2 +\sin{x_1}+1 & (x_0+t_1 x_1-t_0 \leq 0)
\end{cases}
and :math:`(t_0, t_1) = (0, 1)` .
>>> def func2(x0, x1, x2, t0, t1):
... return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, t0, t1):
... return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1]
>>> def lim1(x2, t0, t1):
... return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1]
>>> def lim2(t0, t1):
... return [t0 + t1 - 1, t0 + t1 + 1]
>>> def opts0(x1, x2, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, t0, t1):
... return {}
>>> def opts2(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1),
... opts=[opts0, opts1, opts2])
(36.099919226771625, 1.8546948553373528e-07)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
| IntegrationWarning |
python | pydata__xarray | xarray/coding/common.py | {
"start": 482,
"end": 596
} | class ____(RuntimeWarning):
"""Warnings about encoding/decoding issues in serialization."""
| SerializationWarning |
python | jazzband__django-polymorphic | src/polymorphic/contrib/extra_views.py | {
"start": 1946,
"end": 2483
} | class ____(PolymorphicFormSetMixin, extra_views.ModelFormSetView):
"""
A view that displays a single polymorphic formset.
.. code-block:: python
from polymorphic.formsets import PolymorphicFormSetChild
class ItemsView(PolymorphicFormSetView):
model = Item
formset_children = [
PolymorphicFormSetChild(ItemSubclass1),
PolymorphicFormSetChild(ItemSubclass2),
]
"""
formset_class = BasePolymorphicModelFormSet
| PolymorphicFormSetView |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 9787,
"end": 10131
} | class ____(object):
_gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)
def tokenize(self, value, token):
match = self._gherkin_prefix.match(value)
if not match:
return [(value, token)]
end = match.end()
return [(value[:end], GHERKIN), (value[end:], token)]
| GherkinTokenizer |
python | PrefectHQ__prefect | tests/runtime/test_flow_run.py | {
"start": 6812,
"end": 7680
} | class ____:
async def test_scheduled_start_time_is_attribute(self):
assert "scheduled_start_time" in dir(flow_run)
async def test_scheduled_start_time_is_timestamp_when_not_set(self):
assert isinstance(flow_run.scheduled_start_time, datetime.datetime)
async def test_scheduled_start_time_pulls_from_api_when_needed(
self, monkeypatch: pytest.MonkeyPatch, prefect_client: PrefectClient
):
TIMESTAMP = now("UTC") + datetime.timedelta(days=7)
run = await prefect_client.create_flow_run(
flow=flow(lambda: None, name="test"),
state=states.Scheduled(scheduled_time=TIMESTAMP),
)
assert flow_run.scheduled_start_time != TIMESTAMP
monkeypatch.setenv(name="PREFECT__FLOW_RUN_ID", value=str(run.id))
assert flow_run.scheduled_start_time == TIMESTAMP
| TestStartTime |
python | lepture__authlib | authlib/oauth2/rfc9101/errors.py | {
"start": 178,
"end": 387
} | class ____(OAuth2Error):
error = "invalid_request_uri"
description = "The request_uri in the authorization request returns an error or contains invalid data."
status_code = 400
| InvalidRequestUriError |
python | apache__airflow | airflow-core/tests/unit/ti_deps/deps/test_dagrun_exists_dep.py | {
"start": 1095,
"end": 1825
} | class ____:
@patch("airflow.models.DagRun.find", return_value=())
def test_dagrun_doesnt_exist(self, mock_dagrun_find):
"""
Task instances without dagruns should fail this dep
"""
dag = DAG("test_dag", schedule=None, max_active_runs=2)
dagrun = DagRun(state=State.QUEUED)
ti = Mock(task=Mock(dag=dag), get_dagrun=Mock(return_value=dagrun))
assert not DagrunRunningDep().is_met(ti=ti)
def test_dagrun_exists(self):
"""
Task instances with a dagrun should pass this dep
"""
dagrun = DagRun(state=State.RUNNING)
ti = Mock(get_dagrun=Mock(return_value=dagrun))
assert DagrunRunningDep().is_met(ti=ti)
| TestDagrunRunningDep |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/unicode_decode_op_test.py | {
"start": 3310,
"end": 20330
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testScalarDecode(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars = ragged_string_ops.unicode_decode(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
def testScalarDecodeWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorDecode(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars = ragged_string_ops.unicode_decode(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
def testVectorDecodeWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
self.assertAllEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": [[[u"😊"], [u"🤠🧐"]], [[u"🤓👻🤖"]]], "ragged_rank": 1},
{"texts": [[[u"😊"], [u"🤠🧐"]], [[u"🤓"], [u"👻"]]], "ragged_rank": 0},
{"texts": []}
]) # pyformat: disable
def testBasicDecode(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8")
expected = _nested_codepoints(texts)
self.assertAllEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecodeWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, "UTF-8")
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_decode(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_decode_with_offsets(
texts, "UTF-8")
self.assertAllEqual(
codepoints1, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(
codepoints2, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[72, 101, 108, 108, 111, 119, 111, 114, 108, 100, 128077],
dense_shape=[4, 5])),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[72, 105, 116, 104, 101, 114, 101, 128522],
dense_shape=[2, 2, 5])),
dict(
texts=[],
expected=_make_sparse_tensor(np.zeros([0, 2], np.int64), [], [0, 0])),
])
def testDecodeWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[72, 101, 108, 108, 111], [119, 111, 114, 108, 100],
[-1, -1, -1, -1, -1], [0x1F44D, -1, -1, -1, -1]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[72, 105, -1, -1, -1], [116, 104, 101, 114, 101]],
[[-1, -1, -1, -1, -1], [128522, -1, -1, -1, -1]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[72, 105, -1, -1, -1],
[116, 104, 101, 114, 101],
[-1, -1, -1, -1, -1]],
[[128522, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[
[[[128522, -1, -1], [129312, 129488, -1]],
[[-1, -1, -1], [-1, -1, -1]]],
[[[129299, 128123, 129302], [-1, -1, -1]],
[[-1, -1, -1], [-1, -1, -1]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testDecodeWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(
input_tensor, "UTF-8").to_tensor(default_value=-1)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world",
# C1 control characters are not replaced.
u"\x80\x9f".encode()],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[61, 61, 65533, 61, 61],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')],
[0x80, 0x9F]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world",
u"\u0080\u009f".encode()],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')],
[0x80, 0x9F]]),
dict(
input=[b"\xed\xa0\xbd", # single high surrogate
b"\xed\xb8\x80", # single low surrogate
b"\xed\xa0\xbd\xed\xb8\x80"], # surrogate pair
input_encoding="UTF-8",
expected=[[0xFFFD, 0xFFFD, 0xFFFD],
[0xFFFD, 0xFFFD, 0xFFFD],
[0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD, 0xFFFD]]),
dict(
input=["\ufdd0".encode(), # noncharacter
"\ufffe\uffff".encode(), # last two in BMP
"\U0010ffff".encode(), # last in plane 16 = last in Unicode
b"\xc0\x80", # overlong U+0000 encoding
b"\xf4\x90\x80\x80"], # U+110000, beyond Unicode
input_encoding="UTF-8",
expected=[[0xFDD0],
[0xFFFE, 0xFFFF],
[0x10FFFF],
[0xFFFD] * 2,
[0xFFFD] * 4]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_decode(**args)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[0x3D, 0x3D, 0, 0x3D, 0x3D],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xD8\x01"],
input_encoding="UTF-8",
replacement_char=0x41,
expected=[[0x41, 1]],
expected_offsets=[[0, 1]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(result[0], expected)
self.assertAllEqual(result[1], expected_offsets)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithDifferentEncodings(self, encoding, texts):
expected = _nested_codepoints(texts)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode(input_tensor, encoding)
self.assertAllEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, encoding)
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegex(exception, message):
self.evaluate(ragged_string_ops.unicode_decode(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegex(ValueError, message):
self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
@parameterized.parameters([
dict(
doc="Single string",
input=_nested_encode([u"仅今年前"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前"),
expected_row_splits=[0, 4],
expected_char_to_byte_starts=[0, 3, 6, 9]),
dict(
doc="Multiple strings",
input=_nested_encode([u"仅今年前", u"你好"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前你好"),
expected_row_splits=[0, 4, 6],
expected_char_to_byte_starts=[0, 3, 6, 9, 0, 3]),
dict(
doc="errors=replace",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="replace",
expected_char_values=[0x3D, 0xFFFD, 0x3D],
expected_row_splits=[0, 3],
expected_char_to_byte_starts=[0, 1, 2]),
dict(
doc="errors=ignore",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="ignore",
expected_char_values=[61, 61],
expected_row_splits=[0, 2],
expected_char_to_byte_starts=[0, 2]),
])
def testDecodeGenOp(self,
doc,
expected_row_splits=None,
expected_char_values=None,
expected_char_to_byte_starts=None,
**args):
"""Test for the c++ interface (gen_string_ops.unicode_decode)."""
result = gen_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(expected_row_splits, result.row_splits)
self.assertAllEqual(expected_char_values, result.char_values)
self.assertAllEqual(expected_char_to_byte_starts,
result.char_to_byte_starts)
@test_util.run_all_in_graph_and_eager_modes
| UnicodeDecodeTest |
python | mkdocs__mkdocs | mkdocs/structure/pages.py | {
"start": 995,
"end": 12326
} | class ____(StructureItem):
def __init__(self, title: str | None, file: File, config: MkDocsConfig) -> None:
file.page = self
self.file = file
if title is not None:
self.title = title
# Navigation attributes
self.children = None
self.previous_page = None
self.next_page = None
self.active = False
self.update_date: str = get_build_date()
self._set_canonical_url(config.get('site_url', None))
self._set_edit_url(
config.get('repo_url', None), config.get('edit_uri'), config.get('edit_uri_template')
)
# Placeholders to be filled in later in the build process.
self.markdown = None
self._title_from_render: str | None = None
self.content = None
self.toc = [] # type: ignore
self.meta = {}
def __eq__(self, other) -> bool:
return (
isinstance(other, self.__class__)
and self.title == other.title
and self.file == other.file
)
def __repr__(self):
name = self.__class__.__name__
title = f"{self.title!r}" if self.title is not None else '[blank]'
url = self.abs_url or self.file.url
return f"{name}(title={title}, url={url!r})"
markdown: str | None
"""The original Markdown content from the file."""
content: str | None
"""The rendered Markdown as HTML, this is the contents of the documentation.
Populated after `.render()`."""
toc: TableOfContents
"""An iterable object representing the Table of contents for a page. Each item in
the `toc` is an [`AnchorLink`][mkdocs.structure.toc.AnchorLink]."""
meta: MutableMapping[str, Any]
"""A mapping of the metadata included at the top of the markdown page."""
@property
def url(self) -> str:
"""The URL of the page relative to the MkDocs `site_dir`."""
url = self.file.url
if url in ('.', './'):
return ''
return url
file: File
"""The documentation [`File`][mkdocs.structure.files.File] that the page is being rendered from."""
abs_url: str | None
"""The absolute URL of the page from the server root as determined by the value
assigned to the [site_url][] configuration setting. The value includes any
subdirectory included in the `site_url`, but not the domain. [base_url][] should
not be used with this variable."""
canonical_url: str | None
"""The full, canonical URL to the current page as determined by the value assigned
to the [site_url][] configuration setting. The value includes the domain and any
subdirectory included in the `site_url`. [base_url][] should not be used with this
variable."""
@property
def active(self) -> bool:
"""When `True`, indicates that this page is the currently viewed page. Defaults to `False`."""
return self.__active
@active.setter
def active(self, value: bool):
"""Set active status of page and ancestors."""
self.__active = bool(value)
if self.parent is not None:
self.parent.active = bool(value)
@property
def is_index(self) -> bool:
return self.file.name == 'index'
edit_url: str | None
"""The full URL to the source page in the source repository. Typically used to
provide a link to edit the source page. [base_url][] should not be used with this
variable."""
@property
def is_homepage(self) -> bool:
"""Evaluates to `True` for the homepage of the site and `False` for all other pages."""
return self.is_top_level and self.is_index and self.file.url in ('.', './', 'index.html')
previous_page: Page | None
"""The [page][mkdocs.structure.pages.Page] object for the previous page or `None`.
The value will be `None` if the current page is the first item in the site navigation
or if the current page is not included in the navigation at all."""
next_page: Page | None
"""The [page][mkdocs.structure.pages.Page] object for the next page or `None`.
The value will be `None` if the current page is the last item in the site navigation
or if the current page is not included in the navigation at all."""
children: None = None
"""Pages do not contain children and the attribute is always `None`."""
is_section: bool = False
"""Indicates that the navigation object is a "section" object. Always `False` for page objects."""
is_page: bool = True
"""Indicates that the navigation object is a "page" object. Always `True` for page objects."""
is_link: bool = False
"""Indicates that the navigation object is a "link" object. Always `False` for page objects."""
def _set_canonical_url(self, base: str | None) -> None:
if base:
if not base.endswith('/'):
base += '/'
self.canonical_url = canonical_url = urljoin(base, self.url)
self.abs_url = urlsplit(canonical_url).path
else:
self.canonical_url = None
self.abs_url = None
def _set_edit_url(
self,
repo_url: str | None,
edit_uri: str | None = None,
edit_uri_template: str | None = None,
) -> None:
if not edit_uri_template and not edit_uri:
self.edit_url = None
return
src_uri = self.file.edit_uri
if src_uri is None:
self.edit_url = None
return
if edit_uri_template:
noext = posixpath.splitext(src_uri)[0]
file_edit_uri = edit_uri_template.format(path=src_uri, path_noext=noext)
else:
assert edit_uri is not None and edit_uri.endswith('/')
file_edit_uri = edit_uri + src_uri
if repo_url:
# Ensure urljoin behavior is correct
if not file_edit_uri.startswith(('?', '#')) and not repo_url.endswith('/'):
repo_url += '/'
else:
try:
parsed_url = urlsplit(file_edit_uri)
if not parsed_url.scheme or not parsed_url.netloc:
log.warning(
f"edit_uri: {file_edit_uri!r} is not a valid URL, it should include the http:// (scheme)"
)
except ValueError as e:
log.warning(f"edit_uri: {file_edit_uri!r} is not a valid URL: {e}")
self.edit_url = urljoin(repo_url or '', file_edit_uri)
def read_source(self, config: MkDocsConfig) -> None:
source = config.plugins.on_page_read_source(page=self, config=config)
if source is None:
try:
source = self.file.content_string
except OSError:
log.error(f'File not found: {self.file.src_path}')
raise
except ValueError:
log.error(f'Encoding error reading file: {self.file.src_path}')
raise
self.markdown, self.meta = meta.get_data(source)
def _set_title(self) -> None:
warnings.warn(
"_set_title is no longer used in MkDocs and will be removed soon.", DeprecationWarning
)
@weak_property
def title(self) -> str | None: # type: ignore[override]
"""
Returns the title for the current page.
Before calling `read_source()`, this value is empty. It can also be updated by `render()`.
Checks these in order and uses the first that returns a valid title:
- value provided on init (passed in from config)
- value of metadata 'title'
- content of the first H1 in Markdown content
- convert filename to title
"""
if self.markdown is None:
return None
if 'title' in self.meta:
return self.meta['title']
if self._title_from_render:
return self._title_from_render
elif self.content is None: # Preserve legacy behavior only for edge cases in plugins.
title_from_md = get_markdown_title(self.markdown)
if title_from_md is not None:
return title_from_md
if self.is_homepage:
return 'Home'
title = self.file.name.replace('-', ' ').replace('_', ' ')
# Capitalize if the filename was all lowercase, otherwise leave it as-is.
if title.lower() == title:
title = title.capitalize()
return title
def render(self, config: MkDocsConfig, files: Files) -> None:
"""Convert the Markdown source file to HTML as per the config."""
if self.markdown is None:
raise RuntimeError("`markdown` field hasn't been set (via `read_source`)")
md = markdown.Markdown(
extensions=config['markdown_extensions'],
extension_configs=config['mdx_configs'] or {},
)
raw_html_ext = _RawHTMLPreprocessor()
raw_html_ext._register(md)
extract_anchors_ext = _ExtractAnchorsTreeprocessor(self.file, files, config)
extract_anchors_ext._register(md)
relative_path_ext = _RelativePathTreeprocessor(self.file, files, config)
relative_path_ext._register(md)
extract_title_ext = _ExtractTitleTreeprocessor()
extract_title_ext._register(md)
self.content = md.convert(self.markdown)
self.toc = get_toc(getattr(md, 'toc_tokens', []))
self._title_from_render = extract_title_ext.title
self.present_anchor_ids = (
extract_anchors_ext.present_anchor_ids | raw_html_ext.present_anchor_ids
)
if log.getEffectiveLevel() > logging.DEBUG:
self.links_to_anchors = relative_path_ext.links_to_anchors
present_anchor_ids: set[str] | None = None
"""Anchor IDs that this page contains (can be linked to in this page)."""
links_to_anchors: dict[File, dict[str, str]] | None = None
"""Links to anchors in other files that this page contains.
The structure is: `{file_that_is_linked_to: {'anchor': 'original_link/to/some_file.md#anchor'}}`.
Populated after `.render()`. Populated only if `validation: {anchors: info}` (or greater) is set.
"""
def validate_anchor_links(self, *, files: Files, log_level: int) -> None:
if not self.links_to_anchors:
return
for to_file, links in self.links_to_anchors.items():
for anchor, original_link in links.items():
page = to_file.page
if page is None:
continue
if page.present_anchor_ids is None: # Page was somehow not rendered.
continue
if anchor in page.present_anchor_ids:
continue
context = ""
if to_file == self.file:
problem = "there is no such anchor on this page"
if anchor.startswith('fnref:'):
context = " This seems to be a footnote that is never referenced."
else:
problem = f"the doc '{to_file.src_uri}' does not contain an anchor '#{anchor}'"
log.log(
log_level,
f"Doc file '{self.file.src_uri}' contains a link '{original_link}', but {problem}.{context}",
)
| Page |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 7706,
"end": 7773
} | class ____(_NumcodecsBytesBytesCodec, codec_name="bz2"):
pass
| BZ2 |
python | falconry__falcon | tests/test_httperror.py | {
"start": 5563,
"end": 5902
} | class ____:
def __init__(self, retry_after):
self.retry_after = retry_after
def on_get(self, req, resp):
raise falcon.HTTPContentTooLarge(
title='Request Rejected',
description='Request Body Too Large',
retry_after=self.retry_after,
)
| TemporaryRequestEntityTooLongResource |
python | celery__celery | celery/result.py | {
"start": 29885,
"end": 32695
} | class ____(ResultSet):
"""Like :class:`ResultSet`, but with an associated id.
This type is returned by :class:`~celery.group`.
It enables inspection of the tasks state and return values as
a single entity.
Arguments:
id (str): The id of the group.
results (Sequence[AsyncResult]): List of result instances.
parent (ResultBase): Parent result of this group.
"""
#: The UUID of the group.
id = None
#: List/iterator of results in the group
results = None
def __init__(self, id=None, results=None, parent=None, **kwargs):
self.id = id
self.parent = parent
super().__init__(results, **kwargs)
def _on_ready(self):
self.backend.remove_pending_result(self)
super()._on_ready()
def save(self, backend=None):
"""Save group-result for later retrieval using :meth:`restore`.
Example:
>>> def save_and_restore(result):
... result.save()
... result = GroupResult.restore(result.id)
"""
return (backend or self.app.backend).save_group(self.id, self)
def delete(self, backend=None):
"""Remove this result if it was previously saved."""
(backend or self.app.backend).delete_group(self.id)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return self.id, self.results
def __bool__(self):
return bool(self.id or self.results)
__nonzero__ = __bool__ # Included for Py2 backwards compatibility
def __eq__(self, other):
if isinstance(other, GroupResult):
return (
other.id == self.id and
other.results == self.results and
other.parent == self.parent
)
elif isinstance(other, str):
return other == self.id
return NotImplemented
def __repr__(self):
return f'<{type(self).__name__}: {self.id} [{", ".join(r.id for r in self.results)}]>'
def __str__(self):
"""`str(self) -> self.id`."""
return str(self.id)
def __hash__(self):
"""`hash(self) -> hash(self.id)`."""
return hash(self.id)
def as_tuple(self):
return (
(self.id, self.parent and self.parent.as_tuple()),
[r.as_tuple() for r in self.results]
)
@property
def children(self):
return self.results
@classmethod
def restore(cls, id, backend=None, app=None):
"""Restore previously saved group result."""
app = app or (
cls.app if not isinstance(cls.app, property) else current_app
)
backend = backend or app.backend
return backend.restore_group(id)
@Thenable.register
| GroupResult |
python | plotly__plotly.py | plotly/graph_objs/waterfall/_stream.py | {
"start": 233,
"end": 3521
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall"
_path_str = "waterfall.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/static_analysis/type_inference.py | {
"start": 4936,
"end": 6154
} | class ____(object):
"""Abstraction for the state of the CFG walk for type inference.
This is a value type. Only implements the strictly necessary operators.
Attributes:
types: Dict[qual_names.QN, Set[Type]], mapping symbols to the set of
possible types.
"""
def __init__(self, init_from=None):
if init_from:
assert isinstance(init_from, _TypeMap)
self.types = {
s: set(other_types) for s, other_types in init_from.types.items()
}
else:
self.types = {}
def __eq__(self, other):
if frozenset(self.types.keys()) != frozenset(other.types.keys()):
return False
ret = all(self.types[s] == other.types[s] for s in self.types)
return ret
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
assert isinstance(other, _TypeMap)
result = _TypeMap(self)
for s, other_types in other.types.items():
if s not in result.types:
self_types = set()
result.types[s] = self_types
else:
self_types = result.types[s]
self_types.update(other_types)
return result
def __repr__(self):
return 'SymbolTable {}'.format(self.types)
NO_VALUE = object()
| _TypeMap |
python | ray-project__ray | python/ray/train/v2/_internal/execution/failure_handling/failure_policy.py | {
"start": 146,
"end": 235
} | class ____(Enum):
RETRY = "RETRY"
RAISE = "RAISE"
NOOP = "NOOP"
| FailureDecision |
python | kamyu104__LeetCode-Solutions | Python/find-the-longest-valid-obstacle-course-at-each-position.py | {
"start": 74,
"end": 510
} | class ____(object):
def longestObstacleCourseAtEachPosition(self, obstacles):
"""
:type obstacles: List[int]
:rtype: List[int]
"""
result, stk = [], []
for x in obstacles:
i = bisect.bisect_right(stk, x)
result.append(i+1)
if i == len(stk):
stk.append(0)
stk[i] = x
return result
# Range Maximum Query
| Solution |
python | getsentry__sentry | src/sentry/api/paginator.py | {
"start": 20736,
"end": 26257
} | class ____:
"""This paginator can be used to paginate between multiple querysets.
It needs to be passed a list of CombinedQuerysetIntermediary. Each CombinedQuerysetIntermediary must be populated with a queryset and an order_by key
i.e. intermediaries = [
CombinedQuerysetIntermediary(AlertRule.objects.all(), "name")
CombinedQuerysetIntermediary(Rule.objects.all(), "label")
]
and an optional parameter `desc` to determine whether the sort is ascending or descending. Default is False.
There is an issue with sorting between multiple models using a mixture of
date fields and non-date fields. This is because the cursor value is converted differently for dates vs non-dates.
It assumes if _any_ field is a date key, all of them are.
There is an assertion in the constructor to help prevent this from manifesting.
"""
multiplier = 1000000 # Use microseconds for date keys.
using_dates = False
def __init__(self, intermediaries, desc=False, on_results=None, case_insensitive=False):
self.desc = desc
self.intermediaries = intermediaries
self.on_results = on_results
self.case_insensitive = case_insensitive
self.model_key_map = {}
for intermediary in list(self.intermediaries):
if intermediary.is_empty:
self.intermediaries.remove(intermediary)
else:
self.model_key_map[intermediary.instance_type] = intermediary.order_by
# This is an assertion to make sure date field sorts are all or nothing.###
# (i.e. all fields must be a date type, or none of them)
using_other = False
for intermediary in self.intermediaries:
if intermediary.order_by_type is datetime:
self.using_dates = True
else:
using_other = True
if self.using_dates:
assert (
not using_other
), "When sorting by a date, it must be the key used on all intermediaries"
def key_from_item(self, item):
return self.model_key_map[type(item)][0]
def _prep_value(self, item, key, for_prev):
"""
Formats values for use in the cursor
"""
value = getattr(item, key)
value_type = type(value)
if isinstance(value, float):
return math.floor(value) if self._is_asc(for_prev) else math.ceil(value)
elif value_type is str and self.case_insensitive:
return quote(value.lower())
return value
def get_item_key(self, item, for_prev=False):
if self.using_dates:
return int(
self.multiplier * float(getattr(item, self.key_from_item(item)).strftime("%s.%f"))
)
else:
return self._prep_value(item, self.key_from_item(item), for_prev)
def _is_asc(self, is_prev):
return (self.desc and is_prev) or not (self.desc or is_prev)
def _build_combined_querysets(self, is_prev):
asc = self._is_asc(is_prev)
combined_querysets = list()
for intermediary in self.intermediaries:
key = intermediary.order_by[0]
annotate = {}
if self.case_insensitive:
key = f"{key}_lower"
annotate[key] = Lower(intermediary.order_by[0])
queryset = intermediary.queryset.annotate(**annotate)
for key in intermediary.order_by:
if self.case_insensitive:
key = f"{key}_lower"
if asc:
queryset = queryset.order_by(key)
else:
queryset = queryset.order_by(f"-{key}")
combined_querysets += list(queryset)
def _sort_combined_querysets(item):
sort_keys = []
sort_keys.append(self.get_item_key(item))
if len(self.model_key_map[type(item)]) > 1:
# XXX: This doesn't do anything - it just uses a column name as the sort key. It should be pulling the
# value of the other keys out instead.
sort_keys.extend(iter(self.model_key_map[type(item)][1:]))
sort_keys.append(type(item).__name__)
return tuple(sort_keys)
combined_querysets.sort(
key=_sort_combined_querysets,
reverse=asc if is_prev else not asc,
)
return combined_querysets
def get_result(self, cursor=None, limit=100):
# offset is page #
# value is page limit
if cursor is None:
cursor = Cursor(0, 0, 0)
limit = min(limit, MAX_LIMIT)
combined_querysets = self._build_combined_querysets(cursor.is_prev)
page = int(cursor.offset)
cursor_value = int(cursor.value)
offset = page * cursor_value
stop = offset + (int(cursor_value) or limit) + 1
if offset < 0:
raise BadPaginationError("Pagination offset cannot be negative")
results = list(combined_querysets[offset:stop])
if cursor.value != limit:
results = results[-(limit + 1) :]
next_cursor = Cursor(limit, page + 1, False, len(results) > limit)
prev_cursor = Cursor(limit, page - 1, True, page > 0)
results = list(results[:limit])
if self.on_results:
results = self.on_results(results)
return CursorResult(results=results, next=next_cursor, prev=prev_cursor)
| CombinedQuerysetPaginator |
python | MongoEngine__mongoengine | tests/fields/test_int_field.py | {
"start": 106,
"end": 1892
} | class ____(MongoDBTestCase):
def test_int_validation(self):
"""Ensure that invalid values cannot be assigned to int fields."""
class Person(Document):
age = IntField(min_value=0, max_value=110)
person = Person()
person.age = 0
person.validate()
person.age = 50
person.validate()
person.age = 110
person.validate()
person.age = -1
with pytest.raises(ValidationError):
person.validate()
person.age = 120
with pytest.raises(ValidationError):
person.validate()
person.age = "ten"
with pytest.raises(ValidationError):
person.validate()
def test_ne_operator(self):
class TestDocument(Document):
int_fld = IntField()
TestDocument.drop_collection()
TestDocument(int_fld=None).save()
TestDocument(int_fld=1).save()
assert 1 == TestDocument.objects(int_fld__ne=None).count()
assert 1 == TestDocument.objects(int_fld__ne=1).count()
def test_int_field_long_field_migration(self):
class DeprecatedLongField(IntField):
"""64-bit integer field. (Equivalent to IntField since the support to Python2 was dropped)"""
def to_mongo(self, value):
return Int64(value)
class TestDocument(Document):
long = DeprecatedLongField()
TestDocument.drop_collection()
TestDocument(long=10).save()
v = TestDocument.objects().first().long
# simulate a migration to IntField
class TestDocument(Document):
long = IntField()
assert TestDocument.objects(long=10).count() == 1
assert TestDocument.objects().first().long == v
| TestIntField |
python | allegroai__clearml | clearml/backend_api/services/v2_23/projects.py | {
"start": 43012,
"end": 45767
} | class ____(Request):
"""
Deletes a project
:param project: Project ID
:type project: str
:param force: If not true, fails if project has tasks. If true, and project has
tasks, they will be unassigned
:type force: bool
:param delete_contents: If set to 'true' then the project tasks and models will
be deleted. Otherwise their project property will be unassigned. Default value
is 'false'
:type delete_contents: bool
"""
_service = "projects"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"delete_contents": {
"description": "If set to 'true' then the project tasks and models will be deleted. Otherwise their project property will be unassigned. Default value is 'false'",
"type": "boolean",
},
"force": {
"default": False,
"description": "If not true, fails if project has tasks.\n If true, and project has tasks, they will be unassigned",
"type": "boolean",
},
"project": {"description": "Project ID", "type": "string"},
},
"required": ["project"],
"type": "object",
}
def __init__(
self, project: str, force: Optional[bool] = False, delete_contents: Optional[bool] = None, **kwargs: Any
) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.project = project
self.force = force
self.delete_contents = delete_contents
@schema_property("project")
def project(self) -> str:
return self._property_project
@project.setter
def project(self, value: str) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("delete_contents")
def delete_contents(self) -> Optional[bool]:
return self._property_delete_contents
@delete_contents.setter
def delete_contents(self, value: Optional[bool]) -> None:
if value is None:
self._property_delete_contents = None
return
self.assert_isinstance(value, "delete_contents", (bool,))
self._property_delete_contents = value
| DeleteRequest |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-arize-phoenix-query-engine/llama_index/packs/arize_phoenix_query_engine/base.py | {
"start": 375,
"end": 2355
} | class ____(BaseLlamaPack):
"""
The Arize-Phoenix LlamaPack show how to instrument your LlamaIndex query
engine with tracing. It launches Phoenix in the background, builds an index
over an input list of nodes, and instantiates and instruments a query engine
over that index so that trace data from each query is sent to Phoenix.
Note: Using this LlamaPack requires that your OpenAI API key is set via the
OPENAI_API_KEY environment variable.
"""
def __init__(
self,
nodes: List[TextNode],
**kwargs: Any,
) -> None:
"""
Initializes a new instance of ArizePhoenixQueryEnginePack.
Args:
nodes (List[TextNode]): An input list of nodes over which the index
will be built.
"""
try:
import phoenix as px
except ImportError:
raise ImportError(
"The arize-phoenix package could not be found. "
"Please install with `pip install arize-phoenix`."
)
self._session: "PhoenixSession" = px.launch_app()
set_global_handler("arize_phoenix")
self._index = VectorStoreIndex(nodes, **kwargs)
self._query_engine = self._index.as_query_engine()
def get_modules(self) -> Dict[str, Any]:
"""
Returns a dictionary containing the internals of the LlamaPack.
Returns:
Dict[str, Any]: A dictionary containing the internals of the
LlamaPack.
"""
return {
"session": self._session,
"session_url": self._session.url,
"index": self._index,
"query_engine": self._query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""
Runs queries against the index.
Returns:
Any: A response from the query engine.
"""
return self._query_engine.query(*args, **kwargs)
| ArizePhoenixQueryEnginePack |
python | google__pytype | pytype/tools/arg_parser.py | {
"start": 755,
"end": 3446
} | class ____:
"""Parser that integrates tool and pytype-single args."""
def __init__(self, parser, *, pytype_single_args=None, overrides=None):
"""Initialize a parser.
Args:
parser: An argparse.ArgumentParser or compatible object
pytype_single_args: Args passed to pytype
overrides: Pytype args that the tool overrides (will be put into the tool
args, with the corresponding pytype opts getting their default values)
"""
self._parser = parser
self._overrides = overrides or []
self.pytype_single_args = pytype_single_args or {}
def parse_args(self, argv: list[str]) -> ParsedArgs:
"""Parses argv.
Args:
argv: sys.argv[1:]
Returns:
A ParsedArgs object
"""
tool_args = self._parser.parse_args(argv)
return self.process_parsed_args(tool_args)
def get_pytype_kwargs(self, args: argparse.Namespace) -> _ArgDict:
"""Return a set of kwargs to pass to pytype.config.Options.
Args:
args: an argparse.Namespace.
Returns:
A dict of kwargs with pytype_single args as keys.
"""
return {k: getattr(args, k) for k in self.pytype_single_args}
def process_parsed_args(self, tool_args: Namespace) -> ParsedArgs:
"""Process args from a namespace."""
pytype_args = pytype_config.make_parser().parse_args([])
pytype_dict = vars(pytype_args)
tool_dict = {}
for k, v in vars(tool_args).items():
if (
k in self.pytype_single_args
and k not in self._overrides
and k in pytype_dict
):
pytype_dict[k] = v
else:
tool_dict[k] = v
tool_args = Namespace(**tool_dict)
self.process(tool_args, pytype_args)
self._ensure_valid_pytype_args(pytype_args)
pytype_opts = pytype_config.Options(pytype_args)
return ParsedArgs(tool_args, pytype_opts)
def process(self, tool_args, pytype_args):
"""Process raw pytype args before passing to config.Options."""
# Override in subclasses
def error(self, msg):
self._parser.error(msg)
def _ensure_valid_pytype_args(self, pytype_args: argparse.Namespace):
"""Final adjustment of raw pytype args before constructing Options."""
# If we do not have an input file add a dummy one here; tools often need to
# construct a config.Options without having an input file.
if not getattr(pytype_args, "input", None):
pytype_args.input = ["<dummy_file>"]
if isinstance(pytype_args.input, str):
pytype_args.input = [pytype_args.input]
# If we are passed an imports map we should look for pickled files as well.
if getattr(pytype_args, "imports_map", None):
pytype_args.use_pickled_files = True
| Parser |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/python_logs.py | {
"start": 292,
"end": 500
} | class ____(BaseModel, extra="forbid"):
pythonLogLevel: Optional[PythonLogLevel] = None
managedPythonLoggers: Optional[list[str]] = None
dagsterHandlerConfig: Optional[dict[str, Any]] = None
| PythonLogs |
python | pytest-dev__pytest-xdist | testing/test_looponfail.py | {
"start": 246,
"end": 2957
} | class ____:
def test_filechange(self, tmp_path: Path) -> None:
tmp = tmp_path
hello = tmp / "hello.py"
hello.touch()
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
hello.write_text("world")
changed = sd.check()
assert changed
hello.with_suffix(".pyc").write_text("hello")
changed = sd.check()
assert not changed
p = tmp / "new.py"
p.touch()
changed = sd.check()
assert changed
p.unlink()
changed = sd.check()
assert changed
tmp.joinpath("a", "b").mkdir(parents=True)
tmp.joinpath("a", "b", "c.py").touch()
changed = sd.check()
assert changed
tmp.joinpath("a", "c.txt").touch()
changed = sd.check()
assert changed
changed = sd.check()
assert not changed
shutil.rmtree(str(tmp.joinpath("a")))
changed = sd.check()
assert changed
def test_dirchange(self, tmp_path: Path) -> None:
tmp = tmp_path
tmp.joinpath("dir").mkdir()
tmp.joinpath("dir", "hello.py").touch()
sd = StatRecorder([tmp])
assert not sd.fil(tmp / "dir")
def test_filechange_deletion_race(self, tmp_path: Path) -> None:
tmp = tmp_path
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
p = tmp.joinpath("new.py")
p.touch()
changed = sd.check()
assert changed
p.unlink()
# make check()'s visit() call return our just removed
# path as if we were in a race condition
dirname = str(tmp)
dirnames: list[str] = []
filenames = [str(p)]
with unittest.mock.patch(
"os.walk", return_value=[(dirname, dirnames, filenames)], autospec=True
):
changed = sd.check()
assert changed
def test_pycremoval(self, tmp_path: Path) -> None:
tmp = tmp_path
hello = tmp / "hello.py"
hello.touch()
sd = StatRecorder([tmp])
changed = sd.check()
assert not changed
pycfile = hello.with_suffix(".pyc")
pycfile.touch()
hello.write_text("world")
changed = sd.check()
assert changed
assert not pycfile.exists()
def test_waitonchange(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
tmp = tmp_path
sd = StatRecorder([tmp])
ret_values = [True, False]
monkeypatch.setattr(StatRecorder, "check", lambda self: ret_values.pop())
sd.waitonchange(checkinterval=0.2)
assert not ret_values
| TestStatRecorder |
python | Pylons__pyramid | tests/test_util.py | {
"start": 26168,
"end": 27905
} | class ____(unittest.TestCase):
def _callFUT(self, obj, *attrs):
from pyramid.util import hide_attrs
return hide_attrs(obj, *attrs)
def _makeDummy(self):
from pyramid.decorator import reify
class Dummy:
x = 1
@reify
def foo(self):
return self.x
return Dummy()
def test_restores_attrs(self):
obj = self._makeDummy()
obj.bar = 'asdf'
orig_foo = obj.foo
with self._callFUT(obj, 'foo', 'bar'):
obj.foo = object()
obj.bar = 'nope'
self.assertEqual(obj.foo, orig_foo)
self.assertEqual(obj.bar, 'asdf')
def test_restores_attrs_on_exception(self):
obj = self._makeDummy()
orig_foo = obj.foo
try:
with self._callFUT(obj, 'foo'):
obj.foo = object()
raise RuntimeError()
except RuntimeError:
self.assertEqual(obj.foo, orig_foo)
else: # pragma: no cover
self.fail("RuntimeError not raised")
def test_restores_attrs_to_none(self):
obj = self._makeDummy()
obj.foo = None
with self._callFUT(obj, 'foo'):
obj.foo = object()
self.assertEqual(obj.foo, None)
def test_deletes_attrs(self):
obj = self._makeDummy()
with self._callFUT(obj, 'foo'):
obj.foo = object()
self.assertTrue('foo' not in obj.__dict__)
def test_does_not_delete_attr_if_no_attr_to_delete(self):
obj = self._makeDummy()
with self._callFUT(obj, 'foo'):
pass
self.assertTrue('foo' not in obj.__dict__)
def dummyfunc(): # pragma: no cover
pass
| Test_hide_attrs |
python | ray-project__ray | python/ray/train/tests/test_tune.py | {
"start": 1111,
"end": 10390
} | class ____(Backend):
def on_start(self, worker_group: WorkerGroup, backend_config: TestConfig):
pass
def on_shutdown(self, worker_group: WorkerGroup, backend_config: TestConfig):
pass
def torch_fashion_mnist(num_workers, use_gpu, num_samples):
trainer = TorchTrainer(
fashion_mnist_train_func,
scaling_config=train.ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
)
tuner = Tuner(
trainer,
param_space={
"train_loop_config": {
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size_per_worker": tune.choice([32, 64, 128]),
"epochs": 2,
}
},
tune_config=TuneConfig(
num_samples=num_samples,
),
)
analysis = tuner.fit()._experiment_analysis
# Check that loss decreases in each trial.
for df in analysis.trial_dataframes.values():
assert df.loc[1, "loss"] < df.loc[0, "loss"]
def test_tune_torch_fashion_mnist(ray_start_8_cpus):
torch_fashion_mnist(num_workers=2, use_gpu=False, num_samples=2)
@pytest.mark.skipif(
sys.version_info >= (3, 12), reason="tensorflow is not installed in python 3.12+"
)
def tune_tensorflow_mnist(num_workers, use_gpu, num_samples):
from ray.train.examples.tf.tensorflow_mnist_example import (
train_func as tensorflow_mnist_train_func,
)
from ray.train.tensorflow import TensorflowTrainer
trainer = TensorflowTrainer(
tensorflow_mnist_train_func,
scaling_config=train.ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),
)
tuner = Tuner(
trainer,
param_space={
"train_loop_config": {
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([32, 64, 128]),
"epochs": 2,
}
},
tune_config=TuneConfig(
num_samples=num_samples,
),
)
analysis = tuner.fit()._experiment_analysis
# Check that loss decreases in each trial.
for df in analysis.trial_dataframes.values():
assert df.loc[1, "loss"] < df.loc[0, "loss"]
@pytest.mark.skipif(
sys.version_info >= (3, 12), reason="tensorflow is not installed in python 3.12+"
)
def test_tune_tensorflow_mnist(ray_start_8_cpus):
tune_tensorflow_mnist(num_workers=2, use_gpu=False, num_samples=2)
def test_tune_error(ray_start_4_cpus):
def train_func(config):
raise RuntimeError("Error in training function!")
trainer = DataParallelTrainer(
train_func,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
)
tuner = Tuner(
trainer,
)
result_grid = tuner.fit()
with pytest.raises(RuntimeError):
raise result_grid[0].error
def test_tune_checkpoint(ray_start_4_cpus):
def train_func():
for i in range(9):
train.report(dict(test=i))
with create_dict_checkpoint(dict(hello="world")) as checkpoint:
train.report(dict(test=i + 1), checkpoint=checkpoint)
trainer = DataParallelTrainer(
train_func,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
)
tuner = Tuner(
trainer,
param_space={"train_loop_config": {"max_iter": 5}},
)
result_grid = tuner.fit()
assert len(result_grid) == 1
result = result_grid[0]
assert result.checkpoint
assert load_dict_checkpoint(result.checkpoint)["hello"] == "world"
def test_reuse_checkpoint(ray_start_4_cpus):
def train_func(config):
itr = 0
ckpt = train.get_checkpoint()
if ckpt is not None:
ckpt = load_dict_checkpoint(ckpt)
itr = ckpt["iter"] + 1
for i in range(itr, config["max_iter"]):
with create_dict_checkpoint(dict(iter=i)) as checkpoint:
train.report(dict(test=i, training_iteration=i), checkpoint=checkpoint)
trainer = DataParallelTrainer(
train_func,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
)
tuner = Tuner(
trainer,
param_space={"train_loop_config": {"max_iter": 5}},
)
result_grid = tuner.fit()
assert len(result_grid) == 1
result = result_grid[0]
assert result.checkpoint
assert load_dict_checkpoint(result.checkpoint)["iter"] == 4
tuner = Tuner.restore(result_grid.experiment_path, trainable=trainer)
result_grid = tuner.fit()
assert len(result_grid) == 1
assert len(result_grid[0].metrics_dataframe) == 5
def test_retry_with_max_failures(ray_start_4_cpus):
"""Tests trainer retry with max_failures > 0 when integrating with Tune."""
def train_func():
ckpt = train.get_checkpoint()
restored = bool(ckpt) # Does a previous checkpoint exist?
itr = 0
if ckpt:
ckpt = load_dict_checkpoint(ckpt)
itr = ckpt["iter"] + 1
for i in range(itr, 4):
if i == 2 and not restored:
raise Exception("try to fail me")
with create_dict_checkpoint(dict(iter=i)) as checkpoint:
train.report(dict(test=i, training_iteration=i), checkpoint=checkpoint)
trainer = DataParallelTrainer(
train_func,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
)
tuner = Tuner(
trainer,
run_config=tune.RunConfig(failure_config=tune.FailureConfig(max_failures=3)),
)
result_grid = tuner.fit()
checkpoint = load_dict_checkpoint(result_grid[0].checkpoint)
assert checkpoint["iter"] == 3
df = result_grid[0].metrics_dataframe
assert len(df[TRAINING_ITERATION]) == 4
def test_restore_with_new_trainer(ray_start_4_cpus, tmpdir, propagate_logs, caplog):
def train_func(config):
raise RuntimeError("failing!")
trainer = DataParallelTrainer(
train_func,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
run_config=train.RunConfig(
name="restore_new_trainer", storage_path=str(tmpdir)
),
datasets={"train": ray.data.from_items([{"a": i} for i in range(10)])},
)
results = Tuner(trainer).fit()
assert results.errors
def train_func(config):
dataset = train.get_dataset_shard("train")
assert train.get_context().get_world_size() == 2
rows = 0
for _ in dataset.iter_rows():
rows += 1
assert rows == 10
trainer = DataParallelTrainer(
# Training function can be modified
train_func,
backend_config=TestConfig(),
# ScalingConfig can be modified
scaling_config=train.ScalingConfig(num_workers=2),
# New RunConfig will be ignored
run_config=train.RunConfig(name="ignored"),
# Datasets and preprocessors can be re-specified
datasets={"train": ray.data.from_items([{"a": i} for i in range(20)])},
)
caplog.clear()
with caplog.at_level(logging.WARNING, logger="ray.tune.impl.tuner_internal"):
tuner = Tuner.restore(
str(tmpdir / "restore_new_trainer"),
trainable=trainer,
resume_errored=True,
)
assert "they will be ignored in the resumed run" in caplog.text
results = tuner.fit()
assert not results.errors
@pytest.mark.parametrize("in_trainer", [True, False])
@pytest.mark.parametrize("in_tuner", [True, False])
def test_run_config_in_trainer_and_tuner(
propagate_logs, tmp_path, caplog, in_trainer, in_tuner
):
trainer_run_config = (
train.RunConfig(name="trainer", storage_path=str(tmp_path))
if in_trainer
else None
)
tuner_run_config = (
tune.RunConfig(name="tuner", storage_path=str(tmp_path)) if in_tuner else None
)
trainer = DataParallelTrainer(
lambda config: None,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
run_config=trainer_run_config,
)
with caplog.at_level(logging.INFO, logger="ray.tune.impl.tuner_internal"):
tuner = Tuner(trainer, run_config=tuner_run_config)
both_msg = (
"`RunConfig` was passed to both the `Tuner` and the `DataParallelTrainer`"
)
run_config = tuner._local_tuner.get_run_config()
if in_trainer and in_tuner:
assert run_config.name == "tuner"
assert both_msg in caplog.text
elif in_trainer and not in_tuner:
assert run_config.name == "trainer"
assert both_msg not in caplog.text
elif not in_trainer and in_tuner:
assert run_config.name == "tuner"
assert both_msg not in caplog.text
else:
assert both_msg not in caplog.text
def test_run_config_in_param_space():
trainer = DataParallelTrainer(
lambda config: None,
backend_config=TestConfig(),
scaling_config=train.ScalingConfig(num_workers=1),
)
with pytest.raises(ValueError):
Tuner(trainer, param_space={"run_config": train.RunConfig(name="ignored")})
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
| TestBackend |
python | tox-dev__tox | src/tox/tox_env/python/package.py | {
"start": 698,
"end": 756
} | class ____(Package):
"""python package."""
| PythonPackage |
python | python__mypy | mypy/test/teststubtest.py | {
"start": 8208,
"end": 82184
} | class ____(unittest.TestCase):
@collect_cases
def test_basic_good(self) -> Iterator[Case]:
yield Case(
stub="def f(number: int, text: str) -> None: ...",
runtime="def f(number, text): pass",
error=None,
)
yield Case(
stub="""
class X:
def f(self, number: int, text: str) -> None: ...
""",
runtime="""
class X:
def f(self, number, text): pass
""",
error=None,
)
@collect_cases
def test_types(self) -> Iterator[Case]:
yield Case(
stub="def mistyped_class() -> None: ...",
runtime="class mistyped_class: pass",
error="mistyped_class",
)
yield Case(
stub="class mistyped_fn: ...", runtime="def mistyped_fn(): pass", error="mistyped_fn"
)
yield Case(
stub="""
class X:
def mistyped_var(self) -> int: ...
""",
runtime="""
class X:
mistyped_var = 1
""",
error="X.mistyped_var",
)
@collect_cases
def test_coroutines(self) -> Iterator[Case]:
yield Case(stub="def bar() -> int: ...", runtime="async def bar(): return 5", error="bar")
# Don't error for this one -- we get false positives otherwise
yield Case(stub="async def foo() -> int: ...", runtime="def foo(): return 5", error=None)
yield Case(stub="def baz() -> int: ...", runtime="def baz(): return 5", error=None)
yield Case(
stub="async def bingo() -> int: ...", runtime="async def bingo(): return 5", error=None
)
@collect_cases
def test_arg_name(self) -> Iterator[Case]:
yield Case(
stub="def bad(number: int, text: str) -> None: ...",
runtime="def bad(num, text) -> None: pass",
error="bad",
)
yield Case(
stub="def good_posonly(__number: int, text: str) -> None: ...",
runtime="def good_posonly(num, /, text): pass",
error=None,
)
yield Case(
stub="def bad_posonly(__number: int, text: str) -> None: ...",
runtime="def bad_posonly(flag, /, text): pass",
error="bad_posonly",
)
yield Case(
stub="""
class BadMethod:
def f(self, number: int, text: str) -> None: ...
""",
runtime="""
class BadMethod:
def f(self, n, text): pass
""",
error="BadMethod.f",
)
yield Case(
stub="""
class GoodDunder:
def __exit__(self, t, v, tb) -> None: ...
""",
runtime="""
class GoodDunder:
def __exit__(self, exc_type, exc_val, exc_tb): pass
""",
error=None,
)
yield Case(
stub="""def dunder_name(__x: int) -> None: ...""",
runtime="""def dunder_name(__x: int) -> None: ...""",
error=None,
)
yield Case(
stub="""def dunder_name_posonly(__x: int, /) -> None: ...""",
runtime="""def dunder_name_posonly(__x: int) -> None: ...""",
error=None,
)
yield Case(
stub="""def dunder_name_bad(x: int) -> None: ...""",
runtime="""def dunder_name_bad(__x: int) -> None: ...""",
error="dunder_name_bad",
)
@collect_cases
def test_arg_kind(self) -> Iterator[Case]:
yield Case(
stub="def runtime_kwonly(number: int, text: str) -> None: ...",
runtime="def runtime_kwonly(number, *, text): pass",
error="runtime_kwonly",
)
yield Case(
stub="def stub_kwonly(number: int, *, text: str) -> None: ...",
runtime="def stub_kwonly(number, text): pass",
error="stub_kwonly",
)
yield Case(
stub="def stub_posonly(__number: int, text: str) -> None: ...",
runtime="def stub_posonly(number, text): pass",
error="stub_posonly",
)
yield Case(
stub="def good_posonly(__number: int, text: str) -> None: ...",
runtime="def good_posonly(number, /, text): pass",
error=None,
)
yield Case(
stub="def runtime_posonly(number: int, text: str) -> None: ...",
runtime="def runtime_posonly(number, /, text): pass",
error="runtime_posonly",
)
yield Case(
stub="def stub_posonly_570(number: int, /, text: str) -> None: ...",
runtime="def stub_posonly_570(number, text): pass",
error="stub_posonly_570",
)
@collect_cases
def test_private_parameters(self) -> Iterator[Case]:
# Private parameters can optionally be omitted.
yield Case(
stub="def priv_pos_arg_missing() -> None: ...",
runtime="def priv_pos_arg_missing(_p1=None): pass",
error=None,
)
yield Case(
stub="def multi_priv_args() -> None: ...",
runtime="def multi_priv_args(_p='', _q=''): pass",
error=None,
)
yield Case(
stub="def priv_kwarg_missing() -> None: ...",
runtime="def priv_kwarg_missing(*, _p2=''): pass",
error=None,
)
# But if they are included, they must be correct.
yield Case(
stub="def priv_pos_arg_wrong(_p: int = ...) -> None: ...",
runtime="def priv_pos_arg_wrong(_p=None): pass",
error="priv_pos_arg_wrong",
)
yield Case(
stub="def priv_kwarg_wrong(*, _p: int = ...) -> None: ...",
runtime="def priv_kwarg_wrong(*, _p=None): pass",
error="priv_kwarg_wrong",
)
# Private parameters must have a default and start with exactly one
# underscore.
yield Case(
stub="def pos_arg_no_default() -> None: ...",
runtime="def pos_arg_no_default(_np): pass",
error="pos_arg_no_default",
)
yield Case(
stub="def kwarg_no_default() -> None: ...",
runtime="def kwarg_no_default(*, _np): pass",
error="kwarg_no_default",
)
yield Case(
stub="def double_underscore_pos_arg() -> None: ...",
runtime="def double_underscore_pos_arg(__np = None): pass",
error="double_underscore_pos_arg",
)
yield Case(
stub="def double_underscore_kwarg() -> None: ...",
runtime="def double_underscore_kwarg(*, __np = None): pass",
error="double_underscore_kwarg",
)
# But spot parameters that are accidentally not marked kw-only and
# vice-versa.
yield Case(
stub="def priv_arg_is_kwonly(_p=...) -> None: ...",
runtime="def priv_arg_is_kwonly(*, _p=''): pass",
error="priv_arg_is_kwonly",
)
yield Case(
stub="def priv_arg_is_positional(*, _p=...) -> None: ...",
runtime="def priv_arg_is_positional(_p=''): pass",
error="priv_arg_is_positional",
)
# Private parameters not at the end of the parameter list must be
# included so that users can pass the following arguments using
# positional syntax.
yield Case(
stub="def priv_args_not_at_end(*, q='') -> None: ...",
runtime="def priv_args_not_at_end(_p='', q=''): pass",
error="priv_args_not_at_end",
)
@collect_cases
def test_default_presence(self) -> Iterator[Case]:
yield Case(
stub="def f1(text: str = ...) -> None: ...",
runtime="def f1(text = 'asdf'): pass",
error=None,
)
yield Case(
stub="def f2(text: str = ...) -> None: ...", runtime="def f2(text): pass", error="f2"
)
yield Case(
stub="def f3(text: str) -> None: ...",
runtime="def f3(text = 'asdf'): pass",
error="f3",
)
yield Case(
stub="def f4(text: str = ...) -> None: ...",
runtime="def f4(text = None): pass",
error="f4",
)
yield Case(
stub="def f5(data: bytes = ...) -> None: ...",
runtime="def f5(data = 'asdf'): pass",
error="f5",
)
yield Case(
stub="""
from typing import TypeVar
_T = TypeVar("_T", bound=str)
def f6(text: _T = ...) -> None: ...
""",
runtime="def f6(text = None): pass",
error="f6",
)
@collect_cases
def test_default_value(self) -> Iterator[Case]:
yield Case(
stub="def f1(text: str = 'x') -> None: ...",
runtime="def f1(text = 'y'): pass",
error="f1",
)
yield Case(
stub='def f2(text: bytes = b"x\'") -> None: ...',
runtime='def f2(text = b"x\'"): pass',
error=None,
)
yield Case(
stub='def f3(text: bytes = b"y\'") -> None: ...',
runtime='def f3(text = b"x\'"): pass',
error="f3",
)
yield Case(
stub="def f4(text: object = 1) -> None: ...",
runtime="def f4(text = 1.0): pass",
error="f4",
)
yield Case(
stub="def f5(text: object = True) -> None: ...",
runtime="def f5(text = 1): pass",
error="f5",
)
yield Case(
stub="def f6(text: object = True) -> None: ...",
runtime="def f6(text = True): pass",
error=None,
)
yield Case(
stub="def f7(text: object = not True) -> None: ...",
runtime="def f7(text = False): pass",
error=None,
)
yield Case(
stub="def f8(text: object = not True) -> None: ...",
runtime="def f8(text = True): pass",
error="f8",
)
yield Case(
stub="def f9(text: object = {1: 2}) -> None: ...",
runtime="def f9(text = {1: 3}): pass",
error="f9",
)
yield Case(
stub="def f10(text: object = [1, 2]) -> None: ...",
runtime="def f10(text = [1, 2]): pass",
error=None,
)
# Simulate "<unrepresentable>"
yield Case(
stub="def f11() -> None: ...",
runtime="""
def f11(text=None) -> None: pass
f11.__text_signature__ = "(text=<unrepresentable>)"
""",
error="f11",
)
# Simulate numpy ndarray.__bool__ that raises an error
yield Case(
stub="def f12(x=1): ...",
runtime="""
class _ndarray:
def __eq__(self, obj): return self
def __bool__(self): raise ValueError
def f12(x=_ndarray()) -> None: pass
""",
error="f12",
)
@collect_cases
def test_static_class_method(self) -> Iterator[Case]:
yield Case(
stub="""
class Good:
@classmethod
def f(cls, number: int, text: str) -> None: ...
""",
runtime="""
class Good:
@classmethod
def f(cls, number, text): pass
""",
error=None,
)
yield Case(
stub="""
class Bad1:
def f(cls, number: int, text: str) -> None: ...
""",
runtime="""
class Bad1:
@classmethod
def f(cls, number, text): pass
""",
error="Bad1.f",
)
yield Case(
stub="""
class Bad2:
@classmethod
def f(cls, number: int, text: str) -> None: ...
""",
runtime="""
class Bad2:
@staticmethod
def f(self, number, text): pass
""",
error="Bad2.f",
)
yield Case(
stub="""
class Bad3:
@staticmethod
def f(cls, number: int, text: str) -> None: ...
""",
runtime="""
class Bad3:
@classmethod
def f(self, number, text): pass
""",
error="Bad3.f",
)
yield Case(
stub="""
class GoodNew:
def __new__(cls, *args, **kwargs): ...
""",
runtime="""
class GoodNew:
def __new__(cls, *args, **kwargs): pass
""",
error=None,
)
@collect_cases
def test_arg_mismatch(self) -> Iterator[Case]:
yield Case(
stub="def f1(a, *, b, c) -> None: ...", runtime="def f1(a, *, b, c): pass", error=None
)
yield Case(
stub="def f2(a, *, b) -> None: ...", runtime="def f2(a, *, b, c): pass", error="f2"
)
yield Case(
stub="def f3(a, *, b, c) -> None: ...", runtime="def f3(a, *, b): pass", error="f3"
)
yield Case(
stub="def f4(a, *, b, c) -> None: ...", runtime="def f4(a, b, *, c): pass", error="f4"
)
yield Case(
stub="def f5(a, b, *, c) -> None: ...", runtime="def f5(a, *, b, c): pass", error="f5"
)
@collect_cases
def test_varargs_varkwargs(self) -> Iterator[Case]:
yield Case(
stub="def f1(*args, **kwargs) -> None: ...",
runtime="def f1(*args, **kwargs): pass",
error=None,
)
yield Case(
stub="def f2(*args, **kwargs) -> None: ...",
runtime="def f2(**kwargs): pass",
error="f2",
)
yield Case(
stub="def g1(a, b, c, d) -> None: ...", runtime="def g1(a, *args): pass", error=None
)
yield Case(
stub="def g2(a, b, c, d, *args) -> None: ...", runtime="def g2(a): pass", error="g2"
)
yield Case(
stub="def g3(a, b, c, d, *args) -> None: ...",
runtime="def g3(a, *args): pass",
error=None,
)
yield Case(
stub="def h1(a) -> None: ...", runtime="def h1(a, b, c, d, *args): pass", error="h1"
)
yield Case(
stub="def h2(a, *args) -> None: ...", runtime="def h2(a, b, c, d): pass", error="h2"
)
yield Case(
stub="def h3(a, *args) -> None: ...",
runtime="def h3(a, b, c, d, *args): pass",
error="h3",
)
yield Case(
stub="def j1(a: int, *args) -> None: ...", runtime="def j1(a): pass", error="j1"
)
yield Case(
stub="def j2(a: int) -> None: ...", runtime="def j2(a, *args): pass", error="j2"
)
yield Case(
stub="def j3(a, b, c) -> None: ...", runtime="def j3(a, *args, c): pass", error="j3"
)
yield Case(stub="def k1(a, **kwargs) -> None: ...", runtime="def k1(a): pass", error="k1")
yield Case(
# In theory an error, but led to worse results in practice
stub="def k2(a) -> None: ...",
runtime="def k2(a, **kwargs): pass",
error=None,
)
yield Case(
stub="def k3(a, b) -> None: ...", runtime="def k3(a, **kwargs): pass", error="k3"
)
yield Case(
stub="def k4(a, *, b) -> None: ...", runtime="def k4(a, **kwargs): pass", error=None
)
yield Case(
stub="def k5(a, *, b) -> None: ...",
runtime="def k5(a, *, b, c, **kwargs): pass",
error="k5",
)
yield Case(
stub="def k6(a, *, b, **kwargs) -> None: ...",
runtime="def k6(a, *, b, c, **kwargs): pass",
error="k6",
)
@collect_cases
def test_overload(self) -> Iterator[Case]:
yield Case(
stub="""
from typing import overload
@overload
def f1(a: int, *, c: int = ...) -> int: ...
@overload
def f1(a: int, b: int, c: int = ...) -> str: ...
""",
runtime="def f1(a, b = 0, c = 0): pass",
error=None,
)
yield Case(
stub="""
@overload
def f2(a: int, *, c: int = ...) -> int: ...
@overload
def f2(a: int, b: int, c: int = ...) -> str: ...
""",
runtime="def f2(a, b, c = 0): pass",
error="f2",
)
yield Case(
stub="""
@overload
def f3(a: int) -> int: ...
@overload
def f3(a: int, b: str) -> str: ...
""",
runtime="def f3(a, b = None): pass",
error="f3",
)
yield Case(
stub="""
@overload
def f4(a: int, *args, b: int, **kwargs) -> int: ...
@overload
def f4(a: str, *args, b: int, **kwargs) -> str: ...
""",
runtime="def f4(a, *args, b, **kwargs): pass",
error=None,
)
yield Case(
stub="""
@overload
def f5(__a: int) -> int: ...
@overload
def f5(__b: str) -> str: ...
""",
runtime="def f5(x, /): pass",
error=None,
)
yield Case(
stub="""
from typing import final
from typing_extensions import deprecated
class Foo:
@overload
@final
def f6(self, __a: int) -> int: ...
@overload
@deprecated("evil")
def f6(self, __b: str) -> str: ...
""",
runtime="""
class Foo:
def f6(self, x, /): pass
""",
error=None,
)
yield Case(
stub="""
@overload
def f7(a: int, /) -> int: ...
@overload
def f7(b: str, /) -> str: ...
""",
runtime="def f7(x, /): pass",
error=None,
)
yield Case(
stub="""
@overload
def f8(a: int, c: int = 0, /) -> int: ...
@overload
def f8(b: str, d: int, /) -> str: ...
""",
runtime="def f8(x, y, /): pass",
error="f8",
)
yield Case(
stub="""
@overload
def f9(a: int, c: int = 0, /) -> int: ...
@overload
def f9(b: str, d: int, /) -> str: ...
""",
runtime="def f9(x, y=0, /): pass",
error=None,
)
yield Case(
stub="""
class Bar:
@overload
def f1(self) -> int: ...
@overload
def f1(self, a: int, /) -> int: ...
@overload
def f2(self, a: int, /) -> int: ...
@overload
def f2(self, a: str, /) -> int: ...
""",
runtime="""
class Bar:
def f1(self, *a) -> int: ...
def f2(self, *a) -> int: ...
""",
error=None,
)
yield Case(
stub="""
@overload
def f(a: int) -> int: ...
@overload
def f(a: int, b: str, /) -> str: ...
""",
runtime="""
def f(a, *args): ...
""",
error=None,
)
@collect_cases
def test_property(self) -> Iterator[Case]:
yield Case(
stub="""
class Good:
@property
def read_only_attr(self) -> int: ...
read_only_attr_alias = read_only_attr
""",
runtime="""
class Good:
@property
def read_only_attr(self): return 1
read_only_attr_alias = read_only_attr
""",
error=None,
)
yield Case(
stub="""
class Bad:
@property
def f(self) -> int: ...
""",
runtime="""
class Bad:
def f(self) -> int: return 1
""",
error="Bad.f",
)
yield Case(
stub="""
class GoodReadOnly:
@property
def f(self) -> int: ...
""",
runtime="""
class GoodReadOnly:
f = 1
""",
error=None,
)
yield Case(
stub="""
class BadReadOnly:
@property
def f(self) -> str: ...
""",
runtime="""
class BadReadOnly:
f = 1
""",
error="BadReadOnly.f",
)
yield Case(
stub="""
class Y:
@property
def read_only_attr(self) -> int: ...
@read_only_attr.setter
def read_only_attr(self, val: int) -> None: ...
""",
runtime="""
class Y:
@property
def read_only_attr(self): return 5
""",
error="Y.read_only_attr",
)
yield Case(
stub="""
class Z:
@property
def read_write_attr(self) -> int: ...
@read_write_attr.setter
def read_write_attr(self, val: int) -> None: ...
read_write_attr_alias = read_write_attr
""",
runtime="""
class Z:
@property
def read_write_attr(self): return self._val
@read_write_attr.setter
def read_write_attr(self, val): self._val = val
read_write_attr_alias = read_write_attr
""",
error=None,
)
yield Case(
stub="""
class FineAndDandy:
@property
def attr(self) -> int: ...
""",
runtime="""
class _EvilDescriptor:
def __get__(self, instance, ownerclass=None):
if instance is None:
raise AttributeError('no')
return 42
def __set__(self, instance, value):
raise AttributeError('no')
class FineAndDandy:
attr = _EvilDescriptor()
""",
error=None,
)
@collect_cases
def test_cached_property(self) -> Iterator[Case]:
yield Case(
stub="""
from functools import cached_property
class Good:
@cached_property
def read_only_attr(self) -> int: ...
@cached_property
def read_only_attr2(self) -> int: ...
""",
runtime="""
import functools as ft
from functools import cached_property
class Good:
@cached_property
def read_only_attr(self): return 1
@ft.cached_property
def read_only_attr2(self): return 1
""",
error=None,
)
yield Case(
stub="""
from functools import cached_property
class Bad:
@cached_property
def f(self) -> int: ...
""",
runtime="""
class Bad:
def f(self) -> int: return 1
""",
error="Bad.f",
)
yield Case(
stub="""
from functools import cached_property
class GoodCachedAttr:
@cached_property
def f(self) -> int: ...
""",
runtime="""
class GoodCachedAttr:
f = 1
""",
error=None,
)
yield Case(
stub="""
from functools import cached_property
class BadCachedAttr:
@cached_property
def f(self) -> str: ...
""",
runtime="""
class BadCachedAttr:
f = 1
""",
error="BadCachedAttr.f",
)
yield Case(
stub="""
from functools import cached_property
from typing import final
class FinalGood:
@cached_property
@final
def attr(self) -> int: ...
""",
runtime="""
from functools import cached_property
from typing import final
class FinalGood:
@cached_property
@final
def attr(self):
return 1
""",
error=None,
)
yield Case(
stub="""
from functools import cached_property
class FinalBad:
@cached_property
def attr(self) -> int: ...
""",
runtime="""
from functools import cached_property
from typing_extensions import final
class FinalBad:
@cached_property
@final
def attr(self):
return 1
""",
error="FinalBad.attr",
)
@collect_cases
def test_var(self) -> Iterator[Case]:
yield Case(stub="x1: int", runtime="x1 = 5", error=None)
yield Case(stub="x2: str", runtime="x2 = 5", error="x2")
yield Case("from typing import Tuple", "", None) # dummy case
yield Case(
stub="""
x3: Tuple[int, int]
""",
runtime="x3 = (1, 3)",
error=None,
)
yield Case(
stub="""
x4: Tuple[int, int]
""",
runtime="x4 = (1, 3, 5)",
error="x4",
)
yield Case(stub="x5: int", runtime="def x5(a, b): pass", error="x5")
yield Case(
stub="def foo(a: int, b: int) -> None: ...\nx6 = foo",
runtime="def foo(a, b): pass\ndef x6(c, d): pass",
error="x6",
)
yield Case(
stub="""
class X:
f: int
""",
runtime="""
class X:
def __init__(self):
self.f = "asdf"
""",
error=None,
)
yield Case(
stub="""
class Y:
read_only_attr: int
""",
runtime="""
class Y:
@property
def read_only_attr(self): return 5
""",
error="Y.read_only_attr",
)
yield Case(
stub="""
class Z:
read_write_attr: int
""",
runtime="""
class Z:
@property
def read_write_attr(self): return self._val
@read_write_attr.setter
def read_write_attr(self, val): self._val = val
""",
error=None,
)
@collect_cases
def test_type_alias(self) -> Iterator[Case]:
yield Case(
stub="""
import collections.abc
import re
import typing
from typing import Callable, Dict, Generic, Iterable, List, Match, Tuple, TypeVar, Union
""",
runtime="""
import collections.abc
import re
from typing import Callable, Dict, Generic, Iterable, List, Match, Tuple, TypeVar, Union
""",
error=None,
)
yield Case(
stub="""
class X:
def f(self) -> None: ...
Y = X
""",
runtime="""
class X:
def f(self) -> None: ...
class Y: ...
""",
error="Y.f",
)
yield Case(stub="A = Tuple[int, str]", runtime="A = (int, str)", error="A")
# Error if an alias isn't present at runtime...
yield Case(stub="B = str", runtime="", error="B")
# ... but only if the alias isn't private
yield Case(stub="_C = int", runtime="", error=None)
yield Case(
stub="""
D = tuple[str, str]
E = Tuple[int, int, int]
F = Tuple[str, int]
""",
runtime="""
D = Tuple[str, str]
E = Tuple[int, int, int]
F = List[str]
""",
error="F",
)
yield Case(
stub="""
G = str | int
H = Union[str, bool]
I = str | int
""",
runtime="""
G = Union[str, int]
H = Union[str, bool]
I = str
""",
error="I",
)
yield Case(
stub="""
K = dict[str, str]
L = Dict[int, int]
KK = collections.abc.Iterable[str]
LL = typing.Iterable[str]
""",
runtime="""
K = Dict[str, str]
L = Dict[int, int]
KK = Iterable[str]
LL = Iterable[str]
""",
error=None,
)
yield Case(
stub="""
_T = TypeVar("_T")
class _Spam(Generic[_T]):
def foo(self) -> None: ...
IntFood = _Spam[int]
""",
runtime="""
_T = TypeVar("_T")
class _Bacon(Generic[_T]):
def foo(self, arg): pass
IntFood = _Bacon[int]
""",
error="IntFood.foo",
)
yield Case(stub="StrList = list[str]", runtime="StrList = ['foo', 'bar']", error="StrList")
yield Case(
stub="""
N = typing.Callable[[str], bool]
O = collections.abc.Callable[[int], str]
P = typing.Callable[[str], bool]
""",
runtime="""
N = Callable[[str], bool]
O = Callable[[int], str]
P = int
""",
error="P",
)
yield Case(
stub="""
class Foo:
class Bar: ...
BarAlias = Foo.Bar
""",
runtime="""
class Foo:
class Bar: pass
BarAlias = Foo.Bar
""",
error=None,
)
yield Case(
stub="""
from io import StringIO
StringIOAlias = StringIO
""",
runtime="""
from _io import StringIO
StringIOAlias = StringIO
""",
error=None,
)
yield Case(stub="M = Match[str]", runtime="M = Match[str]", error=None)
yield Case(
stub="""
class Baz:
def fizz(self) -> None: ...
BazAlias = Baz
""",
runtime="""
class Baz:
def fizz(self): pass
BazAlias = Baz
Baz.__name__ = Baz.__qualname__ = Baz.__module__ = "New"
""",
error=None,
)
yield Case(
stub="""
class FooBar:
__module__: None # type: ignore
def fizz(self) -> None: ...
FooBarAlias = FooBar
""",
runtime="""
class FooBar:
def fizz(self): pass
FooBarAlias = FooBar
FooBar.__module__ = None
""",
error=None,
)
yield Case(
stub="""
Q = Dict[str, str]
R = dict[int, int]
S = Tuple[int, int]
T = tuple[str, str]
U = int | str
V = Union[int, str]
W = typing.Callable[[str], bool]
Z = collections.abc.Callable[[str], bool]
QQ = typing.Iterable[str]
RR = collections.abc.Iterable[str]
MM = typing.Match[str]
MMM = re.Match[str]
""",
runtime="""
Q = dict[str, str]
R = dict[int, int]
S = tuple[int, int]
T = tuple[str, str]
U = int | str
V = int | str
W = collections.abc.Callable[[str], bool]
Z = collections.abc.Callable[[str], bool]
QQ = collections.abc.Iterable[str]
RR = collections.abc.Iterable[str]
MM = re.Match[str]
MMM = re.Match[str]
""",
error=None,
)
@collect_cases
def test_enum(self) -> Iterator[Case]:
yield Case(stub="import enum", runtime="import enum", error=None)
yield Case(
stub="""
class X(enum.Enum):
a = ...
b = "asdf"
c = "oops"
""",
runtime="""
class X(enum.Enum):
a = 1
b = "asdf"
c = 2
""",
error="X.c",
)
yield Case(
stub="""
class Flags1(enum.Flag):
a = ...
b = 2
def foo(x: Flags1 = ...) -> None: ...
""",
runtime="""
class Flags1(enum.Flag):
a = 1
b = 2
def foo(x=Flags1.a|Flags1.b): pass
""",
error=None,
)
yield Case(
stub="""
class Flags2(enum.Flag):
a = ...
b = 2
def bar(x: Flags2 | None = None) -> None: ...
""",
runtime="""
class Flags2(enum.Flag):
a = 1
b = 2
def bar(x=Flags2.a|Flags2.b): pass
""",
error="bar",
)
yield Case(
stub="""
class Flags3(enum.Flag):
a = ...
b = 2
def baz(x: Flags3 | None = ...) -> None: ...
""",
runtime="""
class Flags3(enum.Flag):
a = 1
b = 2
def baz(x=Flags3(0)): pass
""",
error=None,
)
yield Case(
runtime="""
import enum
class SomeObject: ...
class WeirdEnum(enum.Enum):
a = SomeObject()
b = SomeObject()
""",
stub="""
import enum
class SomeObject: ...
class WeirdEnum(enum.Enum):
_value_: SomeObject
a = ...
b = ...
""",
error=None,
)
yield Case(
stub="""
class Flags4(enum.Flag):
a = 1
b = 2
def spam(x: Flags4 | None = None) -> None: ...
""",
runtime="""
class Flags4(enum.Flag):
a = 1
b = 2
def spam(x=Flags4(0)): pass
""",
error="spam",
)
yield Case(
stub="""
import sys
from typing import Final, Literal
class BytesEnum(bytes, enum.Enum):
a = b'foo'
FOO: Literal[BytesEnum.a]
BAR: Final = BytesEnum.a
BAZ: BytesEnum
EGGS: bytes
""",
runtime="""
class BytesEnum(bytes, enum.Enum):
a = b'foo'
FOO = BytesEnum.a
BAR = BytesEnum.a
BAZ = BytesEnum.a
EGGS = BytesEnum.a
""",
error=None,
)
yield Case(
stub="""
class HasSlotsAndNothingElse:
__slots__ = ("x",)
x: int
class HasInheritedSlots(HasSlotsAndNothingElse):
pass
class HasEmptySlots:
__slots__ = ()
""",
runtime="""
class HasSlotsAndNothingElse:
__slots__ = ("x",)
x: int
class HasInheritedSlots(HasSlotsAndNothingElse):
pass
class HasEmptySlots:
__slots__ = ()
""",
error=None,
)
yield Case(
stub="""
class HasCompatibleValue(enum.Enum):
_value_: str
FOO = ...
""",
runtime="""
class HasCompatibleValue(enum.Enum):
FOO = "foo"
""",
error=None,
)
yield Case(
stub="""
class HasIncompatibleValue(enum.Enum):
_value_: int
FOO = ...
""",
runtime="""
class HasIncompatibleValue(enum.Enum):
FOO = "foo"
""",
error="HasIncompatibleValue.FOO",
)
@collect_cases
def test_decorator(self) -> Iterator[Case]:
yield Case(
stub="""
from typing import Any, Callable
def decorator(f: Callable[[], int]) -> Callable[..., Any]: ...
@decorator
def f() -> Any: ...
""",
runtime="""
def decorator(f): return f
@decorator
def f(): return 3
""",
error=None,
)
@collect_cases
def test_all_at_runtime_not_stub(self) -> Iterator[Case]:
yield Case(
stub="Z: int",
runtime="""
__all__ = []
Z = 5""",
error="__all__",
)
@collect_cases
def test_all_in_stub_not_at_runtime(self) -> Iterator[Case]:
yield Case(stub="__all__ = ()", runtime="", error="__all__")
@collect_cases
def test_all_in_stub_different_to_all_at_runtime(self) -> Iterator[Case]:
# We *should* emit an error with the module name itself + __all__,
# if the stub *does* define __all__,
# but the stub's __all__ is inconsistent with the runtime's __all__
yield Case(
stub="""
__all__ = ['foo']
foo: str
""",
runtime="""
__all__ = []
foo = 'foo'
""",
error="__all__",
)
@collect_cases
def test_missing(self) -> Iterator[Case]:
yield Case(stub="x = 5", runtime="", error="x")
yield Case(stub="def f(): ...", runtime="", error="f")
yield Case(stub="class X: ...", runtime="", error="X")
yield Case(
stub="""
from typing import overload
@overload
def h(x: int): ...
@overload
def h(x: str): ...
""",
runtime="",
error="h",
)
yield Case(stub="", runtime="__all__ = []", error="__all__") # dummy case
yield Case(stub="", runtime="__all__ += ['y']\ny = 5", error="y")
yield Case(stub="", runtime="__all__ += ['g']\ndef g(): pass", error="g")
# Here we should only check that runtime has B, since the stub explicitly re-exports it
yield Case(
stub="from mystery import A, B as B, C as D # type: ignore", runtime="", error="B"
)
yield Case(
stub="class Y: ...",
runtime="__all__ += ['Y']\nclass Y:\n def __or__(self, other): return self|other",
error="Y.__or__",
)
yield Case(
stub="class Z: ...",
runtime="__all__ += ['Z']\nclass Z:\n def __reduce__(self): return (Z,)",
error=None,
)
# __call__ exists on type, so it appears to exist on the class.
# This checks that we identify it as missing at runtime anyway.
yield Case(
stub="""
class ClassWithMetaclassOverride:
def __call__(*args, **kwds): ...
""",
runtime="class ClassWithMetaclassOverride: ...",
error="ClassWithMetaclassOverride.__call__",
)
# Test that we ignore object.__setattr__ and object.__delattr__ inheritance
yield Case(
stub="""
from typing import Any
class FakeSetattrClass:
def __setattr__(self, name: str, value: Any, /) -> None: ...
""",
runtime="class FakeSetattrClass: ...",
error="FakeSetattrClass.__setattr__",
)
yield Case(
stub="""
class FakeDelattrClass:
def __delattr__(self, name: str, /) -> None: ...
""",
runtime="class FakeDelattrClass: ...",
error="FakeDelattrClass.__delattr__",
)
@collect_cases
def test_missing_no_runtime_all(self) -> Iterator[Case]:
yield Case(stub="", runtime="import sys", error=None)
yield Case(stub="", runtime="def g(): ...", error="g")
yield Case(stub="", runtime="CONSTANT = 0", error="CONSTANT")
yield Case(stub="", runtime="import re; constant = re.compile('foo')", error="constant")
yield Case(stub="", runtime="from json.scanner import NUMBER_RE", error=None)
yield Case(stub="", runtime="from string import ascii_letters", error=None)
@collect_cases
def test_missing_no_runtime_all_terrible(self) -> Iterator[Case]:
yield Case(
stub="",
runtime="""
import sys
import types
import __future__
_m = types.SimpleNamespace()
_m.annotations = __future__.annotations
sys.modules["_terrible_stubtest_test_module"] = _m
from _terrible_stubtest_test_module import *
assert annotations
""",
error=None,
)
@collect_cases
def test_non_public_1(self) -> Iterator[Case]:
yield Case(
stub="__all__: list[str]", runtime="", error=f"{TEST_MODULE_NAME}.__all__"
) # dummy case
yield Case(stub="_f: int", runtime="def _f(): ...", error="_f")
@collect_cases
def test_non_public_2(self) -> Iterator[Case]:
yield Case(stub="__all__: list[str] = ['f']", runtime="__all__ = ['f']", error=None)
yield Case(stub="f: int", runtime="def f(): ...", error="f")
yield Case(stub="g: int", runtime="def g(): ...", error="g")
@collect_cases
def test_dunders(self) -> Iterator[Case]:
yield Case(
stub="class A:\n def __init__(self, a: int, b: int) -> None: ...",
runtime="class A:\n def __init__(self, a, bx): pass",
error="A.__init__",
)
yield Case(
stub="class B:\n def __call__(self, c: int, d: int) -> None: ...",
runtime="class B:\n def __call__(self, c, dx): pass",
error="B.__call__",
)
yield Case(
stub=(
"class C:\n"
" def __init_subclass__(\n"
" cls, e: int = ..., **kwargs: int\n"
" ) -> None: ...\n"
),
runtime="class C:\n def __init_subclass__(cls, e=1, **kwargs): pass",
error=None,
)
yield Case(
stub="class D:\n def __class_getitem__(cls, type: type) -> type: ...",
runtime="class D:\n def __class_getitem__(cls, type): ...",
error=None,
)
@collect_cases
def test_not_subclassable(self) -> Iterator[Case]:
yield Case(
stub="class CanBeSubclassed: ...", runtime="class CanBeSubclassed: ...", error=None
)
yield Case(
stub="class CannotBeSubclassed:\n def __init_subclass__(cls) -> None: ...",
runtime="class CannotBeSubclassed:\n def __init_subclass__(cls): raise TypeError",
error="CannotBeSubclassed",
)
@collect_cases
def test_disjoint_base(self) -> Iterator[Case]:
yield Case(
stub="""
class A: pass
""",
runtime="""
class A: pass
""",
error=None,
)
yield Case(
stub="""
from typing_extensions import disjoint_base
@disjoint_base
class B: pass
""",
runtime="""
class B: pass
""",
error="test_module.B",
)
yield Case(
stub="""
from typing_extensions import Self
class mytakewhile:
def __new__(cls, predicate: object, iterable: object, /) -> Self: ...
def __iter__(self) -> Self: ...
def __next__(self) -> object: ...
""",
runtime="""
from itertools import takewhile as mytakewhile
""",
# Should have @disjoint_base
error="test_module.mytakewhile",
)
yield Case(
stub="""
from typing_extensions import disjoint_base, Self
@disjoint_base
class mycorrecttakewhile:
def __new__(cls, predicate: object, iterable: object, /) -> Self: ...
def __iter__(self) -> Self: ...
def __next__(self) -> object: ...
""",
runtime="""
from itertools import takewhile as mycorrecttakewhile
""",
error=None,
)
yield Case(
runtime="""
class IsDisjointBaseBecauseItHasSlots:
__slots__ = ("a",)
a: int
""",
stub="""
from typing_extensions import disjoint_base
@disjoint_base
class IsDisjointBaseBecauseItHasSlots:
a: int
""",
error="test_module.IsDisjointBaseBecauseItHasSlots",
)
yield Case(
runtime="""
class IsFinalSoDisjointBaseIsRedundant: ...
""",
stub="""
from typing_extensions import disjoint_base, final
@final
@disjoint_base
class IsFinalSoDisjointBaseIsRedundant: ...
""",
error="test_module.IsFinalSoDisjointBaseIsRedundant",
)
yield Case(
runtime="""
import enum
class IsEnumWithMembersSoDisjointBaseIsRedundant(enum.Enum):
A = 1
B = 2
""",
stub="""
from typing_extensions import disjoint_base
import enum
@disjoint_base
class IsEnumWithMembersSoDisjointBaseIsRedundant(enum.Enum):
A = 1
B = 2
""",
error="test_module.IsEnumWithMembersSoDisjointBaseIsRedundant",
)
@collect_cases
def test_has_runtime_final_decorator(self) -> Iterator[Case]:
yield Case(
stub="from typing_extensions import final",
runtime="""
import functools
from typing_extensions import final
""",
error=None,
)
yield Case(
stub="""
@final
class A: ...
""",
runtime="""
@final
class A: ...
""",
error=None,
)
yield Case( # Runtime can miss `@final` decorator
stub="""
@final
class B: ...
""",
runtime="""
class B: ...
""",
error=None,
)
yield Case( # Stub cannot miss `@final` decorator
stub="""
class C: ...
""",
runtime="""
@final
class C: ...
""",
error="C",
)
yield Case(
stub="""
class D:
@final
def foo(self) -> None: ...
@final
@staticmethod
def bar() -> None: ...
@staticmethod
@final
def bar2() -> None: ...
@final
@classmethod
def baz(cls) -> None: ...
@classmethod
@final
def baz2(cls) -> None: ...
@property
@final
def eggs(self) -> int: ...
@final
@property
def eggs2(self) -> int: ...
@final
def ham(self, obj: int) -> int: ...
""",
runtime="""
class D:
@final
def foo(self): pass
@final
@staticmethod
def bar(): pass
@staticmethod
@final
def bar2(): pass
@final
@classmethod
def baz(cls): pass
@classmethod
@final
def baz2(cls): pass
@property
@final
def eggs(self): return 42
@final
@property
def eggs2(self): pass
@final
@functools.lru_cache()
def ham(self, obj): return obj * 2
""",
error=None,
)
# Stub methods are allowed to have @final even if the runtime doesn't...
yield Case(
stub="""
class E:
@final
def foo(self) -> None: ...
@final
@staticmethod
def bar() -> None: ...
@staticmethod
@final
def bar2() -> None: ...
@final
@classmethod
def baz(cls) -> None: ...
@classmethod
@final
def baz2(cls) -> None: ...
@property
@final
def eggs(self) -> int: ...
@final
@property
def eggs2(self) -> int: ...
@final
def ham(self, obj: int) -> int: ...
""",
runtime="""
class E:
def foo(self): pass
@staticmethod
def bar(): pass
@staticmethod
def bar2(): pass
@classmethod
def baz(cls): pass
@classmethod
def baz2(cls): pass
@property
def eggs(self): return 42
@property
def eggs2(self): return 42
@functools.lru_cache()
def ham(self, obj): return obj * 2
""",
error=None,
)
# ...But if the runtime has @final, the stub must have it as well
yield Case(
stub="""
class F:
def foo(self) -> None: ...
""",
runtime="""
class F:
@final
def foo(self): pass
""",
error="F.foo",
)
yield Case(
stub="""
class G:
@staticmethod
def foo() -> None: ...
""",
runtime="""
class G:
@final
@staticmethod
def foo(): pass
""",
error="G.foo",
)
yield Case(
stub="""
class H:
@staticmethod
def foo() -> None: ...
""",
runtime="""
class H:
@staticmethod
@final
def foo(): pass
""",
error="H.foo",
)
yield Case(
stub="""
class I:
@classmethod
def foo(cls) -> None: ...
""",
runtime="""
class I:
@final
@classmethod
def foo(cls): pass
""",
error="I.foo",
)
yield Case(
stub="""
class J:
@classmethod
def foo(cls) -> None: ...
""",
runtime="""
class J:
@classmethod
@final
def foo(cls): pass
""",
error="J.foo",
)
yield Case(
stub="""
class K:
@property
def foo(self) -> int: ...
""",
runtime="""
class K:
@property
@final
def foo(self): return 42
""",
error="K.foo",
)
# This test wouldn't pass,
# because the runtime can't set __final__ on instances of builtins.property,
# so stubtest has non way of knowing that the runtime was decorated with @final:
#
# yield Case(
# stub="""
# class K2:
# @property
# def foo(self) -> int: ...
# """,
# runtime="""
# class K2:
# @final
# @property
# def foo(self): return 42
# """,
# error="K2.foo",
# )
yield Case(
stub="""
class L:
def foo(self, obj: int) -> int: ...
""",
runtime="""
class L:
@final
@functools.lru_cache()
def foo(self, obj): return obj * 2
""",
error="L.foo",
)
@collect_cases
def test_name_mangling(self) -> Iterator[Case]:
yield Case(
stub="""
class X:
def __mangle_good(self, text: str) -> None: ...
def __mangle_bad(self, number: int) -> None: ...
""",
runtime="""
class X:
def __mangle_good(self, text): pass
def __mangle_bad(self, text): pass
""",
error="X.__mangle_bad",
)
yield Case(
stub="""
class Klass:
class __Mangled1:
class __Mangled2:
def __mangle_good(self, text: str) -> None: ...
def __mangle_bad(self, number: int) -> None: ...
""",
runtime="""
class Klass:
class __Mangled1:
class __Mangled2:
def __mangle_good(self, text): pass
def __mangle_bad(self, text): pass
""",
error="Klass.__Mangled1.__Mangled2.__mangle_bad",
)
yield Case(
stub="""
class __Dunder__:
def __mangle_good(self, text: str) -> None: ...
def __mangle_bad(self, number: int) -> None: ...
""",
runtime="""
class __Dunder__:
def __mangle_good(self, text): pass
def __mangle_bad(self, text): pass
""",
error="__Dunder__.__mangle_bad",
)
yield Case(
stub="""
class _Private:
def __mangle_good(self, text: str) -> None: ...
def __mangle_bad(self, number: int) -> None: ...
""",
runtime="""
class _Private:
def __mangle_good(self, text): pass
def __mangle_bad(self, text): pass
""",
error="_Private.__mangle_bad",
)
@collect_cases
def test_mro(self) -> Iterator[Case]:
yield Case(
stub="""
class A:
def foo(self, x: int) -> None: ...
class B(A):
pass
class C(A):
pass
""",
runtime="""
class A:
def foo(self, x: int) -> None: ...
class B(A):
def foo(self, x: int) -> None: ...
class C(A):
def foo(self, y: int) -> None: ...
""",
error="C.foo",
)
yield Case(
stub="""
class X: ...
""",
runtime="""
class X:
def __init__(self, x): pass
""",
error="X.__init__",
)
@collect_cases
def test_good_literal(self) -> Iterator[Case]:
yield Case(
stub=r"""
from typing import Literal
import enum
class Color(enum.Enum):
RED = ...
NUM: Literal[1]
CHAR: Literal['a']
FLAG: Literal[True]
NON: Literal[None]
BYT1: Literal[b'abc']
BYT2: Literal[b'\x90']
ENUM: Literal[Color.RED]
""",
runtime=r"""
import enum
class Color(enum.Enum):
RED = 3
NUM = 1
CHAR = 'a'
NON = None
FLAG = True
BYT1 = b"abc"
BYT2 = b'\x90'
ENUM = Color.RED
""",
error=None,
)
@collect_cases
def test_bad_literal(self) -> Iterator[Case]:
yield Case("from typing import Literal", "", None) # dummy case
yield Case(
stub="INT_FLOAT_MISMATCH: Literal[1]",
runtime="INT_FLOAT_MISMATCH = 1.0",
error="INT_FLOAT_MISMATCH",
)
yield Case(stub="WRONG_INT: Literal[1]", runtime="WRONG_INT = 2", error="WRONG_INT")
yield Case(stub="WRONG_STR: Literal['a']", runtime="WRONG_STR = 'b'", error="WRONG_STR")
yield Case(
stub="BYTES_STR_MISMATCH: Literal[b'value']",
runtime="BYTES_STR_MISMATCH = 'value'",
error="BYTES_STR_MISMATCH",
)
yield Case(
stub="STR_BYTES_MISMATCH: Literal['value']",
runtime="STR_BYTES_MISMATCH = b'value'",
error="STR_BYTES_MISMATCH",
)
yield Case(
stub="WRONG_BYTES: Literal[b'abc']",
runtime="WRONG_BYTES = b'xyz'",
error="WRONG_BYTES",
)
yield Case(
stub="WRONG_BOOL_1: Literal[True]",
runtime="WRONG_BOOL_1 = False",
error="WRONG_BOOL_1",
)
yield Case(
stub="WRONG_BOOL_2: Literal[False]",
runtime="WRONG_BOOL_2 = True",
error="WRONG_BOOL_2",
)
@collect_cases
def test_special_subtype(self) -> Iterator[Case]:
yield Case(
stub="""
b1: bool
b2: bool
b3: bool
""",
runtime="""
b1 = 0
b2 = 1
b3 = 2
""",
error="b3",
)
yield Case(
stub="""
from typing import TypedDict
class _Options(TypedDict):
a: str
b: int
opt1: _Options
opt2: _Options
opt3: _Options
""",
runtime="""
opt1 = {"a": "3.", "b": 14}
opt2 = {"some": "stuff"} # false negative
opt3 = 0
""",
error="opt3",
)
@collect_cases
def test_runtime_typing_objects(self) -> Iterator[Case]:
yield Case(
stub="from typing import Protocol, TypedDict",
runtime="from typing import Protocol, TypedDict",
error=None,
)
yield Case(
stub="""
class X(Protocol):
bar: int
def foo(self, x: int, y: bytes = ...) -> str: ...
""",
runtime="""
class X(Protocol):
bar: int
def foo(self, x: int, y: bytes = ...) -> str: ...
""",
error=None,
)
yield Case(
stub="""
class Y(TypedDict):
a: int
""",
runtime="""
class Y(TypedDict):
a: int
""",
error=None,
)
@collect_cases
def test_named_tuple(self) -> Iterator[Case]:
yield Case(
stub="from typing import NamedTuple",
runtime="from typing import NamedTuple",
error=None,
)
yield Case(
stub="""
class X1(NamedTuple):
bar: int
foo: str = ...
""",
runtime="""
class X1(NamedTuple):
bar: int
foo: str = 'a'
""",
error=None,
)
yield Case(
stub="""
class X2(NamedTuple):
bar: int
foo: str
""",
runtime="""
class X2(NamedTuple):
bar: int
foo: str = 'a'
""",
# `__new__` will miss a default value for a `foo` parameter,
# but we don't generate special errors for `foo` missing `...` part.
error="X2.__new__",
)
@collect_cases
def test_named_tuple_typing_and_collections(self) -> Iterator[Case]:
yield Case(
stub="from typing import NamedTuple",
runtime="from collections import namedtuple",
error=None,
)
yield Case(
stub="""
class X1(NamedTuple):
bar: int
foo: str = ...
""",
runtime="""
X1 = namedtuple('X1', ['bar', 'foo'], defaults=['a'])
""",
error=None,
)
yield Case(
stub="""
class X2(NamedTuple):
bar: int
foo: str
""",
runtime="""
X2 = namedtuple('X1', ['bar', 'foo'], defaults=['a'])
""",
error="X2.__new__",
)
@collect_cases
def test_type_var(self) -> Iterator[Case]:
yield Case(
stub="from typing import TypeVar", runtime="from typing import TypeVar", error=None
)
yield Case(stub="A = TypeVar('A')", runtime="A = TypeVar('A')", error=None)
yield Case(stub="B = TypeVar('B')", runtime="B = 5", error="B")
yield Case(
stub="from typing import ParamSpec", runtime="from typing import ParamSpec", error=None
)
yield Case(stub="C = ParamSpec('C')", runtime="C = ParamSpec('C')", error=None)
@collect_cases
def test_metaclass_match(self) -> Iterator[Case]:
yield Case(stub="class Meta(type): ...", runtime="class Meta(type): ...", error=None)
yield Case(stub="class A0: ...", runtime="class A0: ...", error=None)
yield Case(
stub="class A1(metaclass=Meta): ...",
runtime="class A1(metaclass=Meta): ...",
error=None,
)
yield Case(stub="class A2: ...", runtime="class A2(metaclass=Meta): ...", error="A2")
yield Case(stub="class A3(metaclass=Meta): ...", runtime="class A3: ...", error="A3")
# Explicit `type` metaclass can always be added in any part:
yield Case(
stub="class T1(metaclass=type): ...",
runtime="class T1(metaclass=type): ...",
error=None,
)
yield Case(stub="class T2: ...", runtime="class T2(metaclass=type): ...", error=None)
yield Case(stub="class T3(metaclass=type): ...", runtime="class T3: ...", error=None)
# Explicit check that `_protected` names are also supported:
yield Case(stub="class _P1(type): ...", runtime="class _P1(type): ...", error=None)
yield Case(stub="class P2: ...", runtime="class P2(metaclass=_P1): ...", error="P2")
# With inheritance:
yield Case(
stub="""
class I1(metaclass=Meta): ...
class S1(I1): ...
""",
runtime="""
class I1(metaclass=Meta): ...
class S1(I1): ...
""",
error=None,
)
yield Case(
stub="""
class I2(metaclass=Meta): ...
class S2: ... # missing inheritance
""",
runtime="""
class I2(metaclass=Meta): ...
class S2(I2): ...
""",
error="S2",
)
@collect_cases
def test_metaclass_abcmeta(self) -> Iterator[Case]:
# Handling abstract metaclasses is special:
yield Case(stub="from abc import ABCMeta", runtime="from abc import ABCMeta", error=None)
yield Case(
stub="class A1(metaclass=ABCMeta): ...",
runtime="class A1(metaclass=ABCMeta): ...",
error=None,
)
# Stubs cannot miss abstract metaclass:
yield Case(stub="class A2: ...", runtime="class A2(metaclass=ABCMeta): ...", error="A2")
# But, stubs can add extra abstract metaclass, this might be a typing hack:
yield Case(stub="class A3(metaclass=ABCMeta): ...", runtime="class A3: ...", error=None)
@collect_cases
def test_abstract_methods(self) -> Iterator[Case]:
yield Case(
stub="""
from abc import abstractmethod
from typing import overload
""",
runtime="from abc import abstractmethod",
error=None,
)
yield Case(
stub="""
class A1:
def some(self) -> None: ...
""",
runtime="""
class A1:
@abstractmethod
def some(self) -> None: ...
""",
error="A1.some",
)
yield Case(
stub="""
class A2:
@abstractmethod
def some(self) -> None: ...
""",
runtime="""
class A2:
@abstractmethod
def some(self) -> None: ...
""",
error=None,
)
yield Case(
stub="""
class A3:
@overload
def some(self, other: int) -> str: ...
@overload
def some(self, other: str) -> int: ...
""",
runtime="""
class A3:
@abstractmethod
def some(self, other) -> None: ...
""",
error="A3.some",
)
yield Case(
stub="""
class A4:
@overload
@abstractmethod
def some(self, other: int) -> str: ...
@overload
@abstractmethod
def some(self, other: str) -> int: ...
""",
runtime="""
class A4:
@abstractmethod
def some(self, other) -> None: ...
""",
error=None,
)
yield Case(
stub="""
class A5:
@abstractmethod
@overload
def some(self, other: int) -> str: ...
@abstractmethod
@overload
def some(self, other: str) -> int: ...
""",
runtime="""
class A5:
@abstractmethod
def some(self, other) -> None: ...
""",
error=None,
)
# Runtime can miss `@abstractmethod`:
yield Case(
stub="""
class A6:
@abstractmethod
def some(self) -> None: ...
""",
runtime="""
class A6:
def some(self) -> None: ...
""",
error=None,
)
@collect_cases
def test_abstract_properties(self) -> Iterator[Case]:
# TODO: test abstract properties with setters
yield Case(
stub="from abc import abstractmethod",
runtime="from abc import abstractmethod",
error=None,
)
# Ensure that `@property` also can be abstract:
yield Case(
stub="""
class AP1:
@property
def some(self) -> int: ...
""",
runtime="""
class AP1:
@property
@abstractmethod
def some(self) -> int: ...
""",
error="AP1.some",
)
yield Case(
stub="""
class AP1_2:
def some(self) -> int: ... # missing `@property` decorator
""",
runtime="""
class AP1_2:
@property
@abstractmethod
def some(self) -> int: ...
""",
error="AP1_2.some",
)
yield Case(
stub="""
class AP2:
@property
@abstractmethod
def some(self) -> int: ...
""",
runtime="""
class AP2:
@property
@abstractmethod
def some(self) -> int: ...
""",
error=None,
)
# Runtime can miss `@abstractmethod`:
yield Case(
stub="""
class AP3:
@property
@abstractmethod
def some(self) -> int: ...
""",
runtime="""
class AP3:
@property
def some(self) -> int: ...
""",
error=None,
)
@collect_cases
def test_type_check_only(self) -> Iterator[Case]:
yield Case(
stub="from typing import type_check_only, overload",
runtime="from typing import overload",
error=None,
)
# You can have public types that are only defined in stubs
# with `@type_check_only`:
yield Case(
stub="""
@type_check_only
class A1: ...
""",
runtime="",
error=None,
)
# Having `@type_check_only` on a type that exists at runtime is an error
yield Case(
stub="""
@type_check_only
class A2: ...
""",
runtime="class A2: ...",
error="A2",
)
# The same is true for NamedTuples and TypedDicts:
yield Case(
stub="from typing import NamedTuple, TypedDict",
runtime="from typing import NamedTuple, TypedDict",
error=None,
)
yield Case(
stub="""
@type_check_only
class NT1(NamedTuple): ...
""",
runtime="class NT1(NamedTuple): ...",
error="NT1",
)
yield Case(
stub="""
@type_check_only
class TD1(TypedDict): ...
""",
runtime="class TD1(TypedDict): ...",
error="TD1",
)
# The same is true for functions:
yield Case(
stub="""
@type_check_only
def func1() -> None: ...
""",
runtime="",
error=None,
)
yield Case(
stub="""
@type_check_only
def func2() -> None: ...
""",
runtime="def func2() -> None: ...",
error="func2",
)
# A type that exists at runtime is allowed to alias a type marked
# as '@type_check_only' in the stubs.
yield Case(
stub="""
@type_check_only
class _X1: ...
X2 = _X1
""",
runtime="class X2: ...",
error=None,
)
@collect_cases
def test_type_default_protocol(self) -> Iterator[Case]:
yield Case(
stub="""
from typing import Protocol
class _FormatterClass(Protocol):
def __call__(self, *, prog: str) -> HelpFormatter: ...
class ArgumentParser:
def __init__(self, formatter_class: _FormatterClass = ...) -> None: ...
class HelpFormatter:
def __init__(self, prog: str, indent_increment: int = 2) -> None: ...
""",
runtime="""
class HelpFormatter:
def __init__(self, prog, indent_increment=2) -> None: ...
class ArgumentParser:
def __init__(self, formatter_class=HelpFormatter): ...
""",
error=None,
)
def remove_color_code(s: str) -> str:
return re.sub("\\x1b.*?m", "", s) # this works!
| StubtestUnit |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 41198,
"end": 43526
} | class ____(SingleContinuousDistribution):
_argnames = ('d1', 'd2')
set = Interval(0, oo)
@staticmethod
def check(d1, d2):
_value_check((d1 > 0, d1.is_integer),
"Degrees of freedom d1 must be positive integer.")
_value_check((d2 > 0, d2.is_integer),
"Degrees of freedom d2 must be positive integer.")
def pdf(self, x):
d1, d2 = self.d1, self.d2
return (sqrt((d1*x)**d1*d2**d2 / (d1*x+d2)**(d1+d2))
/ (x * beta_fn(d1/2, d2/2)))
def _moment_generating_function(self, t):
raise NotImplementedError('The moment generating function for the '
'F-distribution does not exist.')
def FDistribution(name, d1, d2):
r"""
Create a continuous random variable with a F distribution.
Explanation
===========
The density of the F distribution is given by
.. math::
f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
{(d_1 x + d_2)^{d_1 + d_2}}}}
{x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}
with :math:`x > 0`.
Parameters
==========
d1 : `d_1 > 0`, where `d_1` is the degrees of freedom (`n_1 - 1`)
d2 : `d_2 > 0`, where `d_2` is the degrees of freedom (`n_2 - 1`)
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import FDistribution, density
>>> from sympy import Symbol, pprint
>>> d1 = Symbol("d1", positive=True)
>>> d2 = Symbol("d2", positive=True)
>>> z = Symbol("z")
>>> X = FDistribution("x", d1, d2)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
d2
-- ______________________________
2 / d1 -d1 - d2
d2 *\/ (d1*z) *(d1*z + d2)
--------------------------------------
/d1 d2\
z*B|--, --|
\2 2 /
References
==========
.. [1] https://en.wikipedia.org/wiki/F-distribution
.. [2] https://mathworld.wolfram.com/F-Distribution.html
"""
return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
| FDistributionDistribution |
python | tensorflow__tensorflow | tensorflow/python/framework/c_api_util.py | {
"start": 4841,
"end": 7408
} | class ____(object):
"""Wrapper around Tf_ApiDefMap that handles querying and deletion.
The OpDef protos are also stored in this class so that they could
be queried by op name.
"""
__slots__ = ["_api_def_map", "_op_per_name"]
def __init__(self):
op_def_proto = op_def_pb2.OpList()
buf = c_api.TF_GetAllOpList()
try:
op_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
self._api_def_map = c_api.TF_NewApiDefMap(buf)
finally:
c_api.TF_DeleteBuffer(buf)
self._op_per_name = {}
for op in op_def_proto.op:
self._op_per_name[op.name] = op
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteApiDefMap is not None:
c_api.TF_DeleteApiDefMap(self._api_def_map)
def put_api_def(self, text):
c_api.TF_ApiDefMapPut(self._api_def_map, text, len(text))
def get_api_def(self, op_name):
api_def_proto = api_def_pb2.ApiDef()
buf = c_api.TF_ApiDefMapGet(self._api_def_map, op_name, len(op_name))
try:
api_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
finally:
c_api.TF_DeleteBuffer(buf)
return api_def_proto
def get_op_def(self, op_name):
if op_name in self._op_per_name:
return self._op_per_name[op_name]
raise ValueError(f"No op_def found for op name {op_name}.")
def op_names(self):
return self._op_per_name.keys()
@tf_contextlib.contextmanager
def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
with tf_buffer(some_string) as buf:
c_api.TF_SomeFunction(buf)
# buf has been deleted
Args:
data: An optional `bytes`, `str`, or `unicode` object. If not None, the
yielded buffer will contain this data.
Yields:
Created TF_Buffer
"""
if data:
buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
else:
buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
c_api.TF_DeleteBuffer(buf)
def tf_output(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret
| ApiDefMap |
python | pikepdf__pikepdf | tests/test_io.py | {
"start": 2190,
"end": 2648
} | class ____(BytesIO):
def write(self, b): # pylint: disable=unused-argument
return -1
@pytest.mark.parametrize(
'bio_class,exc_type',
[
(BadBytesIO, ValueError),
(WrongTypeBytesIO, TypeError),
(NegativeOneBytesIO, PdfError),
],
)
def test_invalid_output_stream(sandwich, bio_class, exc_type):
bio = bio_class()
with pytest.raises(exc_type):
sandwich.save(bio, static_id=True)
| NegativeOneBytesIO |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py | {
"start": 2849,
"end": 2887
} | class ____(Generic[P2, P1]): ...
| ClassPB |
python | streamlit__streamlit | lib/streamlit/runtime/caching/hashing.py | {
"start": 6630,
"end": 8246
} | class ____:
"""Stacks of what has been hashed, with at most 1 stack per thread."""
def __init__(self) -> None:
self._stacks: weakref.WeakKeyDictionary[threading.Thread, _HashStack] = (
weakref.WeakKeyDictionary()
)
def __repr__(self) -> str:
return util.repr_(self)
@property
def current(self) -> _HashStack:
current_thread = threading.current_thread()
stack = self._stacks.get(current_thread, None)
if stack is None:
stack = _HashStack()
self._stacks[current_thread] = stack
return stack
hash_stacks = _HashStacks()
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
def _float_to_bytes(f: float) -> bytes:
# Lazy-load for performance reasons.
import struct
# Floats are 64bit in Python, so we need to use the "d" format.
return struct.pack("<d", f)
def _key(obj: Any | None) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj: Any) -> bool:
return (
isinstance(obj, (bytes, bytearray, str, float, int, bool, uuid.UUID))
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple) and all(map(is_simple, obj)):
return obj
if isinstance(obj, list) and all(map(is_simple, obj)):
return ("__l", tuple(obj))
if inspect.isbuiltin(obj) or inspect.isroutine(obj) or inspect.iscode(obj):
return id(obj)
return NoResult
| _HashStacks |
python | kamyu104__LeetCode-Solutions | Python/find-the-integer-added-to-array-ii.py | {
"start": 725,
"end": 1582
} | class ____(object):
def minimumAddedInteger(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
def check(cnt2, cnt1):
# return cnt2 <= cnt1 # for python3
return all(cnt1.get(k, 0)-v >= 0 for k, v in cnt2.iteritems()) # for python2
def topk(a, k): # Time: O(k * n)
result = [float("-inf")]*k
for x in a:
for i in xrange(len(result)):
if x > result[i]:
result[i], x = x, result[i]
return result
mx = max(nums2)
cnt2 = collections.Counter(nums2)
return next(d for d in [mx-x for x in topk(nums1, 3)] if check(cnt2, collections.Counter(x+d for x in nums1)))
# Time: O(nlogn)
# Space: O(1)
# sort
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/string_bytes_split_op_test.py | {
"start": 1111,
"end": 3019
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters(
# Scalar input -> vector output
(b'hello', [b'h', b'e', b'l', b'l', b'o']),
# Vector input -> 2D ragged output
([b'hello', b'123'],
[[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]),
# 2D tensor input -> 3D ragged output
([[b'abc', b'de'], [b'fgh', b'']],
[[[b'a', b'b', b'c'], [b'd', b'e']], [[b'f', b'g', b'h'], []]]),
# 2D ragged input -> 3D ragged output
(ragged_factory_ops.constant_value([[b'abc', b'de'], [b'f']]),
[[[b'a', b'b', b'c'], [b'd', b'e']], [[b'f']]]),
# 3D input -> 4D ragged output
(ragged_factory_ops.constant_value(
[[[b'big', b'small'], [b'red']], [[b'cat', b'dog'], [b'ox']]]),
[[[[b'b', b'i', b'g'], [b's', b'm', b'a', b'l', b'l']],
[[b'r', b'e', b'd']]],
[[[b'c', b'a', b't'], [b'd', b'o', b'g']],
[[b'o', b'x']]]]),
# Empty string
(b'', []),
# Null byte
(b'\x00', [b'\x00']),
# Unicode
(u'仅今年前'.encode('utf-8'),
[b'\xe4', b'\xbb', b'\x85', b'\xe4', b'\xbb', b'\x8a', b'\xe5',
b'\xb9', b'\xb4', b'\xe5', b'\x89', b'\x8d']),
)
def testStringToBytes(self, source, expected):
expected = ragged_factory_ops.constant_value(expected, dtype=object)
result = ragged_string_ops.string_bytes_split(source)
self.assertAllEqual(expected, result)
def testUnknownInputRankError(self):
# Use a tf.function that erases shape information.
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def f(v):
return ragged_string_ops.string_bytes_split(v)
with self.assertRaisesRegex(TypeError,
'Binding inputs to tf.function failed'):
f(['foo'])
if __name__ == '__main__':
test.main()
| StringsToBytesOpTest |
python | matplotlib__matplotlib | lib/matplotlib/widgets.py | {
"start": 21519,
"end": 34474
} | class ____(SliderBase):
"""
A slider representing a range of floating point values. Defines the min and
max of the range via the *val* attribute as a tuple of (min, max).
Create a slider that defines a range contained within [*valmin*, *valmax*]
in Axes *ax*. For the slider to remain responsive you must maintain a
reference to it. Call :meth:`on_changed` to connect to the slider event.
Attributes
----------
val : tuple of float
Slider value.
"""
def __init__(
self,
ax,
label,
valmin,
valmax,
*,
valinit=None,
valfmt=None,
closedmin=True,
closedmax=True,
dragging=True,
valstep=None,
orientation="horizontal",
track_color='lightgrey',
handle_style=None,
**kwargs,
):
"""
Parameters
----------
ax : Axes
The Axes to put the slider in.
label : str
Slider label.
valmin : float
The minimum value of the slider.
valmax : float
The maximum value of the slider.
valinit : tuple of float or None, default: None
The initial positions of the slider. If None the initial positions
will be at the 25th and 75th percentiles of the range.
valfmt : str or callable, default: None
The way to format the range's minimal and maximal values. If a
string, it must be in %-format. If a callable, it must have the
signature ``valfmt(val: float) -> str``. If None, a
`.ScalarFormatter` is used.
closedmin : bool, default: True
Whether the slider interval is closed on the bottom.
closedmax : bool, default: True
Whether the slider interval is closed on the top.
dragging : bool, default: True
If True the slider can be dragged by the mouse.
valstep : float, default: None
If given, the slider will snap to multiples of *valstep*.
orientation : {'horizontal', 'vertical'}, default: 'horizontal'
The orientation of the slider.
track_color : :mpltype:`color`, default: 'lightgrey'
The color of the background track. The track is accessible for
further styling via the *track* attribute.
handle_style : dict
Properties of the slider handles. Default values are
========= ===== ======= =========================================
Key Value Default Description
========= ===== ======= =========================================
facecolor color 'white' The facecolor of the slider handles.
edgecolor color '.75' The edgecolor of the slider handles.
size int 10 The size of the slider handles in points.
========= ===== ======= =========================================
Other values will be transformed as marker{foo} and passed to the
`~.Line2D` constructor. e.g. ``handle_style = {'style'='x'}`` will
result in ``markerstyle = 'x'``.
Notes
-----
Additional kwargs are passed on to ``self.poly`` which is the
`~matplotlib.patches.Polygon` that draws the slider knob. See the
`.Polygon` documentation for valid property names (``facecolor``,
``edgecolor``, ``alpha``, etc.).
"""
super().__init__(ax, orientation, closedmin, closedmax,
valmin, valmax, valfmt, dragging, valstep)
# Set a value to allow _value_in_bounds() to work.
self.val = (valmin, valmax)
if valinit is None:
# Place at the 25th and 75th percentiles
extent = valmax - valmin
valinit = np.array([valmin + extent * 0.25,
valmin + extent * 0.75])
else:
valinit = self._value_in_bounds(valinit)
self.val = valinit
self.valinit = valinit
defaults = {'facecolor': 'white', 'edgecolor': '.75', 'size': 10}
handle_style = {} if handle_style is None else handle_style
marker_props = {
f'marker{k}': v for k, v in {**defaults, **handle_style}.items()
}
if orientation == "vertical":
self.track = Rectangle(
(.25, 0), .5, 2,
transform=ax.transAxes,
facecolor=track_color
)
ax.add_patch(self.track)
poly_transform = self.ax.get_yaxis_transform(which="grid")
handleXY_1 = [.5, valinit[0]]
handleXY_2 = [.5, valinit[1]]
else:
self.track = Rectangle(
(0, .25), 1, .5,
transform=ax.transAxes,
facecolor=track_color
)
ax.add_patch(self.track)
poly_transform = self.ax.get_xaxis_transform(which="grid")
handleXY_1 = [valinit[0], .5]
handleXY_2 = [valinit[1], .5]
self.poly = Polygon(np.zeros([5, 2]), **kwargs)
self._update_selection_poly(*valinit)
self.poly.set_transform(poly_transform)
self.poly.get_path()._interpolation_steps = 100
self.ax.add_patch(self.poly)
self.ax._request_autoscale_view()
self._handles = [
ax.plot(
*handleXY_1,
"o",
**marker_props,
clip_on=False
)[0],
ax.plot(
*handleXY_2,
"o",
**marker_props,
clip_on=False
)[0]
]
if orientation == "vertical":
self.label = ax.text(
0.5,
1.02,
label,
transform=ax.transAxes,
verticalalignment="bottom",
horizontalalignment="center",
)
self.valtext = ax.text(
0.5,
-0.02,
self._format(valinit),
transform=ax.transAxes,
verticalalignment="top",
horizontalalignment="center",
)
else:
self.label = ax.text(
-0.02,
0.5,
label,
transform=ax.transAxes,
verticalalignment="center",
horizontalalignment="right",
)
self.valtext = ax.text(
1.02,
0.5,
self._format(valinit),
transform=ax.transAxes,
verticalalignment="center",
horizontalalignment="left",
)
self._active_handle = None
self.set_val(valinit)
def _update_selection_poly(self, vmin, vmax):
"""
Update the vertices of the *self.poly* slider in-place
to cover the data range *vmin*, *vmax*.
"""
# The vertices are positioned
# 1 ------ 2
# | |
# 0, 4 ---- 3
verts = self.poly.xy
if self.orientation == "vertical":
verts[0] = verts[4] = .25, vmin
verts[1] = .25, vmax
verts[2] = .75, vmax
verts[3] = .75, vmin
else:
verts[0] = verts[4] = vmin, .25
verts[1] = vmin, .75
verts[2] = vmax, .75
verts[3] = vmax, .25
def _min_in_bounds(self, min):
"""Ensure the new min value is between valmin and self.val[1]."""
if min <= self.valmin:
if not self.closedmin:
return self.val[0]
min = self.valmin
if min > self.val[1]:
min = self.val[1]
return self._stepped_value(min)
def _max_in_bounds(self, max):
"""Ensure the new max value is between valmax and self.val[0]."""
if max >= self.valmax:
if not self.closedmax:
return self.val[1]
max = self.valmax
if max <= self.val[0]:
max = self.val[0]
return self._stepped_value(max)
def _value_in_bounds(self, vals):
"""Clip min, max values to the bounds."""
return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))
def _update_val_from_pos(self, pos):
"""Update the slider value based on a given position."""
idx = np.argmin(np.abs(self.val - pos))
if idx == 0:
val = self._min_in_bounds(pos)
self.set_min(val)
else:
val = self._max_in_bounds(pos)
self.set_max(val)
if self._active_handle:
if self.orientation == "vertical":
self._active_handle.set_ydata([val])
else:
self._active_handle.set_xdata([val])
def _update(self, event):
"""Update the slider position."""
if self.ignore(event) or event.button != 1:
return
if event.name == "button_press_event" and self.ax.contains(event)[0]:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
if (event.name == "button_release_event"
or event.name == "button_press_event" and not self.ax.contains(event)[0]):
self.drag_active = False
event.canvas.release_mouse(self.ax)
self._active_handle = None
return
# determine which handle was grabbed
xdata, ydata = self._get_data_coords(event)
handle_index = np.argmin(np.abs(
[h.get_xdata()[0] - xdata for h in self._handles]
if self.orientation == "horizontal" else
[h.get_ydata()[0] - ydata for h in self._handles]))
handle = self._handles[handle_index]
# these checks ensure smooth behavior if the handles swap which one
# has a higher value. i.e. if one is dragged over and past the other.
if handle is not self._active_handle:
self._active_handle = handle
self._update_val_from_pos(xdata if self.orientation == "horizontal" else ydata)
def _format(self, val):
"""Pretty-print *val*."""
if self.valfmt is not None:
if callable(self.valfmt):
return f"({self.valfmt(val[0])}, {self.valfmt(val[1])})"
else:
return f"({self.valfmt % val[0]}, {self.valfmt % val[1]})"
else:
_, s1, s2, _ = self._fmt.format_ticks(
[self.valmin, *val, self.valmax]
)
# fmt.get_offset is actually the multiplicative factor, if any.
s1 += self._fmt.get_offset()
s2 += self._fmt.get_offset()
# Use f string to avoid issues with backslashes when cast to a str
return f"({s1}, {s2})"
def set_min(self, min):
"""
Set the lower value of the slider to *min*.
Parameters
----------
min : float
"""
self.set_val((min, self.val[1]))
def set_max(self, max):
"""
Set the lower value of the slider to *max*.
Parameters
----------
max : float
"""
self.set_val((self.val[0], max))
def set_val(self, val):
"""
Set slider value to *val*.
Parameters
----------
val : tuple or array-like of float
"""
val = np.sort(val)
_api.check_shape((2,), val=val)
# Reset value to allow _value_in_bounds() to work.
self.val = (self.valmin, self.valmax)
vmin, vmax = self._value_in_bounds(val)
self._update_selection_poly(vmin, vmax)
if self.orientation == "vertical":
self._handles[0].set_ydata([vmin])
self._handles[1].set_ydata([vmax])
else:
self._handles[0].set_xdata([vmin])
self._handles[1].set_xdata([vmax])
self.valtext.set_text(self._format((vmin, vmax)))
if self.drawon:
self.ax.get_figure(root=True).canvas.draw_idle()
self.val = (vmin, vmax)
if self.eventson:
self._observers.process("changed", (vmin, vmax))
def on_changed(self, func):
"""
Connect *func* as callback function to changes of the slider value.
Parameters
----------
func : callable
Function to call when slider is changed. The function
must accept a 2-tuple of floats as its argument.
Returns
-------
int
Connection id (which can be used to disconnect *func*).
"""
return self._observers.connect('changed', lambda val: func(val))
def _expand_text_props(props):
props = cbook.normalize_kwargs(props, mtext.Text)
return cycler(**props)() if props else itertools.repeat({})
| RangeSlider |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 18569,
"end": 21883
} | class ____:
@pytest.fixture(autouse=True)
def reset_secrets_masker_and_skip_escape(self):
self.secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(self.secrets_masker)
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker",
return_value=self.secrets_masker,
):
with patch("airflow_shared.secrets_masker.secrets_masker.re.escape", lambda x: x):
yield
def test_calling_mask_secret_adds_adaptations_for_returned_str(self):
import urllib.parse
with env_vars({"AIRFLOW__LOGGING__SECRET_MASK_ADAPTER": "urllib.parse.quote"}):
# Manually configure the adapter since we don't read from config anymore
self.secrets_masker.secret_mask_adapter = urllib.parse.quote
mask_secret("secret<>&", None)
assert self.secrets_masker.patterns == {"secret%3C%3E%26", "secret<>&"}
def test_calling_mask_secret_adds_adaptations_for_returned_iterable(self):
import urllib.parse
with env_vars({"AIRFLOW__LOGGING__SECRET_MASK_ADAPTER": "urllib.parse.urlparse"}):
# Manually configure the adapter since we don't read from config anymore
self.secrets_masker.secret_mask_adapter = urllib.parse.urlparse
mask_secret("https://airflow.apache.org/docs/apache-airflow/stable", "password")
assert self.secrets_masker.patterns == {
"https",
"airflow.apache.org",
"/docs/apache-airflow/stable",
"https://airflow.apache.org/docs/apache-airflow/stable",
}
def test_calling_mask_secret_not_set(self):
with env_vars({"AIRFLOW__LOGGING__SECRET_MASK_ADAPTER": ""}):
# Ensure no adapter is set
self.secrets_masker.secret_mask_adapter = None
mask_secret("a secret")
assert self.secrets_masker.patterns == {"a secret"}
@pytest.mark.parametrize(
("secret", "should_be_masked", "is_first_short", "comment"),
[
("abc", False, True, "short secret with first warning"),
("def", False, False, "short secret with no warning"),
("airflow", False, False, "keyword that should be skipped"),
("valid_secret", True, False, "valid secret that should be masked"),
],
)
def test_add_mask_short_secrets_and_skip_keywords(
self, caplog, secret, should_be_masked, is_first_short, comment
):
if is_first_short:
SecretsMasker._has_warned_short_secret = False
else:
SecretsMasker._has_warned_short_secret = True
filt = SecretsMasker()
configure_secrets_masker_for_test(filt, min_length=5)
caplog.clear()
filt.add_mask(secret)
if is_first_short:
assert "Skipping masking for a secret as it's too short" in caplog.text
assert len(caplog.records) == 1
else:
assert "Skipping masking for a secret as it's too short" not in caplog.text
if should_be_masked:
assert secret in filt.patterns
else:
assert secret not in filt.patterns
caplog.clear()
if should_be_masked:
assert filt.replacer is not None
| TestMaskSecretAdapter |
python | coleifer__peewee | peewee.py | {
"start": 40522,
"end": 40824
} | class ____(object):
def __and__(self, other):
return self.bin_and(other)
def __or__(self, other):
return self.bin_or(other)
def __sub__(self, other):
return self.bin_and(other.bin_negated())
def __invert__(self):
return BitwiseNegated(self)
| BitwiseMixin |
python | doocs__leetcode | solution/3400-3499/3461.Check If Digits Are Equal in String After Operations I/Solution.py | {
"start": 0,
"end": 253
} | class ____:
def hasSameDigits(self, s: str) -> bool:
t = list(map(int, s))
n = len(t)
for k in range(n - 1, 1, -1):
for i in range(k):
t[i] = (t[i] + t[i + 1]) % 10
return t[0] == t[1]
| Solution |
python | davidhalter__jedi | test/completion/docstring.py | {
"start": 1400,
"end": 2669
} | class ____():
pass
def list_with_non_imports(lst):
"""
Should be able to work with tuples and lists and still import stuff.
:type lst: (random.Random, [collections.defaultdict, ...])
"""
#? ['seed']
lst[0].seed
import collections as col
# use some weird index
#? col.defaultdict()
lst[1][10]
def two_dots(a):
"""
:type a: json.decoder.JSONDecoder
"""
#? ['raw_decode']
a.raw_decode
# sphinx returns
def return_module_object():
"""
:rtype: :class:`random.Random`
"""
#? ['seed']
return_module_object().seed
# -----------------
# epydoc style
# -----------------
def epydoc(a, b):
""" asdfasdf
@type a: str
@param a: blablabla
@type b: (str, int)
@param b: blablah
@rtype: list
"""
#? str()
a
#? str()
b[0]
#? int()
b[1]
#? list()
epydoc()
# Returns with param type only
def rparam(a,b):
"""
@type a: str
"""
return a
#? str()
rparam()
# Composite types
def composite():
"""
@rtype: (str, int, dict)
"""
x, y, z = composite()
#? str()
x
#? int()
y
#? dict()
z
# Both docstring and calculated return type
def both():
"""
@rtype: str
"""
return 23
#? str() int()
both()
| ProgramNode2 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 35073,
"end": 35502
} | class ____(ChainedAssetSelection):
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
selection = self.child.resolve_inner(asset_graph, allow_missing=allow_missing)
return fetch_sources(asset_graph, selection)
def to_selection_str(self) -> str:
return f"roots({self.child.to_selection_str()})"
@whitelist_for_serdes
| RootsAssetSelection |
python | numba__numba | numba/cuda/deviceufunc.py | {
"start": 17625,
"end": 20201
} | class ____(object):
'''Determine how to broadcast and execute a gufunc
base on input shape and signature
'''
@classmethod
def from_signature(cls, signature):
return cls(*parse_signature(signature))
def __init__(self, inputsig, outputsig):
# signatures
self.sin = inputsig
self.sout = outputsig
# argument count
self.nin = len(self.sin)
self.nout = len(self.sout)
def schedule(self, ishapes):
if len(ishapes) != self.nin:
raise TypeError('invalid number of input argument')
# associate symbol values for input signature
symbolmap = {}
outer_shapes = []
inner_shapes = []
for argn, (shape, symbols) in enumerate(zip(ishapes, self.sin)):
argn += 1 # start from 1 for human
inner_ndim = len(symbols)
if len(shape) < inner_ndim:
fmt = "arg #%d: insufficient inner dimension"
raise ValueError(fmt % (argn,))
if inner_ndim:
inner_shape = shape[-inner_ndim:]
outer_shape = shape[:-inner_ndim]
else:
inner_shape = ()
outer_shape = shape
for axis, (dim, sym) in enumerate(zip(inner_shape, symbols)):
axis += len(outer_shape)
if sym in symbolmap:
if symbolmap[sym] != dim:
fmt = "arg #%d: shape[%d] mismatch argument"
raise ValueError(fmt % (argn, axis))
symbolmap[sym] = dim
outer_shapes.append(outer_shape)
inner_shapes.append(inner_shape)
# solve output shape
oshapes = []
for outsig in self.sout:
oshape = []
for sym in outsig:
oshape.append(symbolmap[sym])
oshapes.append(tuple(oshape))
# find the biggest outershape as looping dimension
sizes = [reduce(operator.mul, s, 1) for s in outer_shapes]
largest_i = np.argmax(sizes)
loopdims = outer_shapes[largest_i]
pinned = [False] * self.nin # same argument for each iteration
for i, d in enumerate(outer_shapes):
if d != loopdims:
if d == (1,) or d == ():
pinned[i] = True
else:
fmt = "arg #%d: outer dimension mismatch"
raise ValueError(fmt % (i + 1,))
return GUFuncSchedule(self, inner_shapes, oshapes, loopdims, pinned)
| GUFuncEngine |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-build-sturdy-brick-wall.py | {
"start": 1308,
"end": 2941
} | class ____(object):
def buildWall(self, height, width, bricks):
"""
:type height: int
:type width: int
:type bricks: List[int]
:rtype: int
"""
MOD = 10**9+7
def backtracking(height, width, bricks, total, mask, lookup, patterns):
if mask in lookup:
return
lookup.add(mask)
if total >= width:
if total == width:
patterns.append(mask^(1<<width))
return
for x in bricks:
backtracking(height, width, bricks, total+x, mask|(1<<(total+x)), lookup, patterns)
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b % MOD for a, b in itertools.izip(row, col)) % MOD for col in ZB] for row in A]
def matrix_expo(A, K):
result = [[int(i == j) for j in xrange(len(A))] for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
patterns, lookup = [], set()
backtracking(height, width, bricks, 0, 0, lookup, patterns)
return reduce(lambda x,y: (x+y)%MOD,
matrix_mult([[1]*len(patterns)],
matrix_expo([[int((mask1 & mask2) == 0)
for mask2 in patterns]
for mask1 in patterns], height-1))[0],
0) # Time: O(p^3 * logh), Space: O(p^2)
| Solution_TLE |
python | sqlalchemy__sqlalchemy | test/orm/test_lambdas.py | {
"start": 11191,
"end": 11869
} | class ____(_poly_fixtures._Polymorphic):
run_setup_mappers = "once"
__dialect__ = "default"
def test_join_second_prop_lambda(self):
Company = self.classes.Company
Manager = self.classes.Manager
s = Session(future=True)
q = s.query(Company).join(lambda: Manager, lambda: Company.employees)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id",
)
| PolymorphicTest |
python | wandb__wandb | wandb/vendor/pygments/formatters/img.py | {
"start": 1336,
"end": 1420
} | class ____(Exception):
"""When there are no usable fonts specified"""
| FontNotFound |
python | walkccc__LeetCode | solutions/1663. Smallest String With A Given Numeric Value/1663.py | {
"start": 0,
"end": 273
} | class ____:
def getSmallestString(self, n: int, k: int) -> str:
ans = []
for i in range(n):
remainingLetters = n - 1 - i
rank = max(1, k - remainingLetters * 26)
ans.append(chr(ord('a') + rank - 1))
k -= rank
return ''.join(ans)
| Solution |
python | falconry__falcon | falcon/errors.py | {
"start": 4983,
"end": 5128
} | class ____(WebSocketDisconnected):
"""The routed resource does not contain an ``on_websocket()`` handler."""
pass
| WebSocketHandlerNotFound |
python | pytest-dev__pytest | testing/_py/test_local.py | {
"start": 28590,
"end": 28909
} | class ____:
pytestmark = win32only
def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
tmpdir.ensure("hello")
h = tmpdir.ensure("hello.bat")
x = local.sysfind("hello")
assert x == h
| TestExecutionOnWindows |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 32032,
"end": 32354
} | class ____:
xlErrDiv0 = 2007 # from enum XlCVError
xlErrNA = 2042 # from enum XlCVError
xlErrName = 2029 # from enum XlCVError
xlErrNull = 2000 # from enum XlCVError
xlErrNum = 2036 # from enum XlCVError
xlErrRef = 2023 # from enum XlCVError
xlErrValue = 2015 # from enum XlCVError
| CVError |
python | django__django | django/core/management/commands/runserver.py | {
"start": 724,
"end": 7539
} | class ____(BaseCommand):
help = "Starts a lightweight web server for development."
stealth_options = ("shutdown_message",)
suppressed_base_arguments = {"--verbosity", "--traceback"}
default_addr = "127.0.0.1"
default_addr_ipv6 = "::1"
default_port = "8000"
protocol = "http"
server_cls = WSGIServer
def add_arguments(self, parser):
parser.add_argument(
"addrport", nargs="?", help="Optional port number, or ipaddr:port"
)
parser.add_argument(
"--ipv6",
"-6",
action="store_true",
dest="use_ipv6",
help="Tells Django to use an IPv6 address.",
)
parser.add_argument(
"--nothreading",
action="store_false",
dest="use_threading",
help="Tells Django to NOT use threading.",
)
parser.add_argument(
"--noreload",
action="store_false",
dest="use_reloader",
help="Tells Django to NOT use the auto-reloader.",
)
def execute(self, *args, **options):
if options["no_color"]:
# We rely on the environment because it's currently the only
# way to reach WSGIRequestHandler. This seems an acceptable
# compromise considering `runserver` runs indefinitely.
os.environ["DJANGO_COLORS"] = "nocolor"
super().execute(*args, **options)
def get_handler(self, *args, **options):
"""Return the default WSGI handler for the runner."""
return get_internal_wsgi_application()
def get_check_kwargs(self, options):
"""Validation is called explicitly each time the server reloads."""
return {"tags": set()}
def handle(self, *args, **options):
if not settings.DEBUG and not settings.ALLOWED_HOSTS:
raise CommandError("You must set settings.ALLOWED_HOSTS if DEBUG is False.")
self.use_ipv6 = options["use_ipv6"]
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError("Your Python does not support IPv6.")
self._raw_ipv6 = False
if not options["addrport"]:
self.addr = ""
self.port = self.default_port
else:
m = re.match(naiveip_re, options["addrport"])
if m is None:
raise CommandError(
'"%s" is not a valid port number '
"or address:port pair." % options["addrport"]
)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." % self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
if not self.addr:
self.addr = self.default_addr_ipv6 if self.use_ipv6 else self.default_addr
self._raw_ipv6 = self.use_ipv6
self.run(**options)
def run(self, **options):
"""Run the server, using the autoreloader if needed."""
use_reloader = options["use_reloader"]
if use_reloader:
autoreload.run_with_reloader(self.inner_run, **options)
else:
self.inner_run(None, **options)
def inner_run(self, *args, **options):
# If an exception was silenced in ManagementUtility.execute in order
# to be raised in the child process, raise it now.
autoreload.raise_last_exception()
threading = options["use_threading"]
# 'shutdown_message' is a stealth option.
shutdown_message = options.get("shutdown_message", "")
if not options["skip_checks"]:
self.stdout.write("Performing system checks...\n\n")
check_kwargs = super().get_check_kwargs(options)
check_kwargs["display_num_errors"] = True
self.check(**check_kwargs)
# Need to check migrations here, so can't use the
# requires_migrations_check attribute.
self.check_migrations()
# Close all connections opened during migration checking.
for conn in connections.all(initialized_only=True):
conn.close()
try:
handler = self.get_handler(*args, **options)
run(
self.addr,
int(self.port),
handler,
ipv6=self.use_ipv6,
threading=threading,
on_bind=self.on_bind,
server_cls=self.server_cls,
)
except OSError as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
}
try:
error_text = ERRORS[e.errno]
except KeyError:
error_text = e
self.stderr.write("Error: %s" % error_text)
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
self.stdout.write(shutdown_message)
sys.exit(0)
def on_bind(self, server_port):
quit_command = "CTRL-BREAK" if sys.platform == "win32" else "CONTROL-C"
if self._raw_ipv6:
addr = f"[{self.addr}]"
elif self.addr == "0":
addr = "0.0.0.0"
else:
addr = self.addr
now = datetime.now().strftime("%B %d, %Y - %X")
version = self.get_version()
print(
f"{now}\n"
f"Django version {version}, using settings {settings.SETTINGS_MODULE!r}\n"
f"Starting development server at {self.protocol}://{addr}:{server_port}/\n"
f"Quit the server with {quit_command}.",
file=self.stdout,
)
docs_version = get_docs_version()
if os.environ.get("DJANGO_RUNSERVER_HIDE_WARNING") != "true":
self.stdout.write(
self.style.WARNING(
"WARNING: This is a development server. Do not use it in a "
"production setting. Use a production WSGI or ASGI server "
"instead.\nFor more information on production servers see: "
f"https://docs.djangoproject.com/en/{docs_version}/howto/"
"deployment/"
)
)
| Command |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/device.py | {
"start": 4863,
"end": 6945
} | class ____(LoginRequiredMixin, FormView):
"""
The view where the user approves or denies a device.
"""
template_name = "oauth2_provider/device/accept_deny.html"
form_class = DeviceConfirmForm
def get_object(self):
"""
Returns the DeviceGrant object in the AUTHORIZATION_PENDING state identified
by the slugs client_id and user_code. Raises Http404 if not found.
"""
client_id, user_code = self.kwargs.get("client_id"), self.kwargs.get("user_code")
return get_object_or_404(
DeviceGrant,
client_id=client_id,
user_code=user_code,
status=DeviceGrant.AUTHORIZATION_PENDING,
)
def get_success_url(self):
return reverse(
"oauth2_provider:device-grant-status",
kwargs={
"client_id": self.kwargs["client_id"],
"user_code": self.kwargs["user_code"],
},
)
def get(self, request, *args, **kwargs):
"""
Enable GET requests for improved user experience. But validate that the URL params
are correct (i.e. there exists a device grant in the db that corresponds to the URL
params) by calling .get_object()
"""
_ = self.get_object() # raises 404 if URL parameters are incorrect
return super().get(request, args, kwargs)
def form_valid(self, form):
"""
Uses get_object() to retrieves the DeviceGrant object and updates its state
to authorized or denied, based on the user input.
"""
device = self.get_object()
action = form.cleaned_data["action"]
if action == "accept":
device.status = device.AUTHORIZED
device.save(update_fields=["status"])
return super().form_valid(form)
elif action == "deny":
device.status = device.DENIED
device.save(update_fields=["status"])
return super().form_valid(form)
else:
return http.HttpResponseBadRequest()
| DeviceConfirmView |
python | tensorflow__tensorflow | tensorflow/python/keras/callbacks.py | {
"start": 7592,
"end": 21810
} | class ____:
"""Container abstracting a list of callbacks."""
def __init__(self,
callbacks=None,
add_history=False,
add_progbar=False,
model=None,
**params):
"""Container for `Callback` instances.
This object wraps a list of `Callback` instances, making it possible
to call them all at once via a single endpoint
(e.g. `callback_list.on_epoch_end(...)`).
Args:
callbacks: List of `Callback` instances.
add_history: Whether a `History` callback should be added, if one does not
already exist in the `callbacks` list.
add_progbar: Whether a `ProgbarLogger` callback should be added, if one
does not already exist in the `callbacks` list.
model: The `Model` these callbacks are used with.
**params: If provided, parameters will be passed to each `Callback` via
`Callback.set_params`.
"""
self.callbacks = nest.flatten(callbacks) if callbacks else []
self._add_default_callbacks(add_history, add_progbar)
if model:
self.set_model(model)
if params:
self.set_params(params)
# Performance optimization: determines if batch hooks need to be called.
# pylint: disable=protected-access
self._supports_tf_logs = all(
getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks)
self._batch_hooks_support_tf_logs = all(
getattr(cb, '_supports_tf_logs', False)
for cb in self.callbacks
if cb._implements_train_batch_hooks() or cb
._implements_test_batch_hooks() or cb._implements_predict_batch_hooks())
self._should_call_train_batch_hooks = any(
cb._implements_train_batch_hooks() for cb in self.callbacks)
self._should_call_test_batch_hooks = any(
cb._implements_test_batch_hooks() for cb in self.callbacks)
self._should_call_predict_batch_hooks = any(
cb._implements_predict_batch_hooks() for cb in self.callbacks)
# pylint: enable=protected-access
self._disallow_batch_hooks_in_ps_strategy()
# Performance check: Check batch hooks for slowness compared to batch time.
# Only run check for custom callbacks (i.e. not present in this file).
self._check_timing = any(
cbk.__class__.__name__ not in globals() for cbk in self.callbacks)
self._num_batches_for_timing_check = 5
self._hook_times = {}
self._batch_start_time = None
self._batch_times = []
def _add_default_callbacks(self, add_history, add_progbar):
"""Adds `Callback`s that are always present."""
self._progbar = None
self._history = None
for cb in self.callbacks:
if isinstance(cb, ProgbarLogger):
self._progbar = cb
elif isinstance(cb, History):
self._history = cb
if self._progbar is None and add_progbar:
self._progbar = ProgbarLogger(count_mode='steps')
self.callbacks.insert(0, self._progbar)
if self._history is None and add_history:
self._history = History()
self.callbacks.append(self._history)
def _process_logs(self, logs, is_batch_hook=False):
"""Turns tensors into numpy arrays or Python scalars if necessary."""
if logs is None:
return {}
if self._supports_tf_logs:
return logs
if is_batch_hook and self._batch_hooks_support_tf_logs:
return logs
return tf_utils.sync_to_numpy_or_python_type(logs)
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
if self._history:
model.history = self._history
for callback in self.callbacks:
callback.set_model(model)
def _call_batch_hook(self, mode, hook, batch, logs=None):
"""Helper function for all batch_{begin | end} methods."""
if not self.callbacks:
return
if hook == 'begin':
self._call_batch_begin_hook(mode, batch, logs)
elif hook == 'end':
self._call_batch_end_hook(mode, batch, logs)
else:
raise ValueError('Unrecognized hook: {}'.format(hook))
def _call_batch_begin_hook(self, mode, batch, logs):
"""Helper function for `on_*_batch_begin` methods."""
hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
self._call_batch_hook_helper(hook_name, batch, logs)
if self._check_timing:
self._batch_start_time = time.time()
def _call_batch_end_hook(self, mode, batch, logs):
"""Helper function for `on_*_batch_end` methods."""
hook_name = 'on_{mode}_batch_end'.format(mode=mode)
if self._check_timing and batch >= 1:
batch_time = time.time() - self._batch_start_time
self._batch_times.append(batch_time)
self._call_batch_hook_helper(hook_name, batch, logs)
if len(self._batch_times) >= self._num_batches_for_timing_check:
end_hook_name = hook_name
begin_hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
avg_batch_time = sum(self._batch_times) / len(self._batch_times)
avg_end_hook_time = sum(self._hook_times[end_hook_name]) / len(
self._hook_times[end_hook_name])
avg_begin_hook_time = sum(self._hook_times[begin_hook_name]) / len(
self._hook_times[begin_hook_name])
threshold_time = 1.0 * avg_batch_time
warning_msg = ('Callback method `{hook}` is slow compared to '
'the batch time (batch time: {batch_time:.4f}s vs '
'`{hook}` time: {hook_time:.4f}s). Check your callbacks.')
if avg_begin_hook_time > threshold_time:
logging.warning(warning_msg.format(
hook=begin_hook_name,
batch_time=avg_batch_time,
hook_time=avg_begin_hook_time))
if avg_end_hook_time > threshold_time:
logging.warning(warning_msg.format(
hook=end_hook_name,
batch_time=avg_batch_time,
hook_time=avg_end_hook_time))
self._check_timing = False
self._batch_start_time = None
self._batch_times = []
self._hook_times = {}
def _call_batch_hook_helper(self, hook_name, batch, logs):
"""Helper function for `on_*_batch_*` methods."""
if self._check_timing:
start_time = time.time()
logs = self._process_logs(logs, is_batch_hook=True)
for callback in self.callbacks:
hook = getattr(callback, hook_name)
hook(batch, logs)
if self._check_timing:
if hook_name not in self._hook_times:
self._hook_times[hook_name] = []
self._hook_times[hook_name].append(time.time() - start_time)
def _call_begin_hook(self, mode):
"""Helper function for on_{train|test|predict}_begin methods."""
if mode == ModeKeys.TRAIN:
self.on_train_begin()
elif mode == ModeKeys.TEST:
self.on_test_begin()
else:
self.on_predict_begin()
def _call_end_hook(self, mode):
"""Helper function for on_{train|test|predict}_end methods."""
if mode == ModeKeys.TRAIN:
self.on_train_end()
elif mode == ModeKeys.TEST:
self.on_test_end()
else:
self.on_predict_end()
def on_batch_begin(self, batch, logs=None):
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_batch_end(self, batch, logs=None):
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
"""Calls the `on_epoch_begin` methods of its callbacks.
This function should only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_epoch_begin(epoch, logs)
def on_epoch_end(self, epoch, logs=None):
"""Calls the `on_epoch_end` methods of its callbacks.
This function should only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
logs = self._process_logs(logs)
for callback in self.callbacks:
callback.on_epoch_end(epoch, logs)
def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self._should_call_test_batch_hooks:
self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_test_batch_hooks:
self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing
the model's outputs.
"""
if self._should_call_predict_batch_hooks:
self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_predict_batch_hooks:
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def on_train_begin(self, logs=None):
    """Runs `on_train_begin` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_train_begin(processed)
def on_train_end(self, logs=None):
    """Runs `on_train_end` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_train_end(processed)
def on_test_begin(self, logs=None):
    """Runs `on_test_begin` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_test_begin(processed)
def on_test_end(self, logs=None):
    """Runs `on_test_end` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_test_end(processed)
def on_predict_begin(self, logs=None):
    """Runs `on_predict_begin` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_predict_begin(processed)
def on_predict_end(self, logs=None):
    """Runs `on_predict_end` on every registered callback.

    Args:
        logs: Dict. Currently no data is passed to this argument for this
            method but that may change in the future.
    """
    # Normalize once, then fan the same dict out to every callback.
    processed = self._process_logs(logs)
    for cb in self.callbacks:
        cb.on_predict_end(processed)
def __iter__(self):
    # Iterating the container yields the wrapped callbacks in
    # registration order (simply delegates to the underlying list).
    return iter(self.callbacks)
def _disallow_batch_hooks_in_ps_strategy(self):
    """Error out if batch-level callbacks are passed with PSStrategy."""
    # pylint: disable=protected-access
    strategy = distribute_lib.get_strategy()
    if not strategy._should_use_with_coordinator:
        return
    # Callbacks that declare `_supports_tf_logs` can accept RemoteValues
    # directly; everything else that implements a batch-level hook is
    # incompatible with the coordinator-based strategy.
    unsupported_callbacks = [
        cb for cb in self.callbacks
        if not getattr(cb, '_supports_tf_logs', False)
        and (cb._implements_train_batch_hooks()
             or cb._implements_test_batch_hooks()
             or cb._implements_predict_batch_hooks())
    ]
    if unsupported_callbacks:
        raise ValueError('Batch-level `Callback`s are not supported with '
                         '`ParameterServerStrategy`. Found unsupported '
                         'callbacks: {}'.format(unsupported_callbacks))
    # pylint: enable=protected-access
| CallbackList |
python | yaml__pyyaml | lib/yaml/events.py | {
"start": 2223,
"end": 2281
} | class ____(CollectionStartEvent):
pass
| SequenceStartEvent |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 82265,
"end": 83293
} | class ____:
def __init__(self, deterministic, *, warn_only=False, fill_uninitialized_memory=True):
    """Guard that temporarily applies torch deterministic-algorithm settings.

    Args:
        deterministic (bool): forwarded to `torch.use_deterministic_algorithms`.
        warn_only (bool): forwarded as `warn_only=`; warn instead of erroring
            on nondeterministic ops.
        fill_uninitialized_memory (bool): assigned to
            `torch.utils.deterministic.fill_uninitialized_memory`.
    """
    self.deterministic = deterministic
    self.warn_only = warn_only
    self.fill_uninitialized_memory = fill_uninitialized_memory
@classmethod
def _current_state(cls):
    """Snapshot torch's current global determinism settings as a guard."""
    deterministic = torch.are_deterministic_algorithms_enabled()
    warn_only = torch.is_deterministic_algorithms_warn_only_enabled()
    fill = torch.utils.deterministic.fill_uninitialized_memory  # type: ignore[attr-defined]
    return cls(
        deterministic,
        warn_only=warn_only,
        fill_uninitialized_memory=fill,
    )
def _update(self):
    # Push this guard's stored settings into torch's global determinism
    # state (mutates process-wide configuration).
    torch.use_deterministic_algorithms(self.deterministic, warn_only=self.warn_only)
    torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory  # type: ignore[attr-defined]
def __enter__(self):
    # Capture the pre-existing global state so __exit__ can restore it,
    # then apply this guard's settings.
    self._restore = self._current_state()
    self._update()
def __exit__(self, exception_type, exception_value, traceback):
    # Restore whatever determinism settings were active before __enter__,
    # regardless of whether an exception occurred.
    self._restore._update()
| DeterministicGuard |
python | huggingface__transformers | src/transformers/models/exaone4/modeling_exaone4.py | {
"start": 9317,
"end": 13088
} | class ____(nn.Module):
def __init__(self, config: Exaone4Config, layer_idx: int):
    """Attention module for Exaone4 with grouped KV heads and QK-norm.

    Args:
        config: model configuration providing head counts, hidden size,
            dropout, sliding-window settings, and RMS-norm epsilon.
        layer_idx: index of this layer; used for KV-cache updates and to
            look up the layer's attention type in `config.layer_types`.
    """
    super().__init__()
    self.config = config
    self.layer_idx = layer_idx
    self.num_attention_heads = config.num_attention_heads
    self.num_key_value_heads = config.num_key_value_heads
    self.hidden_size = config.hidden_size
    # Fall back to hidden_size / num_heads when the config predates `head_dim`.
    self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
    # How many query heads share each key/value head (grouped-query attention).
    self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
    self.attention_dropout = config.attention_dropout
    self.is_causal = True
    self.scaling = self.head_dim**-0.5
    self.sliding_window = config.sliding_window
    self.sliding_window_pattern = config.sliding_window_pattern
    # Hybrid models mark per-layer attention type; absent means not sliding.
    layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
    self.is_sliding = layer_type == "sliding_attention"
    self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
    self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
    self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
    self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=False)
    # Per-head RMS norms applied to queries and keys (QK-norm).
    self.q_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
    self.k_norm = Exaone4RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
    self,
    hidden_states: torch.Tensor,
    position_embeddings: tuple[torch.Tensor, torch.Tensor],
    attention_mask: Optional[torch.Tensor] = None,
    past_key_values: Optional[Cache] = None,
    cache_position: Optional[torch.LongTensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    **kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Compute attention over `hidden_states`.

    Args:
        hidden_states: input activations; last dim is the hidden size.
        position_embeddings: `(cos, sin)` rotary embedding tensors.
        attention_mask: optional mask forwarded to the attention kernel.
        past_key_values: optional KV cache, updated in place via `.update`.
        cache_position: positions used as cache-update metadata.
        position_ids: unused here; accepted for interface compatibility.
        **kwargs: forwarded to the selected attention implementation.

    Returns:
        `(attn_output, attn_weights)`; weights may be None depending on the
        attention backend. (Annotation corrected from a stale 3-tuple.)
    """
    input_shape = hidden_states.shape[:-1]
    hidden_shape = (*input_shape, -1, self.head_dim)
    # Project and reshape to (batch, heads, seq, head_dim).
    query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
    # We use QK-norm: RMS-normalize queries and keys per head before RoPE.
    query_states = self.q_norm(query_states)
    key_states = self.k_norm(key_states)
    cos, sin = position_embeddings
    # We use global NoPE for the hybrid attention model: rotary embeddings
    # are applied on sliding layers (and on all layers when there is no
    # sliding window); global layers of a hybrid model skip RoPE.
    if self.sliding_window is None or self.is_sliding:
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
    if past_key_values is not None:
        cache_kwargs = {
            "cache_position": cache_position,
        }
        key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
    # Dispatch to the configured attention backend (eager by default).
    attention_interface: Callable = eager_attention_forward
    if self.config._attn_implementation != "eager":
        attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
    attn_output, attn_weights = attention_interface(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        dropout=0.0 if not self.training else self.attention_dropout,
        scaling=self.scaling,
        sliding_window=self.sliding_window if self.is_sliding else None,
        **kwargs,
    )
    # Merge heads back into the hidden dimension and project out.
    attn_output = attn_output.reshape(*input_shape, -1).contiguous()
    attn_output = self.o_proj(attn_output)
    return attn_output, attn_weights
| Exaone4Attention |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.