language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/1452. People Whose List of Favorite Companies Is Not a Subset of Another List/1452.py | {
"start": 0,
"end": 443
} | class ____:
def peopleIndexes(self, favoriteCompanies: list[list[str]]) -> list[int]:
ans = []
n = len(favoriteCompanies)
companies = [set(comp) for comp in favoriteCompanies]
for i in range(n):
find = False
for j in range(n):
if i == j:
continue
if companies[i].issubset(companies[j]):
find = True
break
if not find:
ans.append(i)
return ans
| Solution |
python | python-excel__xlrd | xlrd/biffh.py | {
"start": 301,
"end": 417
} | class ____(Exception):
"""
An exception indicating problems reading data from an Excel file.
"""
| XLRDError |
python | lepture__authlib | authlib/integrations/starlette_client/apps.py | {
"start": 2380,
"end": 4140
} | class ____(
StarletteAppMixin, AsyncOAuth2Mixin, AsyncOpenIDMixin, BaseApp
):
client_cls = AsyncOAuth2Client
async def authorize_access_token(self, request, **kwargs):
if request.scope.get("method", "GET") == "GET":
error = request.query_params.get("error")
if error:
description = request.query_params.get("error_description")
raise OAuthError(error=error, description=description)
params = {
"code": request.query_params.get("code"),
"state": request.query_params.get("state"),
}
else:
async with request.form() as form:
params = {
"code": form.get("code"),
"state": form.get("state"),
}
if self.framework.cache:
session = None
else:
session = request.session
state_data = await self.framework.get_state_data(session, params.get("state"))
await self.framework.clear_state_data(session, params.get("state"))
params = self._format_state_params(state_data, params)
claims_options = kwargs.pop("claims_options", None)
claims_cls = kwargs.pop("claims_cls", None)
leeway = kwargs.pop("leeway", 120)
token = await self.fetch_access_token(**params, **kwargs)
if "id_token" in token and "nonce" in state_data:
userinfo = await self.parse_id_token(
token,
nonce=state_data["nonce"],
claims_options=claims_options,
claims_cls=claims_cls,
leeway=leeway,
)
token["userinfo"] = userinfo
return token
| StarletteOAuth2App |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 930105,
"end": 930543
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RemoveUpvote"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "subject")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
subject = sgqlc.types.Field(Votable, graphql_name="subject")
"""The votable subject."""
| RemoveUpvotePayload |
python | neetcode-gh__leetcode | python/0212-word-search-ii.py | {
"start": 0,
"end": 581
} | class ____:
def __init__(self):
self.children = {}
self.isWord = False
self.refs = 0
def addWord(self, word):
cur = self
cur.refs += 1
for c in word:
if c not in cur.children:
cur.children[c] = TrieNode()
cur = cur.children[c]
cur.refs += 1
cur.isWord = True
def removeWord(self, word):
cur = self
cur.refs -= 1
for c in word:
if c in cur.children:
cur = cur.children[c]
cur.refs -= 1
| TrieNode |
python | pytorch__pytorch | torch/_inductor/runtime/static_cuda_launcher.py | {
"start": 209,
"end": 10928
} | class ____:
"""
Parses the metadata of a CompiledKernel from Triton into a structure that can
launch the cuda kernel directly. Only works for triton kernels compiled to cubin.
Doing this avoids C++ codegen and compilation during compile, since we can use a
statically compiled library to launch the kernel. To avoid mallocing for the arguments,
we have a launcher for different numbers of arguments up to a max. StaticCudaLauncher
only supports # of arguments up until 10 for now.
Workflow:
Compile time:
1. Compile a kernel with triton and get a CompiledKernel
2. Instantiate kernel = StaticallyLaunchedCudaKernel(triton_kernel)
3. Write to a cubin file: kernel.write_cubin_to_file(filepath)
4. Call kernel.load_kernel() (CUDA should be initialized by this point) to load the cubin
Runtime:
5. Call kernel.run(grid, stream, args) to launch the kernel
Note that after step 3, StaticallyLaunchedCudaKernel is fully pickleable/serializable.
This allows it to be cached by FXGraphCache/TritonBundler, as well as sent from the worker
to the parent process in inductor.
There are two main versions of triton that we wish to support: 3.3 and 3.2. Triton makes considerable changes
to how it handles constants in 3.3, so there's some special logic necessary to handle both versions.
"""
def __init__(self, kernel: CompiledKernel) -> None:
# pyrefly: ignore [missing-attribute]
self.name = kernel.src.fn.__name__
# pyrefly: ignore [missing-attribute]
self.cubin_raw = kernel.asm.get("cubin", None)
# pyrefly: ignore [missing-attribute]
self.cubin_path = kernel._cubin_path
# Used by torch.compile to filter constants in older triton versions
# pyrefly: ignore [missing-attribute]
self.arg_names = kernel.src.fn.arg_names
# Const exprs that are declared by the triton kernel directly
# Used to generate the kernel launcher's def args
# pyrefly: ignore [missing-attribute]
self.declared_constexprs = get_constexprs(kernel.src.fn)
# pyrefly: ignore [missing-attribute]
self.hash = kernel.hash
if triton_knobs is None:
# pyrefly: ignore [missing-attribute]
launch_enter = kernel.__class__.launch_enter_hook
# pyrefly: ignore [missing-attribute]
launch_exit = kernel.__class__.launch_exit_hook
else:
launch_enter = triton_knobs.runtime.launch_enter_hook
launch_exit = triton_knobs.runtime.launch_exit_hook
def hook_is_empty(hook: Any) -> bool:
if hook is None:
return True
if (
triton_knobs
and (HookChain := getattr(triton_knobs, "HookChain", None)) is not None
and isinstance(hook, HookChain)
):
# Support hooks after https://github.com/triton-lang/triton/pull/7866
return len(hook.calls) == 0
return False
if not hook_is_empty(launch_enter) or not hook_is_empty(launch_exit):
raise NotImplementedError(
"We don't support launch enter or launch exit hooks"
)
# pyrefly: ignore [missing-attribute]
self.num_warps = kernel.metadata.num_warps
self.shared = (
# pyrefly: ignore [missing-attribute]
kernel.shared if hasattr(kernel, "shared") else kernel.metadata.shared
)
def needs_scratch_arg(scratch_name: str, param_name: str) -> bool:
# pyrefly: ignore [missing-attribute]
if hasattr(kernel.metadata, param_name):
if getattr(kernel.metadata, param_name) > 0:
raise NotImplementedError(
f"{scratch_name} scratch not yet supported"
)
return True
return False
# Newer triton versions pass an extra global scratch parameter to the compiled cuda kernel.
# Inductor never uses this field or enables it, but we still have to pass
# an extra None into the set of params if its enabled
self.has_global_scratch = needs_scratch_arg("Global", "global_scratch_size")
# same situation for profile scratch - triton-lang/triton#7258
self.has_profile_scratch = needs_scratch_arg("Profile", "profile_scratch_size")
# pyrefly: ignore [missing-attribute]
self.arg_tys = self.arg_ty_from_signature(kernel.src)
self.function: int | None = None # Loaded by load_kernel(on the parent process)
num_ctas = 1
if hasattr(kernel, "num_ctas"):
num_ctas = kernel.num_ctas
elif hasattr(kernel, "metadata"):
num_ctas = kernel.metadata.num_ctas
if num_ctas != 1:
raise NotImplementedError(
"Static cuda launcher only supports num_ctas == 1"
)
def reload_cubin_from_raw(self, filepath: str) -> str:
"""
If the cubin file triton generated gets deleted under us, we can
reload it from the raw cubin file.
"""
if self.cubin_path is None:
assert self.cubin_raw is not None
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "wb") as f:
f.write(self.cubin_raw)
self.cubin_path = filepath
return self.cubin_path
def load_kernel(self, device: int) -> None:
from torch._C import _StaticCudaLauncher
if self.function is not None:
return
assert hasattr(self, "cubin_path")
assert self.cubin_path is not None
(self.function, self.n_regs, self.n_spills) = _StaticCudaLauncher._load_kernel(
self.cubin_path, self.name, self.shared, device
)
# Don't need the cubin path anymore now that we've loaded
self.cubin_path = None
self.cubin_raw = None
@staticmethod
@functools.lru_cache
def type_mappings() -> dict[str, str]:
return {
"i1": "i",
"i8": "b",
"i16": "h",
"i32": "i",
"i64": "l",
"u1": "I",
"u8": "B",
"u16": "H",
"u32": "I",
"u64": "K",
"fp16": "f",
"bf16": "f",
"fp32": "f",
"f32": "f",
"fp64": "d",
# TODO handle nvTmaDesc/CUtensormap
}
def extract_type(self, ty: str) -> str:
"""
Takes a triton type from CompiledKernel.signature and
converts it into a single char encoding. _StaticCudaLauncher
will switch on this char to figure out what type the underlying
value should be passed to the triton kernel as.
"""
if ty[0] == "*":
return "O"
elif ty == "nvTmaDesc":
raise NotImplementedError("nvTmaDesc kernels are not yet supported")
return StaticallyLaunchedCudaKernel.type_mappings()[ty]
def arg_ty_from_signature(self, src: ASTSource) -> str:
def index_key(i: Any) -> int:
if isinstance(i, str):
# pyrefly: ignore [missing-attribute]
return src.fn.arg_names.index(i)
elif isinstance(i, tuple):
# In triton 3.3, src.fn.constants has tuples as a key
return i[0]
else:
return i
# pyrefly: ignore [missing-attribute]
signature = {index_key(key): value for key, value in src.signature.items()}
# Triton uses these as the main way to filter out constants passed to their cubin
constants = [index_key(key) for key in getattr(src, "constants", dict())]
# This value is always a superset of kernel.fn.constexprs: kernel.fn.constexprs are
# constants declared by the triton kernel directly, whereas this list can have
# constants that are unused by the triton kernel that triton figured out during
# compilation.
self.full_constexprs = constants
# Despite requiring them to be passed in, the triton CUDA launcher
# completely ignores the constexprs passed into it when generating code.
# So we can ignore them here too
params = []
for i in sorted(signature.keys()):
ty = signature[i]
# In newer triton versions, constants are passed in to signature with type `constexpr`
# In older triton versions, there can be constants in src.constants that are not `constexpr` in signature
# so we check both here
if ty == "constexpr" or i in constants:
pass
else:
# pyrefly: ignore [bad-argument-type]
params.append(self.extract_type(ty))
return "".join(params)
def __getstate__(self) -> dict[str, Any]:
# Remove objects that are no longer valid for pickling
state = self.__dict__.copy()
state["function"] = None
# Cubin paths aren't consistent across processes, so we clear
# and reload them.
state["cubin_path"] = None
return state
def run(
self,
grid_x: int,
grid_y: int,
grid_z: int,
stream: int,
*args: Unpack[tuple[object, ...]],
) -> None:
"""Actually run the kernel at runtime. This function is the hot codepath."""
from torch._C import _StaticCudaLauncher
# Assert load_kernel() has been called and args match
assert self.function is not None
# TODO: actually, if the args *don't* match, we probably should
# throw an exception. But if inductor is the only one calling this
# thing, it should always match.
# Get rid of constants before passing to cubin launcher
# Add a None if triton wants extra parameters for scratch spaces
arg_tys = self.arg_tys
for has_scratch in [self.has_global_scratch, self.has_profile_scratch]:
if has_scratch:
arg_tys = arg_tys + "O"
args = (*args, None)
# pyrefly: ignore [bad-argument-type]
assert len(args) == len(arg_tys)
# TODO: can handle grid functions here or in C++, so
# that we don't need the grid handler above.
_StaticCudaLauncher._launch_kernel(
self.function,
grid_x,
grid_y,
grid_z,
self.num_warps,
self.shared,
arg_tys,
# pyrefly: ignore [bad-argument-type]
args,
stream,
)
| StaticallyLaunchedCudaKernel |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 8018,
"end": 9704
} | class ____(DeprecatedList):
"""
An immutable collection of selectable EntryPoint objects.
"""
__slots__ = ()
def __getitem__(self, name): # -> EntryPoint:
"""
Get the EntryPoint in self matching name.
"""
if isinstance(name, int):
warnings.warn(
"Accessing entry points by index is deprecated. "
"Cast to tuple if needed.",
DeprecationWarning,
stacklevel=2,
)
return super().__getitem__(name)
try:
return next(iter(self.select(name=name)))
except StopIteration:
raise KeyError(name)
def select(self, **params):
"""
Select entry points from self that match the
given parameters (typically group and/or name).
"""
return EntryPoints(ep for ep in self if ep.matches(**params))
@property
def names(self):
"""
Return the set of all names of all entry points.
"""
return {ep.name for ep in self}
@property
def groups(self):
"""
Return the set of all groups of all entry points.
For coverage while SelectableGroups is present.
>>> EntryPoints().groups
set()
"""
return {ep.group for ep in self}
@classmethod
def _from_text_for(cls, text, dist):
return cls(ep._for(dist) for ep in cls._from_text(text))
@staticmethod
def _from_text(text):
return (
EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
for item in Sectioned.section_pairs(text or '')
)
| EntryPoints |
python | django__django | tests/template_tests/test_library.py | {
"start": 133,
"end": 1311
} | class ____(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_filter(self):
@self.library.filter
def func():
return ""
self.assertEqual(self.library.filters["func"], func)
def test_filter_parens(self):
@self.library.filter()
def func():
return ""
self.assertEqual(self.library.filters["func"], func)
def test_filter_name_arg(self):
@self.library.filter("name")
def func():
return ""
self.assertEqual(self.library.filters["name"], func)
def test_filter_name_kwarg(self):
@self.library.filter(name="name")
def func():
return ""
self.assertEqual(self.library.filters["name"], func)
def test_filter_call(self):
def func():
return ""
self.library.filter("name", func)
self.assertEqual(self.library.filters["name"], func)
def test_filter_invalid(self):
msg = "Unsupported arguments to Library.filter: (None, '')"
with self.assertRaisesMessage(ValueError, msg):
self.library.filter(None, "")
| FilterRegistrationTests |
python | getsentry__sentry | src/sentry/release_health/base.py | {
"start": 3089,
"end": 3309
} | class ____(TypedDict):
currentCrashFreeRate: float | None
previousCrashFreeRate: float | None
CurrentAndPreviousCrashFreeRates = Mapping[ProjectId, CurrentAndPreviousCrashFreeRate]
| CurrentAndPreviousCrashFreeRate |
python | pytorch__pytorch | benchmarks/instruction_counts/execution/work.py | {
"start": 1416,
"end": 5088
} | class ____:
"""Wraps subprocess.Popen for a given WorkOrder."""
_work_order: WorkOrder
_cpu_list: Optional[str]
_proc: PopenType
# Internal bookkeeping
_communication_file: str
_start_time: float
_end_time: Optional[float] = None
_retcode: Optional[int]
_result: Optional[Union[WorkerOutput, WorkerFailure]] = None
def __init__(self, work_order: WorkOrder, cpu_list: Optional[str]) -> None:
self._work_order = work_order
self._cpu_list = cpu_list
self._start_time = time.time()
self._communication_file = os.path.join(get_temp_dir(), f"{uuid.uuid4()}.pkl")
with open(self._communication_file, "wb") as f:
pickle.dump(self._work_order.timer_args, f)
self._proc = subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=SHELL,
)
def clone(self) -> "_BenchmarkProcess":
return _BenchmarkProcess(self._work_order, self._cpu_list)
@property
def cmd(self) -> str:
cmd: list[str] = []
if self._work_order.source_cmd is not None:
cmd.extend([self._work_order.source_cmd, "&&"])
cmd.append(_ENV)
if self._cpu_list is not None:
cmd.extend(
[
f"GOMP_CPU_AFFINITY={self._cpu_list}",
"taskset",
"--cpu-list",
self._cpu_list,
]
)
cmd.extend(
[
_PYTHON,
WORKER_PATH,
"--communication-file",
self._communication_file,
]
)
return " ".join(cmd)
@property
def duration(self) -> float:
return (self._end_time or time.time()) - self._start_time
@property
def result(self) -> Union[WorkerOutput, WorkerFailure]:
self._maybe_collect()
assert self._result is not None
return self._result
def poll(self) -> Optional[int]:
self._maybe_collect()
return self._retcode
def interrupt(self) -> None:
"""Soft interrupt. Allows subprocess to cleanup."""
self._proc.send_signal(signal.SIGINT)
def terminate(self) -> None:
"""Hard interrupt. Immediately SIGTERM subprocess."""
self._proc.terminate()
def _maybe_collect(self) -> None:
if self._result is not None:
# We've already collected the results.
return
self._retcode = self._proc.poll()
if self._retcode is None:
# `_proc` is still running
return
with open(self._communication_file, "rb") as f:
result = WorkerUnpickler(f).load_output()
if isinstance(result, WorkerOutput) and self._retcode:
# Worker managed to complete the designated task, but worker
# process did not finish cleanly.
result = WorkerFailure("Worker failed silently.")
if isinstance(result, WorkerTimerArgs):
# Worker failed, but did not write a result so we're left with the
# original TimerArgs. Grabbing all of stdout and stderr isn't
# ideal, but we don't have a better way to determine what to keep.
proc_stdout = self._proc.stdout
assert proc_stdout is not None
result = WorkerFailure(failure_trace=proc_stdout.read().decode("utf-8"))
self._result = result
self._end_time = time.time()
# Release communication file.
os.remove(self._communication_file)
| _BenchmarkProcess |
python | coleifer__peewee | tests/models.py | {
"start": 128059,
"end": 128585
} | class ____(BaseTestCase):
def test_set_database(self):
class Register(Model):
value = IntegerField()
db_a = get_in_memory_db()
db_b = get_in_memory_db()
Register._meta.set_database(db_a)
Register.create_table()
Register._meta.set_database(db_b)
self.assertFalse(Register.table_exists())
self.assertEqual(db_a.get_tables(), ['register'])
self.assertEqual(db_b.get_tables(), [])
db_a.close()
db_b.close()
| TestModelSetDatabase |
python | astropy__astropy | astropy/units/tests/test_quantity_info.py | {
"start": 3768,
"end": 5092
} | class ____:
"""Regression test for gh-14514: _new_view should __array_finalize__.
But info should be propagated only for slicing, etc.
"""
@classmethod
def setup_class(cls):
class MyQuantity(u.Quantity):
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "swallow"):
self.swallow = obj.swallow
cls.my_q = MyQuantity([10.0, 20.0], u.m / u.s)
cls.my_q.swallow = "African"
cls.my_q_w_info = cls.my_q.copy()
cls.my_q_w_info.info.name = "swallow"
def test_setup(self):
assert_no_info(self.my_q)
assert self.my_q_w_info.swallow == self.my_q.swallow
assert self.my_q_w_info.info.name == "swallow"
def test_slice(self):
slc1 = self.my_q[:1]
assert slc1.swallow == self.my_q.swallow
assert_no_info(slc1)
slc2 = self.my_q_w_info[1:]
assert slc2.swallow == self.my_q.swallow
assert_info_equal(slc2, self.my_q_w_info)
def test_op(self):
square1 = self.my_q**2
assert square1.swallow == self.my_q.swallow
assert_no_info(square1)
square2 = self.my_q_w_info**2
assert square2.swallow == self.my_q.swallow
assert_no_info(square2)
| TestQuantitySubclass |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 30730,
"end": 32944
} | class ____(_TestParametrizer):
"""
Decorator for adjusting the way an existing parametrizer operates. This class runs
the given adapter_fn on each parametrization produced by the given parametrizer,
allowing for on-the-fly parametrization more flexible than the default,
product-based composition that occurs when stacking parametrization decorators.
If the adapter_fn returns None for a given test parametrization, that parametrization
will be excluded. Otherwise, it's expected that the adapter_fn returns an iterable of
modified parametrizations, with tweaked test names and parameter kwargs.
Examples::
def include_is_even_arg(test_name, param_kwargs):
x = param_kwargs["x"]
is_even = x % 2 == 0
new_param_kwargs = dict(param_kwargs)
new_param_kwargs["is_even"] = is_even
is_even_suffix = "_even" if is_even else "_odd"
new_test_name = f"{test_name}{is_even_suffix}"
yield (new_test_name, new_param_kwargs)
...
@reparametrize(parametrize("x", range(5)), include_is_even_arg)
def test_foo(self, x, is_even):
...
def exclude_odds(test_name, param_kwargs):
x = param_kwargs["x"]
is_even = x % 2 == 0
yield None if not is_even else (test_name, param_kwargs)
...
@reparametrize(parametrize("x", range(5)), exclude_odds)
def test_bar(self, x):
...
"""
def __init__(self, parametrizer, adapter_fn):
self.parametrizer = parametrizer
self.adapter_fn = adapter_fn
def _parametrize_test(self, test, generic_cls, device_cls):
for (gen_test, test_name, param_kwargs, decorator_fn) in \
self.parametrizer._parametrize_test(test, generic_cls, device_cls):
adapted = self.adapter_fn(test_name, param_kwargs)
if adapted is not None:
for adapted_item in adapted:
if adapted_item is not None:
new_test_name, new_param_kwargs = adapted_item
yield (gen_test, new_test_name, new_param_kwargs, decorator_fn)
| reparametrize |
python | django__django | tests/backends/test_utils.py | {
"start": 3717,
"end": 5559
} | class ____(TransactionTestCase):
available_apps = []
def _test_procedure(self, procedure_sql, params, param_types, kparams=None):
with connection.cursor() as cursor:
cursor.execute(procedure_sql)
# Use a new cursor because in MySQL a procedure can't be used in the
# same cursor in which it was created.
with connection.cursor() as cursor:
cursor.callproc("test_procedure", params, kparams)
with connection.schema_editor() as editor:
editor.remove_procedure("test_procedure", param_types)
@skipUnlessDBFeature("create_test_procedure_without_params_sql")
def test_callproc_without_params(self):
self._test_procedure(
connection.features.create_test_procedure_without_params_sql, [], []
)
@skipUnlessDBFeature("create_test_procedure_with_int_param_sql")
def test_callproc_with_int_params(self):
self._test_procedure(
connection.features.create_test_procedure_with_int_param_sql,
[1],
["INTEGER"],
)
@skipUnlessDBFeature(
"create_test_procedure_with_int_param_sql", "supports_callproc_kwargs"
)
def test_callproc_kparams(self):
self._test_procedure(
connection.features.create_test_procedure_with_int_param_sql,
[],
["INTEGER"],
{"P_I": 1},
)
@skipIfDBFeature("supports_callproc_kwargs")
def test_unsupported_callproc_kparams_raises_error(self):
msg = (
"Keyword parameters for callproc are not supported on this database "
"backend."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.cursor() as cursor:
cursor.callproc("test_procedure", [], {"P_I": 1})
| CursorWrapperTests |
python | pytorch__pytorch | torch/ao/pruning/_experimental/pruner/saliency_pruner.py | {
"start": 94,
"end": 1536
} | class ____(BaseStructuredSparsifier):
"""
Prune rows based on the saliency (L1 norm) of each row.
This pruner works on N-Dimensional weight tensors.
For each row, we will calculate the saliency, which is the sum the L1 norm of all weights in that row.
We expect that the resulting saliency vector has the same shape as our mask.
We then pick elements to remove until we reach the target sparsity_level.
"""
def update_mask(self, module, tensor_name, **kwargs):
# tensor_name will give you the FQN, all other entries in sparse config is present in kwargs
weights = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
# use negative weights so we can use topk (we prune out the smallest)
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
saliency = -weights.norm(dim=tuple(range(1, weights.dim())), p=1)
if saliency.shape != mask.shape:
raise AssertionError(
f"saliency shape ({saliency.shape}) must match mask shape ({mask.shape})"
)
num_to_pick = int(len(mask) * kwargs["sparsity_level"])
prune = saliency.topk(num_to_pick).indices
# Set the mask to be false for the rows we want to prune
mask.data[prune] = False
| SaliencyPruner |
python | jina-ai__jina | tests/unit/serve/runtimes/gateway/http/test_app.py | {
"start": 482,
"end": 12374
} | class ____(Executor):
@requests
def empty(self, docs: DocumentArray, **kwargs):
print(f"# docs {docs}")
@pytest.fixture
def error_log_level():
old_env = os.environ.get('JINA_LOG_LEVEL')
os.environ['JINA_LOG_LEVEL'] = 'ERROR'
yield
os.environ['JINA_LOG_LEVEL'] = old_env
def test_tag_update():
port = random_port()
f = Flow(port=port, protocol='http').add(uses=ExecutorTest)
d1 = Document(id='1', prop1='val')
d2 = Document(id='2', prop2='val')
with f:
d1 = {'data': [d1.to_dict()]}
d2 = {'data': [d2.to_dict()]}
r1 = req.post(f'http://localhost:{port}/index', json=d1)
r2 = req.post(f'http://localhost:{port}/index', json=d2)
assert r1.json()['data'][0]['tags'] == {'prop1': 'val'}
assert r2.json()['data'][0]['tags'] == {'prop2': 'val'}
@pytest.fixture
def cert_pem():
"""This is the cert entry of a self-signed local cert"""
# avoid PermissionError on Windows by deleting later
tmp = NamedTemporaryFile('w', delete=False)
tmp.write(
"""-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIUE663J9NKJE5sTDXei0ScmKE1TskwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMTA5MjAwOTQ4NThaFw0yMjA5
MjAwOTQ4NThaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB
AQUAA4ICDwAwggIKAoICAQC243Axri0Aafq5VsS5+w1QgSIYjhWWCi0frm/w95+O
SleiyQ2nR6Cas2YHViLPo4casch+M5d7fzxzSyezKLoM9FJ7p9rHAc08sjuIkqMt
kDApgfl4Rtco/KqgEr0HELpo6rWG8tby0Wbl82eSm93GUAyZwyuZMdr3Ag6v8ppn
JaUit1oWWs8XZdvEIoRxXQu+APNiKaWWrFjbSXay/ZxbsDrdk7Q+bHLiwYxhx3Bj
SZX9xWPjchFv+fD1pBOyq/P76VGr6B938vEj+EorqUwdiIeW3vgw2FODLg5bXMSo
YR6uZ1V2W8xGwWHpj0s1UChbaOY9thRxvtOrKeW9F4xoFoBrr6ZjkcqD/5mARJz+
Uwee/XhLE7Z5L+eyzLXcXLR2lOs8AXgCmUgAgk0NJi8IPQGZFEBuWVJ7DBO87G7p
DbKMkQ4QGB4dj7lJdHUr6v07Z+Etus7Z+cwjQWe2wdQgDV05E/zCSwWIv4AYbGXs
s1P4XXMeYxxK/74vh7k15TmIiq77A96FaxStK2PZXJjI1dB5DhoC93qCZogq4vup
r6Yk6B29whOlHsBWVL4nW6SYxEDNKyWYRRekiJlcxlw+NpZxBUdC5PwOkh4AZmnW
PpBZv/rCXC7Ow0DS9F9CbfzVynihUHLlZk2SvH8Dc4htum+guiwBMyRtNaSdD8l2
OwIDAQABo1MwUTAdBgNVHQ4EFgQUvTljFuE/DJlq0s8U3wdteIHmQbwwHwYDVR0j
BBgwFoAUvTljFuE/DJlq0s8U3wdteIHmQbwwDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAgEAh7yvPSX3qzWtczJNO4HGzo0xJC9vnEy01xKLOPZiGSFE
CFA15mtCnX1hYglkK8Bp7UnKMo9MwQFC4WLjBj7Ts6NamR0KpMf5M75WrCCA55UT
aWbwqFmHH47j7DYl/j1WK/nLocAzW3zyjDQaBArAls9Iw5nkeonyl+yaXh/7J45G
tNRrMyyxk1hl2C4NA3grQHycZiviNr6OQmgZ3sPexaPfAva3Zuwwv24UeliB0Lpb
opxcZJ9ojqSAok/eJCKSpywuVkxy61Iz2MKIpLA+WoRFjVGuvM5rZPSEQweWlnJT
f4GVKdfGQW9bzM27fMse/sg03z6odTn0rkxUM8TWsZR3Jg9YKbP2zgo4snU9FUMZ
RQA1A83U1T10yaeaCLBjN2psATQr7blYZhNUwYVr41C4K9+g3ghK3zhrKeaokBKQ
xo1aQZQNMyxrpe6NU+Iu9Esz4LRKaf8B4Q5vXJhf2YPqaz3FSHOFHNTiILvIEnuD
DFRwYLPkWlFLr5MYyjo8IlL/lcAjv3F3+Nx7qfvtIoLLxVON4hacYpG2uyyDGqg0
TiIvOLZ67W63nUk6h7+Pwm/8EhxTxFjguSOh0fu7GXtF75kDueBLoERr6DgcBTTg
adVnffnjz+hTFEjwXL48iGRPM142AGNOfXNp8tvPZOYjkc2prtIhGlvOu+De8tg=
-----END CERTIFICATE-----"""
)
tmp.flush()
tmp.close()
yield tmp.name
os.unlink(tmp.name)
@pytest.fixture
def key_pem():
"""This is the key entry of a self-signed local cert"""
# avoid PermissionError on Windows by deleting later
tmp = NamedTemporaryFile('w', delete=False)
tmp.write(
"""-----BEGIN ENCRYPTED PRIVATE KEY-----
MIIJnDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQIQZi3yv841tUCAggA
MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECJlWkgQTVxuQBIIJSMEqOJZ6gnFe
NPER3S6xBLwRToNvspoj9gl8befzOEgLEHv5TFQ2WCznQ5+HkrufnSVwu/R8XR68
CvrIhjYqmO3TAI0t8PWeKOA2Zw3AI5Qlry3L3HouPXoj7OMBs3A99bjgdjIV7C1b
EaiMA9RsBe3jZfaqHnBX19n6pymej9JYJKmqaY2b5+jRh7bu36+0J4/TbLYH3AKX
U7nKG+cKmbYdrAx3ftHuSwTO8KgXy2fDxjqicGJ1D80RT0Pk+jUAYdZg6OWwGwiP
6qeObJikseR/FuSqYhuH5vf9ialLBVR2jmWZw7xJh+Kv4T7GZs0V/hru31cYQU+b
2IcpfBJsR9mKpilD/A62obCk0TSJDY8gehmtvhyZpGnFyh2eOESMuIPpvvsR2kdS
9j7E2luksuMATcdJMElFgM2xS0il0H1amsCtQWgK9BFDlzmOOxr6K6Hm6MS+dq20
1nvznQFt1Qd5e9hyuPV8qd/uuqA+BlnwAJds++fR8jB55ZNOfWzD5sQky+wGkJ80
CwzOQLgqKQqUyYR/+SD8dfTyOPNyBu5f1RBkb2gTOTRQgwDQUOOdwfNmb9brHj0Y
/c6zQ6UCkXYKXxjBr/O7S2yTwsC/gnN3PUEQBWCbYlIe/A0EorouOKMFwCLj1r/U
fn3H0XvuY0ahqNZBxVnuWcUsSGzMiaePIsJsWXmz0A4ufS+CqSIwGx28A/cYhC/2
yZzss3yACFCHIJeoPpOsKPcyhw8K48YyofW5L8vKI5eadhbjDyRH38Z2zSYGLw/1
YRwxsAVRimnLUiz2GND2XXBLEJFLthd8BPnQM1+CEun54UeQaVRJ8p1PyPGIWvWS
Nb4o8jBIjMavwjxjCF7WCdO7V0iThcPwCLme9AN2+MaqWC1HBVZ1QRAVAoVsEFXz
4TPjJljn3RYf8anGfRxsNX74QqatCL2+Oy1M48vqAUICaTPh6z88hpBCYAyvRiXz
S9CEs9o/TcBtcemF1AYB7tsmkYkaJJaOd3M+t/0VFuXYv4OdqiY5PtsdSFh+j1KJ
u1+Pav+jqGrJ/zJuLOC6dx2yaoT1TWRcMcUT3r7j7+yAL97D8azo7PW2F2a9zHl5
2HgS4byzHSRPsGSyKOtGQT2hz12TzgsJ0YPvUz9z4Hi62PgPfNWyukZovWEhxu0X
F1ll2xd/g4QYCqs1dsCU8IQB6xBUbLJ5noQNGN1JvAqKTDBENr28cD2r+QcZPuE3
84LbQJLWCJfwYHJ5GyWFNWyb4DjfZ3HdVHSvRVdsjYLhJDEWKNyW9EJi9hW3PujI
CEZgW3JfWkVmUj64DbmkervtpUM//J/KOdCHIaxKcjSbJziiQfI0q+hR5VS9ceaE
9AYcviAQ3few5MNt869HeHfxGfuG1FDKBSmtf5bLbtlx+RIq79bkFl/A3esD0q0t
2UccHTorNBgVDKkBLETuyCyugiOe6XEpVD+gooW39C+fWe7dxeN7uWYB+IVTfaDB
qKMrgiuUkZZmO2B0YLoDCsvnVlOlH2tvqm6DSAn8BsKU5LzIGDv4g04CCMDkHt0I
8DoUFFhjPHOwGK40gtsekFLz3DlU5c43AVcW9V8pV8A4m5+ZXWI0re6M38QzEaIM
Mtso1Mq/y8gyE+iB0Y1Tx3OY3l0FDmyAwAzCMbkhcI1OcUW37/43wi4MGk9NPaBH
t7XaiLo78jpH9Y1jC1zhgIrcllNWBzlm1Nh94ZrcDk2YZt4c49Lg0+ghO1CW1IH5
ulGjRn3z/sGrjHfGF4GNICbxODrWidXC58/dRh515BK848sFnQCCTVYR6dARhTQQ
13zEvzXX2UJHDpbE0ut1Z0A4IVfvG0ZUoZGGTx+TZFKalKyFJh7/be19gg7K+1z8
BswuwkIvRbsQaxq9BlzS11clOLPr5gu9DOAICJb8tscPa+B0PC7TgZ5JpB3Gv/GS
zdslokIN+gEGUINZFVTLOJVvactkFNO/bCM/TSdn/5LmSJ9MbkYYhpIgPs3Oz1ia
E6Xq9tacvyeOWp9rbz2LG03iMQd45slsPoGyQPOsvmZ48SipuefkWmMLA3LuB71/
IOeO/MIQ29qunr8edkEm2uV9GS+09JUVOr4N/Ir9OGmr1UPkFenEnbtSiYzSQDov
FLIMj4p1KoPcQDwHPsqj9hF47rgArJ6RWZlMo2vDA4bcTTxKugHPaitedJ2d+WJj
fs+5C6D8E8lXpb3oh3ncsFQt7LGJWBOQYaxhPzfAdX5/s9CIqHyIStEY38/Izs+F
sgC6YUOk+5j5IIik6C65YG9mcQwHvCYWynch4PSpa87qjDkP/3BQWNb5OCcwsZ24
lap/PkIXxMKHsoh3i4moQDcaKUEPF4cgzOj/+IipMu/MCizNAm8bhaS2JRKOXGIN
eU9bsw+ADHMrtiLHiEH98ifabCGadvp8B8ZkpYpcT/LtwkHjJ4x7AMFEKK1Cj92r
eaiYszKVYwuTZObGNkWta6AiIsoqU84/NFUpaGn2Qdr4FK6+YBddhlUPs+amrOZF
hy8I5qP6WqNtKmVyPHWY96OhR9JmYxlpVWYr5UzhJ+JClTnVqy++K+j91JahyCBa
1d8ey15Ibhlu3nQ5stxpnmzA/NpBCLhTFUuri/T3C91hHKJicjeZFYpumjHOPZXA
+JwUGwsIkO7n9KiA6F7IOJgJIMHE3VeO6QLdiA3QJbj47o7vwQLnMwOByKrEGIQP
yKERA6oZft29EqqNBAxgp3hDXQI7SIjHVtq1kuTmwu8o7Y5vFxG/r1D+W2V3ZAsr
atXA7La/FbQwfFvCaWPtCy+vehiKjdr2Z+4/BrkTPtkaMe+1qMc21K2rYZaIw+mh
p++zQ0j+16Y5putGcPPf8i1vQ0eMd2OljXo0bqLn5n3b0Q3arRnPpXumgXZES90I
wJCkQIiAy+AYoLROVVrefmQ4/XlWA5iizqkTDU1NThxSQjVan14O356G0HmxNsi9
RB2a0AmwuGhuYPYjUI8iKU12RMp4/rRb28xbAwSh24qQeY2a/IY4u6bGpOWdTudg
Xb3L8FmNUZVtO0QvLKa6YHUW0BTgUy4EzA9nDKDRMYIrRh3BMTr2YZ4rA5ReY1+T
lFkijOU5iJjWLTYGcCyBHQup/VrqmgxchRbbKFO5+qpDHE0e3oLbPLQ0Rw425SvN
xZ36Vrgc4hfaUiifsIiDwA==
-----END ENCRYPTED PRIVATE KEY-----"""
)
tmp.flush()
tmp.close()
yield tmp.name
os.unlink(tmp.name)
@pytest.mark.parametrize('uses', ['HTTPGateway', 'WebSocketGateway'])
def test_uvicorn_ssl_deprecated(cert_pem, key_pem, uses):
args = set_gateway_parser().parse_args(
[
'--uses',
uses,
'--uvicorn-kwargs',
f'ssl_certfile: {cert_pem}', # deprecated
f'ssl_keyfile: {key_pem}', # deprecated
'ssl_keyfile_password: abcd',
]
)
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler):
pass
@pytest.mark.parametrize('uses', ['HTTPGateway', 'WebSocketGateway'])
def test_uvicorn_ssl(cert_pem, key_pem, uses):
args = set_gateway_parser().parse_args(
[
'--uses',
uses,
'--uvicorn-kwargs',
'ssl_keyfile_password: abcd',
'--ssl-certfile',
f'{cert_pem}',
'--ssl-keyfile',
f'{key_pem}',
]
)
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler):
pass
@pytest.mark.parametrize('uses', ['HTTPGateway', 'WebSocketGateway'])
def test_uvicorn_ssl_wrong_password(cert_pem, key_pem, uses):
args = set_gateway_parser().parse_args(
[
'--uses',
uses,
'--uvicorn-kwargs',
'ssl_keyfile_password: abcde',
'--ssl-certfile ',
f'{cert_pem}',
'--ssl-keyfile ',
f'{key_pem}',
]
)
with pytest.raises(ssl.SSLError):
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler):
pass
@pytest.mark.parametrize('uses', ['HTTPGateway', 'WebSocketGateway'])
def test_uvicorn_ssl_wrong_password(cert_pem, key_pem, uses):
args = set_gateway_parser().parse_args(
[
'--uses',
uses,
'--uvicorn-kwargs',
'ssl_keyfile_password: abcde',
'--ssl-certfile',
f'{cert_pem}',
'--ssl-keyfile',
f'{key_pem}',
]
)
with pytest.raises(ssl.SSLError):
with AsyncNewLoopRuntime(args, req_handler_cls=GatewayRequestHandler):
pass
@pytest.mark.parametrize('protocol', ['http', 'websocket'])
def test_uvicorn_ssl_with_flow(cert_pem, key_pem, protocol, capsys, error_log_level):
with Flow(
protocol=protocol,
uvicorn_kwargs=[
'ssl_keyfile_password: abcd',
],
ssl_certfile=cert_pem,
ssl_keyfile=key_pem,
) as f:
with pytest.raises(aiohttp.ClientConnectorCertificateError):
Client(protocol=protocol, port=f.port, tls=True).index([Document()])
da = DocumentArray([Document(text='text_input')])
@pytest.mark.parametrize(
'docs_input',
[
{'data': [{'text': 'text_input'}]},
{'data': {'docs': [{'text': 'text_input'}]}},
{'data': da.to_dict()},
{'data': {'docs': da.to_dict()}},
{'data': [da[0].to_dict()]},
{'data': {'docs': [da[0].to_dict()]}},
],
)
def test_app_models_acceptance(docs_input):
f = Flow(protocol='http').add()
with f:
r = req.post(f'http://localhost:{f.port}/index', json=docs_input)
assert DocumentArray.from_dict(r.json()['data'])[0].text == 'text_input'
@pytest.fixture
def health_check_env():
_prev_loglevel = os.environ.get('JINA_LOG_LEVEL', None)
os.environ['JINA_LOG_LEVEL'] = 'INFO'
os.environ['CICD_JINA_DISABLE_HEALTHCHECK_LOGS'] = '1'
yield
os.environ['JINA_LOG_LEVEL'] = _prev_loglevel
os.environ.pop('CICD_JINA_DISABLE_HEALTHCHECK_LOGS')
@pytest.fixture
def no_health_check_env():
_prev_loglevel = os.environ.get('JINA_LOG_LEVEL', None)
os.environ['JINA_LOG_LEVEL'] = 'INFO'
yield
os.environ['JINA_LOG_LEVEL'] = _prev_loglevel
def test_healthcheck_logs_http(capfd, no_health_check_env):
f = Flow(protocol='http', port=12345).add()
with f:
req.get('http://localhost:12345/')
req.get('http://localhost:12345/docs')
out, _ = capfd.readouterr()
assert '"GET / HTTP/1.1" 200 OK' in out
assert '"GET /docs HTTP/1.1" 200 OK' in out
def test_no_healthcheck_logs_http_with_env(capfd, health_check_env):
f = Flow(protocol='http', port=12345).add()
with f:
req.get('http://localhost:12345/')
req.get('http://localhost:12345/docs')
out, _ = capfd.readouterr()
assert '"GET / HTTP/1.1" 200 OK' not in out
assert '"GET /docs HTTP/1.1" 200 OK' in out
def test_healthcheck_logs_websocket(capfd, no_health_check_env):
f = Flow(protocol='websocket', port=12345).add()
with f:
req.get('http://localhost:12345/')
f.post('/', inputs=DocumentArray.empty())
out, _ = capfd.readouterr()
assert '"GET / HTTP/1.1" 200 OK' in out
def test_healthcheck_logs_websocket_with_env(capfd, health_check_env):
f = Flow(protocol='websocket', port=12345).add()
with f:
f.post('/', inputs=DocumentArray.empty())
req.get('http://localhost:12345/')
out, _ = capfd.readouterr()
assert '"GET / HTTP/1.1" 200 OK' not in out
| ExecutorTest |
python | weaviate__weaviate-python-client | profiling/test_refs.py | {
"start": 1771,
"end": 2205
} | class ____:
contents: str
author: Reference
hasParagraphs: Optional[Reference]
uuid: uuid_lib.UUID = field(init=False)
class_name: str = field(init=False)
def to_data_object(self) -> DataObject:
return DataObject({"contents": self.contents}, self.class_name, self.uuid)
def __post_init__(self) -> None:
self.uuid = uuid_lib.uuid4()
self.class_name = "Paragraph"
@dataclass
| Paragraph |
python | getsentry__sentry | src/sentry/integrations/middleware/hybrid_cloud/parser.py | {
"start": 2052,
"end": 2263
} | class ____:
def __init__(
self,
response: HttpResponseBase | None = None,
error: Exception | None = None,
):
self.response = response
self.error = error
| RegionResult |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 3507,
"end": 3591
} | class ____(DynamicApiError):
""" 429: StatusTooManyRequests """
| TooManyRequestsError |
python | weaviate__weaviate-python-client | weaviate/collections/batch/collection.py | {
"start": 2418,
"end": 6018
} | class ____(Generic[Properties], _BatchBaseNew):
def __init__(
self,
executor: ThreadPoolExecutor,
connection: ConnectionSync,
consistency_level: Optional[ConsistencyLevel],
results: _BatchDataWrapper,
batch_mode: _BatchMode,
name: str,
tenant: Optional[str],
vectorizer_batching: bool,
) -> None:
super().__init__(
connection=connection,
consistency_level=consistency_level,
results=results,
batch_mode=batch_mode,
executor=executor,
vectorizer_batching=vectorizer_batching,
)
self.__name = name
self.__tenant = tenant
def add_object(
self,
properties: Optional[Properties] = None,
references: Optional[ReferenceInputs] = None,
uuid: Optional[UUID] = None,
vector: Optional[VECTORS] = None,
) -> UUID:
"""Add one object to this batch.
NOTE: If the UUID of one of the objects already exists then the existing object will be replaced by the new object.
Args:
properties: The data properties of the object to be added as a dictionary.
references: The references of the object to be added as a dictionary.
uuid: The UUID of the object as an uuid.UUID object or str. If it is None an UUIDv4 will generated, by default None
vector: The embedding of the object. Can be used when a collection does not have a vectorization module or the given
vector was generated using the _identical_ vectorization module that is configured for the class. In this
case this vector takes precedence. Supported types are:
- for single vectors: `list`, 'numpy.ndarray`, `torch.Tensor` and `tf.Tensor`, by default None.
- for named vectors: Dict[str, *list above*], where the string is the name of the vector.
Returns:
The UUID of the added object. If one was not provided a UUIDv4 will be auto-generated for you and returned here.
Raises:
WeaviateBatchValidationError: If the provided options are in the format required by Weaviate.
"""
return self._add_object(
collection=self.__name,
properties=properties,
references=references,
uuid=uuid,
vector=vector,
tenant=self.__tenant,
)
def add_reference(
self, from_uuid: UUID, from_property: str, to: Union[ReferenceInput, List[UUID]]
) -> None:
"""Add a reference to this batch.
Args:
from_uuid: The UUID of the object, as an uuid.UUID object or str, that should reference another object.
from_property: The name of the property that contains the reference.
to: The UUID of the referenced object, as an uuid.UUID object or str, that is actually referenced.
For multi-target references use wvc.Reference.to_multi_target().
Raises:
WeaviateBatchValidationError: If the provided options are in the format required by Weaviate.
"""
self._add_reference(
from_uuid,
self.__name,
from_property,
to,
self.__tenant,
)
BatchCollection = _BatchCollection
BatchCollectionNew = _BatchCollectionNew
CollectionBatchingContextManager = _ContextManagerWrapper[
Union[BatchCollection[Properties], BatchCollectionNew[Properties]],
BatchCollectionProtocol[Properties],
]
| _BatchCollectionNew |
python | kubernetes-client__python | kubernetes/client/models/v1_endpoints_list.py | {
"start": 383,
"end": 6848
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1Endpoints]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointsList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1EndpointsList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1EndpointsList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1EndpointsList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1EndpointsList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1EndpointsList. # noqa: E501
List of endpoints. # noqa: E501
:return: The items of this V1EndpointsList. # noqa: E501
:rtype: list[V1Endpoints]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1EndpointsList.
List of endpoints. # noqa: E501
:param items: The items of this V1EndpointsList. # noqa: E501
:type: list[V1Endpoints]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1EndpointsList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1EndpointsList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1EndpointsList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1EndpointsList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1EndpointsList. # noqa: E501
:return: The metadata of this V1EndpointsList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1EndpointsList.
:param metadata: The metadata of this V1EndpointsList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointsList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointsList):
return True
return self.to_dict() != other.to_dict()
| V1EndpointsList |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 242382,
"end": 243616
} | class ____(Response):
"""
Response of tasks.get_configuration_names endpoint.
:param configurations: Names of task configuration items (keyed by task ID)
:type configurations: dict
"""
_service = "tasks"
_action = "get_configuration_names"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"configurations": {
"description": "Names of task configuration items (keyed by task ID)",
"type": ["object", "null"],
}
},
"type": "object",
}
def __init__(self, configurations: Optional[dict] = None, **kwargs: Any) -> None:
super(GetConfigurationNamesResponse, self).__init__(**kwargs)
self.configurations = configurations
@schema_property("configurations")
def configurations(self) -> Optional[dict]:
return self._property_configurations
@configurations.setter
def configurations(self, value: Optional[dict]) -> None:
if value is None:
self._property_configurations = None
return
self.assert_isinstance(value, "configurations", (dict,))
self._property_configurations = value
| GetConfigurationNamesResponse |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 4210,
"end": 4735
} | class ____:
"""Placeholder that delegates the serialization and deserialization to the internal protobuf"""
@staticmethod
def SerializeToString(x):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
return x.SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
si = jina_pb2.SnapshotId()
si.ParseFromString(x)
return si
| SnapshotId |
python | huggingface__transformers | src/transformers/models/layoutlmv2/image_processing_layoutlmv2.py | {
"start": 1561,
"end": 4569
} | class ____(ImagesKwargs, total=False):
r"""
apply_ocr (`bool`, *optional*, defaults to `True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
the `apply_ocr` parameter in the `preprocess` method.
ocr_lang (`str`, *optional*):
The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
tesseract_config (`str`, *optional*):
Any additional custom configuration flags that are forwarded to the `config` parameter when calling
Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
`preprocess` method.
"""
apply_ocr: bool
ocr_lang: Optional[str]
tesseract_config: Optional[str]
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
def apply_tesseract(
image: np.ndarray,
lang: Optional[str],
tesseract_config: Optional[str] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
tesseract_config = tesseract_config if tesseract_config is not None else ""
# apply OCR
pil_image = to_pil_image(image, input_data_format=input_data_format)
image_width, image_height = pil_image.size
data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left, top, width, height):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box)
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
return words, normalized_boxes
@requires(backends=("vision",))
| LayoutLMv2ImageProcessorKwargs |
python | huggingface__transformers | src/transformers/models/gemma3/modeling_gemma3.py | {
"start": 59069,
"end": 59694
} | class ____(GenericForSequenceClassification, Gemma3PreTrainedModel):
"""
Gemma3TextForSequenceClassification is a text-only sequence classification model that works with Gemma3TextConfig.
It uses the generic sequence classification implementation for efficiency and consistency.
"""
config: Gemma3TextConfig
input_modalities = ("text",)
__all__ = [
"Gemma3PreTrainedModel",
"Gemma3TextModel",
"Gemma3ForCausalLM",
"Gemma3ForConditionalGeneration",
"Gemma3Model",
"Gemma3ForSequenceClassification",
"Gemma3TextForSequenceClassification",
]
| Gemma3TextForSequenceClassification |
python | pypa__pipenv | pipenv/vendor/colorama/winterm.py | {
"start": 247,
"end": 416
} | class ____(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
| WinColor |
python | wandb__wandb | tests/unit_tests/test_file_stream.py | {
"start": 244,
"end": 6495
} | class ____:
data: str = None
def test_split_files():
def choices(pop, k=1):
# Note: random.choices was added in python 3.6
return [random.choice(pop) for _ in range(k)]
def rand_string_list(size):
width = max(1, int(size / 10))
num_lines = int(size / width)
return [
"".join(
choices(
string.ascii_letters
+ string.punctuation
+ string.digits
+ string.whitespace,
k=random.randint(1, width),
)
)
for _ in range(num_lines)
]
file_size = 1 # MB
num_files = 10
chunk_size = 0.1 # MB
files = {
f"file_{i}.txt": {
"content": rand_string_list(int(file_size * 1024 * 1024)),
"offset": 0,
}
for i in range(num_files)
}
chunks = list(split_files(files, max_bytes=chunk_size * 1024 * 1024))
# re-combine chunks
buff = {}
for c in chunks:
for k, v in c.items():
if k in buff:
buff[k].append(v)
else:
buff[k] = [v]
files2 = {
k: {
"content": list(
itertools.chain(
*(c["content"] for c in sorted(v, key=lambda c: c["offset"]))
)
),
"offset": 0,
}
for k, v in buff.items()
}
assert files == files2
# Verify chunk offsets (These can be messed up and above assertion would still pass).
for fname in files:
offset_size_pairs = [
(c[fname]["offset"], len(c[fname]["content"])) for c in chunks if fname in c
]
offset_size_pairs.sort(key=lambda p: p[0])
assert offset_size_pairs[0][0] == 0
for i in range(len(offset_size_pairs) - 1):
assert offset_size_pairs[i + 1][0] == sum(offset_size_pairs[i])
assert sum(offset_size_pairs[-1]) == len(files[fname]["content"])
def test_crdedupe_consecutive_offsets():
fp = CRDedupeFilePolicy()
console = {1: "a", 2: "a", 3: "a", 8: "a", 12: "a", 13: "a", 30: "a"}
intervals = fp.get_consecutive_offsets(console)
assert intervals == [[1, 3], [8, 8], [12, 13], [30, 30]]
def test_crdedupe_split_chunk():
fp = CRDedupeFilePolicy()
answer = [
("2020-08-25T20:38:36.895321 ", "this is my line of text\nsecond line\n"),
("ERROR 2020-08-25T20:38:36.895321 ", "this is my line of text\nsecond line\n"),
]
test_data = [
"2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n",
"ERROR 2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n",
]
for i, data in enumerate(test_data):
c = Chunk(data=data)
prefix, rest = fp.split_chunk(c)
assert prefix == answer[i][0]
assert rest == answer[i][1]
def test_crdedupe_process_chunks():
fp = CRDedupeFilePolicy()
sep = os.linesep
files = {"output.log": None}
# Test STDERR progress bar updates (\r lines) overwrite the correct offset.
# Test STDOUT and STDERR normal messages get appended correctly.
chunks = [
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
Chunk(data=f"ERROR timestamp progress bar{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 1{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 2{sep}"),
Chunk(data=f"timestamp text{sep}text{sep}text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{
"offset": 0,
"content": [
"timestamp text\n",
"ERROR timestamp error message\n",
"ERROR timestamp progress bar update 2\n",
"timestamp text\n",
"timestamp text\n",
"timestamp text\n",
"ERROR timestamp error message\n",
],
}
]
assert ret == want
files["output.log"] = ret
file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))
assert 1 == len(file_requests)
# Test that STDERR progress bar updates in next list of chunks still
# maps to the correct offset.
# Test that we can handle STDOUT progress bars (\r lines) as well.
chunks = [
Chunk(data=f"ERROR timestamp \rprogress bar update 3{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar update 4{sep}"),
Chunk(data=f"timestamp \rstdout progress bar{sep}"),
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"timestamp \rstdout progress bar update{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{"offset": 2, "content": ["ERROR timestamp progress bar update 4\n"]},
{"offset": 5, "content": ["timestamp stdout progress bar update\n"]},
{"offset": 7, "content": ["timestamp text\n"]},
]
assert ret == want
files["output.log"] = ret
file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))
assert 3 == len(file_requests)
# Test that code handles final progress bar output and correctly
# offsets any new progress bars.
chunks = [
Chunk(data=f"timestamp text{sep}"),
Chunk(data=f"ERROR timestamp \rprogress bar final{sep}text{sep}text{sep}"),
Chunk(data=f"ERROR timestamp error message{sep}"),
Chunk(data=f"ERROR timestamp new progress bar{sep}"),
Chunk(data=f"ERROR timestamp \rnew progress bar update 1{sep}"),
]
ret = fp.process_chunks(chunks)
want = [
{"offset": 2, "content": ["ERROR timestamp progress bar final\n"]},
{
"offset": 8,
"content": [
"timestamp text\n",
"ERROR timestamp text\n",
"ERROR timestamp text\n",
"ERROR timestamp error message\n",
"ERROR timestamp new progress bar update 1\n",
],
},
]
assert ret == want
files["output.log"] = ret
file_requests = list(split_files(files, max_bytes=util.MAX_LINE_BYTES))
assert 2 == len(file_requests)
| Chunk |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 483736,
"end": 484454
} | class ____(VegaLiteSchema):
"""
ImputeSequence schema wrapper.
Parameters
----------
stop : float
The ending value(exclusive) of the sequence.
start : float
The starting value of the sequence. **Default value:** ``0``
step : float
The step value between sequence entries. **Default value:** ``1`` or ``-1`` if
``stop < start``
"""
_schema = {"$ref": "#/definitions/ImputeSequence"}
def __init__(
self,
stop: Optional[float] = Undefined,
start: Optional[float] = Undefined,
step: Optional[float] = Undefined,
**kwds,
):
super().__init__(stop=stop, start=start, step=step, **kwds)
| ImputeSequence |
python | getsentry__sentry | src/sentry/monitors/serializers.py | {
"start": 5660,
"end": 5802
} | class ____:
updated: list[MonitorSerializerResponse]
errored: list[MonitorSerializerResponse]
@register(Monitor)
| MonitorBulkEditResponse |
python | pytorch__pytorch | torch/_inductor/compile_worker/subproc_pool.py | {
"start": 3463,
"end": 3528
} | class ____(Enum):
FORK = "fork"
SPAWN = "spawn"
| SubprocKind |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/Singly_linked_list.py | {
"start": 115,
"end": 2277
} | class ____:
def __init__(self):
self.head = None #creating a header
#inserting a new node at the beginning
def push(self, new_data):
new_node = node(new_data)
new_node.next = self.head
self.head = new_node
#inserting a new node after a node
def push_after(self, prev_node, new_data):
#checking whether previous node exists
if prev_node is None:
print("The given previous node is not found.\n")
return
new_node = node(new_data)
#Make next of new Node as next of previous node
new_node.next = prev_node.next
#make next of previous node as new node
prev_node.next = new_node
# append new node at the end
def pushend(self, new_data):
new_node = node(new_data)
#If the Linked List is empty, then make the new node as head
if self.head is None:
self.head = new_node
return
#otherwise traverse to the last node
end = self.head
while (end.next):
end = end.next
#Change the next of last node
end.next = new_node
def deletion(self, position):
#check if linked list is empty
if self.head == None:
return
# store header
temp = self.head
# If header is removed
if position == 0:
self.head = temp.next
temp = None
return
#previous node to the node to be deleted
for i in range(position -1 ):
temp = temp.next
if temp is None:
break
# If position is more than number of nodes
if temp is None and temp.next is None:
return
# store pointer to the next of node to be deleted
next = temp.next.next
# remove node from the linked list
temp.next = None
temp.next = next
def printing(self):
temp = self.head
while temp is not None:
print(temp.data)
temp = temp.next
| linkedlist |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_bundle.py | {
"start": 9051,
"end": 10048
} | class ____:
def test_without_widgets(self) -> None:
assert beb._use_widgets(beb._all_objs([plot()])) is False
assert beb._use_widgets(beb._all_objs([plot(), glplot()])) is False
d = Document()
d.add_root(plot())
d.add_root(glplot())
assert beb._use_widgets(beb._all_objs([d])) is False
def test_with_widgets(self) -> None:
assert beb._use_widgets(beb._all_objs([widget()])) is True
assert beb._use_widgets(beb._all_objs([widget(), plot()])) is True
assert beb._use_widgets(beb._all_objs([widget(), plot(), glplot()])) is True
assert beb._use_widgets(beb._all_objs([widget(), plot(), glplot(), table()])) is True
assert beb._use_widgets(beb._all_objs([table(), table(), glplot()])) is True
d = Document()
d.add_root(plot())
d.add_root(table())
d.add_root(widget())
d.add_root(glplot())
assert beb._use_widgets(beb._all_objs([d])) is True
| Test__use_widgets |
python | ray-project__ray | python/ray/tune/tests/execution/test_controller_search_alg_integration.py | {
"start": 728,
"end": 12264
} | class ____(TuneController):
def __init__(self, *args, **kwargs):
kwargs.update(dict(storage=mock_storage_context()))
super().__init__(*args, **kwargs)
@pytest.fixture(autouse=True)
def register_test_trainable():
register_mock_trainable()
yield
@pytest.fixture(scope="function")
def ray_start_8_cpus():
address_info = ray.init(num_cpus=8, num_gpus=0)
yield address_info
ray.shutdown()
@pytest.fixture(scope="function")
def ray_start_4_cpus_2_gpus_extra():
address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
yield address_info
ray.shutdown()
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_search_alg_notification(ray_start_4_cpus_2_gpus_extra, resource_manager_cls):
"""Check that the searchers gets notified of trial results + completions.
Also check that the searcher is "finished" before the runner, i.e. the runner
continues processing trials when the searcher finished.
Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgNotification
Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgFinished
"""
experiment_spec = {"run": MOCK_TRAINABLE_NAME, "stop": {"training_iteration": 2}}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg = _MockSuggestionAlgorithm()
searcher = search_alg.searcher
search_alg.add_configurations(experiments)
runner = TestTuneController(
resource_manager_factory=lambda: resource_manager_cls(), search_alg=search_alg
)
# Run until trial is running
while not search_alg.is_finished():
runner.step()
trials = runner.get_trials()
# Make sure trial started
while trials[0].status != Trial.RUNNING:
runner.step()
assert trials[0].status == Trial.RUNNING
assert search_alg.is_finished()
assert not runner.is_finished()
# Run until everything finished
while not runner.is_finished():
runner.step()
assert trials[0].status == Trial.TERMINATED
assert search_alg.is_finished()
assert runner.is_finished()
assert searcher.counter["result"] == 1
assert searcher.counter["complete"] == 1
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_search_alg_scheduler_stop(ray_start_4_cpus_2_gpus_extra, resource_manager_cls):
"""Check that a scheduler-issued stop also notifies the search algorithm.
Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgSchedulerInteraction # noqa
"""
class _MockScheduler(FIFOScheduler):
def on_trial_result(self, *args, **kwargs):
return TrialScheduler.STOP
experiment_spec = {"run": MOCK_TRAINABLE_NAME, "stop": {"training_iteration": 5}}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg = _MockSuggestionAlgorithm()
searcher = search_alg.searcher
search_alg.add_configurations(experiments)
runner = TestTuneController(
resource_manager_factory=lambda: resource_manager_cls(),
search_alg=search_alg,
scheduler=_MockScheduler(),
)
trials = runner.get_trials()
while not runner.is_finished():
runner.step()
# Result is not processed because trial stop takes precedence
assert searcher.counter["result"] == 0
# But on_trial_complete is triggered...
assert searcher.counter["complete"] == 1
# ... and still updates the last result.
assert trials[0].last_result[TRAINING_ITERATION] == 1
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_search_alg_stalled(ray_start_4_cpus_2_gpus_extra, resource_manager_cls):
"""Checks that runner and searcher state is maintained when stalled.
We use a concurrency limit of 1, meaning each trial is added one-by-one
from the searchers.
We then run three samples. During the second trial, we stall the searcher,
which means we don't suggest new trials after it finished.
In this case, the runner should still be considered "running". Once we unstall,
the experiment finishes regularly.
Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgStalled
"""
experiment_spec = {
"run": MOCK_TRAINABLE_NAME,
"num_samples": 3,
"stop": {"training_iteration": 1},
}
experiments = [Experiment.from_json("test", experiment_spec)]
search_alg = _MockSuggestionAlgorithm(max_concurrent=1)
search_alg.add_configurations(experiments)
searcher = search_alg.searcher
runner = TestTuneController(
resource_manager_factory=lambda: resource_manager_cls(),
search_alg=search_alg,
)
runner.step()
trials = runner.get_trials()
while trials[0].status != Trial.TERMINATED:
runner.step()
# On next step, trials[1] is created
runner.step()
trials = runner.get_trials()
while trials[1].status != Trial.RUNNING:
runner.step()
assert trials[1].status == Trial.RUNNING
assert len(searcher.live_trials) == 1
# Stall: We don't suggest new algorithms
searcher.stall = True
while trials[1].status != Trial.TERMINATED:
runner.step()
assert trials[1].status == Trial.TERMINATED
assert len(searcher.live_trials) == 0
assert all(trial.is_finished() for trial in trials)
assert not search_alg.is_finished()
assert not runner.is_finished()
# Unstall
searcher.stall = False
# Create trials[2]
runner.step()
trials = runner.get_trials()
while trials[2].status != Trial.RUNNING:
runner.step()
assert trials[2].status == Trial.RUNNING
assert len(searcher.live_trials) == 1
while trials[2].status != Trial.TERMINATED:
runner.step()
assert len(searcher.live_trials) == 0
assert search_alg.is_finished()
assert runner.is_finished()
@pytest.mark.parametrize(
    "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_search_alg_finishes(ray_start_4_cpus_2_gpus_extra, resource_manager_cls):
    """Empty SearchAlg changing state in `next_trials` does not crash.

    The search algorithm changes to ``finished`` mid-run. This should not
    affect processing of the experiment.

    Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgFinishes
    """
    # Save the previous value so the setting does not leak into other tests
    # running in the same process (the original code never restored it).
    old_max_pending = os.environ.get("TUNE_MAX_PENDING_TRIALS_PG")
    os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

    class FinishFastAlg(_MockSuggestionAlgorithm):
        # Suggests trials while `num_samples` is not exhausted; flips to
        # "finished" once more than 4 trials have been requested.
        _index = 0

        def next_trial(self):
            spec = self._experiment.spec
            trial = None
            if self._index < spec["num_samples"]:
                trial = Trial(
                    spec.get("run"),
                    stopping_criterion=spec.get("stop"),
                    storage=spec.get("storage"),
                )
            self._index += 1

            if self._index > 4:
                self.set_finished()
            return trial

        def suggest(self, trial_id):
            return {}

    try:
        experiment_spec = {
            "run": MOCK_TRAINABLE_NAME,
            "num_samples": 2,
            "stop": {"training_iteration": 1},
        }
        searcher = FinishFastAlg()
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher.add_configurations(experiments)

        runner = TestTuneController(
            resource_manager_factory=lambda: resource_manager_cls(),
            search_alg=searcher,
        )
        assert not runner.is_finished()
        while len(runner.get_trials()) < 2:
            runner.step()  # Launch 2 runs
        assert not searcher.is_finished()
        assert not runner.is_finished()

        searcher_finished_before = False
        while not runner.is_finished():
            runner.step()
            searcher_finished_before = searcher.is_finished()

        # searcher_finished_before will be True if the searcher was finished
        # before the controller.
        assert searcher_finished_before
    finally:
        # Restore the environment so later tests see the original setting.
        if old_max_pending is None:
            os.environ.pop("TUNE_MAX_PENDING_TRIALS_PG", None)
        else:
            os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = old_max_pending
# Todo (krfricke): Fix in next batch
@pytest.mark.skip("This test is currently flaky as it can fail due to timing issues.")
@pytest.mark.parametrize(
    "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
def test_searcher_save_restore(ray_start_8_cpus, resource_manager_cls, tmpdir):
    """Searchers state should be saved and restored in the experiment checkpoint.

    Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearcherSaveRestore
    """

    def create_searcher():
        # Build a fresh searcher stack (suggestion -> concurrency limit ->
        # repeater -> generator) with the experiment already registered.
        class TestSuggestion(Searcher):
            # Counts suggestions and persists its full state via pickle.
            def __init__(self, index):
                self.index = index
                self.returned_result = []
                super().__init__(metric="episode_reward_mean", mode="max")

            def suggest(self, trial_id):
                self.index += 1
                return {"test_variable": self.index}

            def on_trial_complete(self, trial_id, result=None, **kwargs):
                self.returned_result.append(result)

            def save(self, checkpoint_path):
                with open(checkpoint_path, "wb") as f:
                    pickle.dump(self.__dict__, f)

            def restore(self, checkpoint_path):
                with open(checkpoint_path, "rb") as f:
                    self.__dict__.update(pickle.load(f))

        searcher = TestSuggestion(0)
        searcher = ConcurrencyLimiter(searcher, max_concurrent=2)
        searcher = Repeater(searcher, repeat=3, set_index=False)
        search_alg = SearchGenerator(searcher)
        experiment_spec = {
            "run": MOCK_TRAINABLE_NAME,
            "num_samples": 20,
            "config": {"sleep": 10},
            "stop": {"training_iteration": 2},
            "resources_per_trial": PlacementGroupFactory([{"CPU": 1}]),
        }
        experiments = [Experiment.from_json("test", experiment_spec)]
        search_alg.add_configurations(experiments)
        return search_alg

    searcher = create_searcher()
    runner = TestTuneController(
        resource_manager_factory=lambda: resource_manager_cls(),
        search_alg=searcher,
        checkpoint_period=-1,
        experiment_path=str(tmpdir),
    )

    while len(runner.get_trials()) < 6:
        runner.step()

    assert len(runner.get_trials()) == 6, [t.config for t in runner.get_trials()]

    runner.checkpoint()
    trials = runner.get_trials()
    # Stop every non-errored trial before tearing down the controller.
    # (Was a side-effect list comprehension with an `is not` identity check
    # against the status constant; a plain loop with `!=` is the correct
    # and idiomatic form.)
    for t in trials:
        if t.status != Trial.ERROR:
            runner._schedule_trial_stop(t)
    runner.cleanup()

    del runner

    searcher = create_searcher()
    runner2 = TestTuneController(
        resource_manager_factory=lambda: resource_manager_cls(),
        search_alg=searcher,
        experiment_path=str(tmpdir),
        resume="LOCAL",
    )
    assert len(runner2.get_trials()) == 6, [t.config for t in runner2.get_trials()]

    def trial_statuses():
        return [t.status for t in runner2.get_trials()]

    def num_running_trials():
        return sum(t.status == Trial.RUNNING for t in runner2.get_trials())

    while num_running_trials() < 6:
        runner2.step()

    assert len(set(trial_statuses())) == 1
    assert Trial.RUNNING in trial_statuses()

    for i in range(20):
        runner2.step()
        assert 1 <= num_running_trials() <= 6

    # Repeater(repeat=3) means each suggested value may appear at most 3 times.
    evaluated = [t.evaluated_params["test_variable"] for t in runner2.get_trials()]
    count = Counter(evaluated)
    assert all(v <= 3 for v in count.values())
# Allow running this test module directly: forwards to pytest in verbose mode.
if __name__ == "__main__":
    sys.exit(pytest.main(["-v", __file__]))
| TestTuneController |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/data_factory.py | {
"start": 4218,
"end": 5080
} | class ____(AirflowException):
"""An exception that indicates a pipeline run failed to complete."""
def get_field(extras: dict, field_name: str, strict: bool = False):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__azure_data_factory__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
return extras[field_name] or None
prefixed_name = f"{backcompat_prefix}{field_name}"
if prefixed_name in extras:
return extras[prefixed_name] or None
if strict:
raise KeyError(f"Field {field_name} not found in extras")
| AzureDataFactoryPipelineRunException |
python | django__django | django/contrib/gis/gdal/field.py | {
"start": 4129,
"end": 4552
} | class ____(Field):
_bit64 = False
@property
def value(self):
"Return an integer contained in this field."
return self.as_int(self._bit64)
@property
def type(self):
"""
GDAL uses OFTReals to represent OFTIntegers in created
shapefiles -- forcing the type here since the underlying field
type may actually be OFTReal.
"""
return 0
| OFTInteger |
python | huggingface__transformers | src/transformers/models/colqwen2/modular_colqwen2.py | {
"start": 1674,
"end": 11845
} | class ____(ColPaliProcessor):
r"""
Constructs a ColQwen2 processor which wraps a Qwen2VLProcessor and special methods to process images and queries, as
well as to compute the late-interaction retrieval score.
[`ColQwen2Processor`] offers all the functionalities of [`Qwen2VLProcessor`]. See the [`~Qwen2VLProcessor.__call__`]
for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
visual_prompt_prefix (`str`, *optional*): A string that gets tokenized and prepended to the image tokens.
query_prefix (`str`, *optional*): A prefix to be used for the query.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
chat_template=None,
visual_prompt_prefix: Optional[str] = None,
query_prefix: Optional[str] = None,
**kwargs,
):
ProcessorMixin.__init__(self, image_processor, tokenizer, chat_template=chat_template)
self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
if visual_prompt_prefix is None:
visual_prompt_prefix = "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe the image.<|im_end|><|endoftext|>"
self.visual_prompt_prefix = visual_prompt_prefix
if query_prefix is None:
query_prefix = "Query: "
self.query_prefix = query_prefix
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
**kwargs: Unpack[ColQwen2ProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model either (1) one or several texts, either (2) one or several image(s). This method is a custom
wrapper around the Qwen2VLProcessor's [`~Qwen2VLProcessor.__call__`] method adapted for the ColQwen2 model. It cannot process
both text and images at the same time.
When preparing the text(s), this method forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's
[`~Qwen2TokenizerFast.__call__`].
When preparing the image(s), this method forwards the `images` and `kwargs` arguments to Qwen2VLImageProcessor's
[`~Qwen2VLImageProcessor.__call__`].
Please refer to the doctsring of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
ColQwen2ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
suffix = output_kwargs["text_kwargs"].pop("suffix", None)
return_token_type_ids = suffix is not None
if text is None and images is None:
raise ValueError("Either text or images must be provided")
if text is not None and images is not None:
raise ValueError("Only one of text or images can be processed at a time")
if images is not None:
if is_valid_image(images):
images = [images]
elif isinstance(images, list) and is_valid_image(images[0]):
pass
elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):
raise ValueError("images must be an image, list of images or list of list of images")
texts_doc = [self.visual_prompt_prefix] * len(images)
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(texts_doc)):
while self.image_token in texts_doc[i]:
texts_doc[i] = texts_doc[i].replace(
self.image_token, "<|placeholder|>" * (image_grid_thw[index].prod() // merge_length), 1
)
index += 1
texts_doc[i] = texts_doc[i].replace("<|placeholder|>", self.image_token)
text_inputs = self.tokenizer(
texts_doc,
return_token_type_ids=False,
**output_kwargs["text_kwargs"],
)
return_data = BatchFeature(data={**text_inputs, **image_inputs})
# NOTE: The following adjustment ensures correct behavior with DDP on multiple GPUs.
offsets = return_data["image_grid_thw"][:, 1] * return_data["image_grid_thw"][:, 2] # (batch_size,)
# Split the pixel_values tensor into a list of tensors, one per image
pixel_values = list(
torch.split(return_data["pixel_values"], offsets.tolist())
) # [(num_patches_image_0, pixel_values), ..., (num_patches_image_n, pixel_values)]
# Pad the list of pixel_value tensors to the same length along the sequence dimension
return_data["pixel_values"] = torch.nn.utils.rnn.pad_sequence(
pixel_values, batch_first=True
) # (batch_size, max_num_patches, pixel_values)
if return_token_type_ids:
labels = return_data["input_ids"].masked_fill(return_data["token_type_ids"] == 0, -100)
return_data.update({"labels": labels})
return return_data
elif text is not None:
if isinstance(text, str):
text = [text]
elif not (isinstance(text, list) and isinstance(text[0], str)):
raise ValueError("Text must be a string or a list of strings")
if suffix is None:
suffix = self.query_augmentation_token * 10
texts_query: list[str] = []
for query in text:
augmented_query = self.query_prefix + query + suffix
texts_query.append(augmented_query)
batch_query = self.tokenizer(
texts_query,
return_token_type_ids=False,
**output_kwargs["text_kwargs"],
)
return batch_query
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = ColQwen2ProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# ColQwen doesn't process videos. Make a copy of list when removing
# otherwise `self.feature_extractor.model_input_names` is also modified
image_processor_input_names = [
name for name in image_processor_input_names if name not in ["pixel_values_videos", "video_grid_thw"]
]
return tokenizer_input_names + image_processor_input_names
| ColQwen2Processor |
python | django-import-export__django-import-export | tests/core/tests/test_declarative.py | {
"start": 1305,
"end": 3706
} | class ____(TestCase):
def test_meta_inheritance_3_levels(self):
# issue 1363
class GrandparentResource(Resource):
class Meta:
batch_size = 666
class ParentResource(GrandparentResource):
class Meta:
pass
class ChildResource(ParentResource):
class Meta:
pass
parent_resource = ParentResource()
child_resource = ChildResource()
self.assertEqual(666, parent_resource._meta.batch_size)
self.assertEqual(666, child_resource._meta.batch_size)
def test_meta_inheritance_2_levels(self):
class GrandparentResource(Resource):
class Meta:
batch_size = 666
class ParentResource(GrandparentResource):
class Meta:
batch_size = 333
class ChildResource(ParentResource):
class Meta:
pass
parent_resource = ParentResource()
child_resource = ChildResource()
self.assertEqual(333, parent_resource._meta.batch_size)
self.assertEqual(333, child_resource._meta.batch_size)
def test_meta_inheritance_1_level(self):
class GrandparentResource(Resource):
class Meta:
batch_size = 666
class ParentResource(GrandparentResource):
class Meta:
batch_size = 333
class ChildResource(ParentResource):
class Meta:
batch_size = 111
parent_resource = ParentResource()
child_resource = ChildResource()
self.assertEqual(333, parent_resource._meta.batch_size)
self.assertEqual(111, child_resource._meta.batch_size)
def test_meta_inheritance_default(self):
class GrandparentResource(Resource):
class Meta:
pass
class ParentResource(GrandparentResource):
class Meta:
pass
class ChildResource(ParentResource):
class Meta:
pass
grandparent_resource = GrandparentResource()
parent_resource = ParentResource()
child_resource = ChildResource()
self.assertEqual(1000, grandparent_resource._meta.batch_size)
self.assertEqual(1000, parent_resource._meta.batch_size)
self.assertEqual(1000, child_resource._meta.batch_size)
| TestMultiInheritance |
python | faif__python-patterns | patterns/creational/factory.py | {
"start": 1092,
"end": 1397
} | class ____:
"""A simple localizer a la gettext"""
def __init__(self) -> None:
self.translations = {"dog": "σκύλος", "cat": "γάτα"}
def localize(self, msg: str) -> str:
"""We'll punt if we don't have a translation"""
return self.translations.get(msg, msg)
| GreekLocalizer |
python | ansible__ansible | test/lib/ansible_test/_internal/connections.py | {
"start": 608,
"end": 2201
} | class ____(metaclass=abc.ABCMeta):
"""Base class for connecting to a host."""
@abc.abstractmethod
def run(
self,
command: list[str],
capture: bool,
interactive: bool = False,
data: t.Optional[str] = None,
stdin: t.Optional[t.IO[bytes]] = None,
stdout: t.Optional[t.IO[bytes]] = None,
output_stream: t.Optional[OutputStream] = None,
) -> tuple[t.Optional[str], t.Optional[str]]:
"""Run the specified command and return the result."""
def extract_archive(
self,
chdir: str,
src: t.IO[bytes],
):
"""Extract the given archive file stream in the specified directory."""
tar_cmd = ['tar', 'oxzf', '-', '-C', chdir]
retry(lambda: self.run(tar_cmd, stdin=src, capture=True))
def create_archive(
self,
chdir: str,
name: str,
dst: t.IO[bytes],
exclude: t.Optional[str] = None,
):
"""Create the specified archive file stream from the specified directory, including the given name and optionally excluding the given name."""
tar_cmd = ['tar', 'cf', '-', '-C', chdir]
gzip_cmd = ['gzip']
if exclude:
tar_cmd += ['--exclude', exclude]
tar_cmd.append(name)
# Using gzip to compress the archive allows this to work on all POSIX systems we support.
commands = [tar_cmd, gzip_cmd]
sh_cmd = ['sh', '-c', ' | '.join(shlex.join(command) for command in commands)]
retry(lambda: self.run(sh_cmd, stdout=dst, capture=True))
| Connection |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/FeedbackButton.py | {
"start": 66,
"end": 5914
} | class ____(QtWidgets.QPushButton):
"""
QPushButton which flashes success/failure indication for slow or asynchronous procedures.
"""
### For thread-safetyness
sigCallSuccess = QtCore.Signal(object, object, object)
sigCallFailure = QtCore.Signal(object, object, object)
sigCallProcess = QtCore.Signal(object, object, object)
sigReset = QtCore.Signal()
def __init__(self, *args):
QtWidgets.QPushButton.__init__(self, *args)
self.origStyle = None
self.origText = self.text()
self.origStyle = self.styleSheet()
self.origTip = self.toolTip()
self.limitedTime = True
#self.textTimer = QtCore.QTimer()
#self.tipTimer = QtCore.QTimer()
#self.textTimer.timeout.connect(self.setText)
#self.tipTimer.timeout.connect(self.setToolTip)
self.sigCallSuccess.connect(self.success)
self.sigCallFailure.connect(self.failure)
self.sigCallProcess.connect(self.processing)
self.sigReset.connect(self.reset)
def feedback(self, success, message=None, tip="", limitedTime=True):
"""Calls success() or failure(). If you want the message to be displayed until the user takes an action, set limitedTime to False. Then call self.reset() after the desired action.Threadsafe."""
if success:
self.success(message, tip, limitedTime=limitedTime)
else:
self.failure(message, tip, limitedTime=limitedTime)
def success(self, message=None, tip="", limitedTime=True):
"""Displays specified message on button and flashes button green to let user know action was successful. If you want the success to be displayed until the user takes an action, set limitedTime to False. Then call self.reset() after the desired action. Threadsafe."""
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
self.setEnabled(True)
#print "success"
self.startBlink("#0F0", message, tip, limitedTime=limitedTime)
else:
self.sigCallSuccess.emit(message, tip, limitedTime)
def failure(self, message=None, tip="", limitedTime=True):
"""Displays specified message on button and flashes button red to let user know there was an error. If you want the error to be displayed until the user takes an action, set limitedTime to False. Then call self.reset() after the desired action. Threadsafe. """
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
self.setEnabled(True)
#print "fail"
self.startBlink("#F00", message, tip, limitedTime=limitedTime)
else:
self.sigCallFailure.emit(message, tip, limitedTime)
def processing(self, message="Processing..", tip="", processEvents=True):
"""Displays specified message on button to let user know the action is in progress. Threadsafe. """
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
self.setEnabled(False)
self.setText(message, temporary=True)
self.setToolTip(tip, temporary=True)
if processEvents:
QtWidgets.QApplication.processEvents()
else:
self.sigCallProcess.emit(message, tip, processEvents)
def reset(self):
"""Resets the button to its original text and style. Threadsafe."""
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
self.limitedTime = True
self.setText()
self.setToolTip()
self.setStyleSheet()
else:
self.sigReset.emit()
def startBlink(self, color, message=None, tip="", limitedTime=True):
#if self.origStyle is None:
#self.origStyle = self.styleSheet()
#self.origText = self.text()
self.setFixedHeight(self.height())
if message is not None:
self.setText(message, temporary=True)
self.setToolTip(tip, temporary=True)
self.count = 0
#self.indStyle = "QPushButton {border: 2px solid %s; border-radius: 5px}" % color
self.indStyle = "QPushButton {background-color: %s}" % color
self.limitedTime = limitedTime
self.borderOn()
if limitedTime:
QtCore.QTimer.singleShot(2000, self.setText)
QtCore.QTimer.singleShot(10000, self.setToolTip)
def borderOn(self):
self.setStyleSheet(self.indStyle, temporary=True)
if self.limitedTime or self.count <=2:
QtCore.QTimer.singleShot(100, self.borderOff)
def borderOff(self):
self.setStyleSheet()
self.count += 1
if self.count >= 2:
if self.limitedTime:
return
QtCore.QTimer.singleShot(30, self.borderOn)
def setText(self, text=None, temporary=False):
if text is None:
text = self.origText
#print text
QtWidgets.QPushButton.setText(self, text)
if not temporary:
self.origText = text
def setToolTip(self, text=None, temporary=False):
if text is None:
text = self.origTip
QtWidgets.QPushButton.setToolTip(self, text)
if not temporary:
self.origTip = text
def setStyleSheet(self, style=None, temporary=False):
if style is None:
style = self.origStyle
QtWidgets.QPushButton.setStyleSheet(self, style)
if not temporary:
self.origStyle = style
| FeedbackButton |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/ops.py | {
"start": 127,
"end": 360
} | class ____:
def get(self, _url):
return MockResponse()
requests = MockRequest()
# start_op_marker
@dg.op
def my_op():
return "hello"
# end_op_marker
# start_configured_op_marker
import dagster as dg
| MockRequest |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 56639,
"end": 59428
} | class ____(unittest.TestCase):
def check_interrupt_main_with_signal_handler(self, signum):
def handler(signum, frame):
1/0
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
with self.assertRaises(ZeroDivisionError):
_thread.interrupt_main()
def check_interrupt_main_noerror(self, signum):
handler = signal.getsignal(signum)
try:
# No exception should arise.
signal.signal(signum, signal.SIG_IGN)
_thread.interrupt_main(signum)
signal.signal(signum, signal.SIG_DFL)
_thread.interrupt_main(signum)
finally:
# Restore original handler
signal.signal(signum, handler)
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_with_signal_handler(self):
self.check_interrupt_main_with_signal_handler(signal.SIGINT)
self.check_interrupt_main_with_signal_handler(signal.SIGTERM)
def test_interrupt_main_noerror(self):
self.check_interrupt_main_noerror(signal.SIGINT)
self.check_interrupt_main_noerror(signal.SIGTERM)
def test_interrupt_main_invalid_signal(self):
self.assertRaises(ValueError, _thread.interrupt_main, -1)
self.assertRaises(ValueError, _thread.interrupt_main, signal.NSIG)
self.assertRaises(ValueError, _thread.interrupt_main, 1000000)
@threading_helper.reap_threads
def test_can_interrupt_tight_loops(self):
cont = [True]
started = [False]
interrupted = [False]
def worker(started, cont, interrupted):
iterations = 100_000_000
started[0] = True
while cont[0]:
if iterations:
iterations -= 1
else:
return
pass
interrupted[0] = True
t = threading.Thread(target=worker,args=(started, cont, interrupted))
t.start()
while not started[0]:
pass
cont[0] = False
t.join()
self.assertTrue(interrupted[0])
| InterruptMainTests |
python | walkccc__LeetCode | solutions/3074. Apple Redistribution into Boxes/3074.py | {
"start": 0,
"end": 298
} | class ____:
def minimumBoxes(self, apple: list[int], capacity: list[int]) -> int:
appleSum = sum(apple)
capacitySum = 0
for i, c in enumerate(sorted(capacity, reverse=True)):
capacitySum += c
if capacitySum >= appleSum:
return i + 1
return len(capacity)
| Solution |
python | python-openxml__python-docx | tests/test_settings.py | {
"start": 219,
"end": 1633
} | class ____:
"""Unit-test suite for the `docx.settings.Settings` objects."""
@pytest.mark.parametrize(
("cxml", "expected_value"),
[
("w:settings", False),
("w:settings/w:evenAndOddHeaders", True),
("w:settings/w:evenAndOddHeaders{w:val=0}", False),
("w:settings/w:evenAndOddHeaders{w:val=1}", True),
("w:settings/w:evenAndOddHeaders{w:val=true}", True),
],
)
def it_knows_when_the_document_has_distinct_odd_and_even_headers(
self, cxml: str, expected_value: bool
):
assert Settings(element(cxml)).odd_and_even_pages_header_footer is expected_value
@pytest.mark.parametrize(
("cxml", "new_value", "expected_cxml"),
[
("w:settings", True, "w:settings/w:evenAndOddHeaders"),
("w:settings/w:evenAndOddHeaders", False, "w:settings"),
("w:settings/w:evenAndOddHeaders{w:val=1}", True, "w:settings/w:evenAndOddHeaders"),
("w:settings/w:evenAndOddHeaders{w:val=off}", False, "w:settings"),
],
)
def it_can_change_whether_the_document_has_distinct_odd_and_even_headers(
self, cxml: str, new_value: bool, expected_cxml: str
):
settings = Settings(element(cxml))
settings.odd_and_even_pages_header_footer = new_value
assert settings._settings.xml == xml(expected_cxml)
| DescribeSettings |
python | kamyu104__LeetCode-Solutions | Python/design-memory-allocator.py | {
"start": 167,
"end": 1575
} | class ____(object):
def __init__(self, n):
"""
:type n: int
"""
self.__avails = SortedList([[0, n]])
self.__lookup = collections.defaultdict(list)
def allocate(self, size, mID):
"""
:type size: int
:type mID: int
:rtype: int
"""
for l, s in self.__avails:
if s < size:
continue
self.__avails.remove([l, s])
self.__lookup[mID].append([l, size])
if s-size > 0:
self.__avails.add([l+size, s-size])
return l
return -1
def free(self, mID):
"""
:type mID: int
:rtype: int
"""
if mID not in self.__lookup:
return 0
result = 0
for l, s in self.__lookup[mID]:
self.__avails.add([l, s])
i = self.__avails.bisect_left([l, s])
if i+1 < len(self.__avails) and self.__avails[i][0]+self.__avails[i][1] == self.__avails[i+1][0]:
self.__avails[i][1] += self.__avails[i+1][1]
del self.__avails[i+1]
if i-1 >= 0 and self.__avails[i-1][0]+self.__avails[i-1][1] == self.__avails[i][0]:
self.__avails[i-1][1] += self.__avails[i][1]
del self.__avails[i]
result += s
del self.__lookup[mID]
return result
| Allocator |
python | google__jax | docs/autodidax.py | {
"start": 35691,
"end": 35906
} | class ____(Tracer):
__slots__ = ['aval']
aval: ShapedArray
def __init__(self, trace, aval):
self._trace = trace
self.aval = aval
# NB: the analogous class in JAX is called 'DynamicJaxprTrace'
| JaxprTracer |
python | numba__llvmlite | llvmlite/binding/targets.py | {
"start": 9712,
"end": 15369
} | class ____(ffi.ObjectRef):
def _dispose(self):
self._capi.LLVMPY_DisposeTargetMachine(self)
def add_analysis_passes(self, pm):
"""
Register analysis passes for this target machine with a pass manager.
"""
ffi.lib.LLVMPY_AddAnalysisPasses(self, pm)
def set_asm_verbosity(self, verbose):
"""
Set whether this target machine will emit assembly with human-readable
comments describing control flow, debug information, and so on.
"""
ffi.lib.LLVMPY_SetTargetMachineAsmVerbosity(self, verbose)
def emit_object(self, module):
"""
Represent the module as a code object, suitable for use with
the platform's linker. Returns a byte string.
"""
return self._emit_to_memory(module, use_object=True)
def emit_assembly(self, module):
"""
Return the raw assembler of the module, as a string.
llvm.initialize_native_asmprinter() must have been called first.
"""
return _decode_string(self._emit_to_memory(module, use_object=False))
def _emit_to_memory(self, module, use_object=False):
"""Returns bytes of object code of the module.
Args
----
use_object : bool
Emit object code or (if False) emit assembly code.
"""
with ffi.OutputString() as outerr:
mb = ffi.lib.LLVMPY_TargetMachineEmitToMemory(self, module,
int(use_object),
outerr)
if not mb:
raise RuntimeError(str(outerr))
bufptr = ffi.lib.LLVMPY_GetBufferStart(mb)
bufsz = ffi.lib.LLVMPY_GetBufferSize(mb)
try:
return string_at(bufptr, bufsz)
finally:
ffi.lib.LLVMPY_DisposeMemoryBuffer(mb)
@property
def target_data(self):
return TargetData(ffi.lib.LLVMPY_CreateTargetMachineData(self))
@property
def triple(self):
with ffi.OutputString() as out:
ffi.lib.LLVMPY_GetTargetMachineTriple(self, out)
return str(out)
# ============================================================================
# FFI
ffi.lib.LLVMPY_GetProcessTriple.argtypes = [POINTER(c_char_p)]
ffi.lib.LLVMPY_GetTripleParts.argtypes = [c_char_p, POINTER(c_char_p),
POINTER(c_char_p), POINTER(c_char_p),
POINTER(c_char_p)]
ffi.lib.LLVMPY_GetHostCPUFeatures.argtypes = [POINTER(c_char_p)]
ffi.lib.LLVMPY_GetHostCPUFeatures.restype = c_int
ffi.lib.LLVMPY_GetDefaultTargetTriple.argtypes = [POINTER(c_char_p)]
ffi.lib.LLVMPY_GetHostCPUName.argtypes = [POINTER(c_char_p)]
ffi.lib.LLVMPY_GetTripleObjectFormat.argtypes = [c_char_p]
ffi.lib.LLVMPY_GetTripleObjectFormat.restype = c_int
ffi.lib.LLVMPY_CreateTargetData.argtypes = [c_char_p]
ffi.lib.LLVMPY_CreateTargetData.restype = ffi.LLVMTargetDataRef
ffi.lib.LLVMPY_CopyStringRepOfTargetData.argtypes = [
ffi.LLVMTargetDataRef,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_DisposeTargetData.argtypes = [
ffi.LLVMTargetDataRef,
]
ffi.lib.LLVMPY_ABISizeOfType.argtypes = [ffi.LLVMTargetDataRef,
ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ABISizeOfType.restype = c_longlong
ffi.lib.LLVMPY_OffsetOfElement.argtypes = [ffi.LLVMTargetDataRef,
ffi.LLVMTypeRef,
c_int]
ffi.lib.LLVMPY_OffsetOfElement.restype = c_longlong
ffi.lib.LLVMPY_ABIAlignmentOfType.argtypes = [ffi.LLVMTargetDataRef,
ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ABIAlignmentOfType.restype = c_longlong
ffi.lib.LLVMPY_GetTargetFromTriple.argtypes = [c_char_p, POINTER(c_char_p)]
ffi.lib.LLVMPY_GetTargetFromTriple.restype = ffi.LLVMTargetRef
ffi.lib.LLVMPY_GetTargetName.argtypes = [ffi.LLVMTargetRef]
ffi.lib.LLVMPY_GetTargetName.restype = c_char_p
ffi.lib.LLVMPY_GetTargetDescription.argtypes = [ffi.LLVMTargetRef]
ffi.lib.LLVMPY_GetTargetDescription.restype = c_char_p
ffi.lib.LLVMPY_CreateTargetMachine.argtypes = [
ffi.LLVMTargetRef,
# Triple
c_char_p,
# CPU
c_char_p,
# Features
c_char_p,
# OptLevel
c_int,
# Reloc
c_char_p,
# CodeModel
c_char_p,
# PrintMC
c_int,
# JIT
c_int,
# ABIName
c_char_p,
]
ffi.lib.LLVMPY_CreateTargetMachine.restype = ffi.LLVMTargetMachineRef
ffi.lib.LLVMPY_DisposeTargetMachine.argtypes = [ffi.LLVMTargetMachineRef]
ffi.lib.LLVMPY_GetTargetMachineTriple.argtypes = [ffi.LLVMTargetMachineRef,
POINTER(c_char_p)]
ffi.lib.LLVMPY_SetTargetMachineAsmVerbosity.argtypes = [
ffi.LLVMTargetMachineRef, c_int]
ffi.lib.LLVMPY_AddAnalysisPasses.argtypes = [
ffi.LLVMTargetMachineRef,
ffi.LLVMPassManagerRef,
]
ffi.lib.LLVMPY_TargetMachineEmitToMemory.argtypes = [
ffi.LLVMTargetMachineRef,
ffi.LLVMModuleRef,
c_int,
POINTER(c_char_p),
]
ffi.lib.LLVMPY_TargetMachineEmitToMemory.restype = ffi.LLVMMemoryBufferRef
ffi.lib.LLVMPY_GetBufferStart.argtypes = [ffi.LLVMMemoryBufferRef]
ffi.lib.LLVMPY_GetBufferStart.restype = c_void_p
ffi.lib.LLVMPY_GetBufferSize.argtypes = [ffi.LLVMMemoryBufferRef]
ffi.lib.LLVMPY_GetBufferSize.restype = c_size_t
ffi.lib.LLVMPY_DisposeMemoryBuffer.argtypes = [ffi.LLVMMemoryBufferRef]
ffi.lib.LLVMPY_CreateTargetMachineData.argtypes = [
ffi.LLVMTargetMachineRef,
]
ffi.lib.LLVMPY_CreateTargetMachineData.restype = ffi.LLVMTargetDataRef
| TargetMachine |
python | google__pytype | pytype/typegraph/cfg_test.py | {
"start": 101,
"end": 33301
} | class ____(unittest.TestCase):
"""Test control flow graph creation."""
def test_simple_graph(self):
p = cfg.Program()
n1 = p.NewCFGNode("foo")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
n4 = n3.ConnectNew("n4")
self.assertEqual(0, n1.id)
self.assertEqual("foo", n1.name)
self.assertEqual(len(n1.outgoing), 2)
self.assertEqual(len(n2.outgoing), 0) # pylint: disable=g-generic-assert
self.assertEqual(len(n3.outgoing), 1)
self.assertEqual(len(n2.incoming), 1)
self.assertEqual(len(n3.incoming), 1)
self.assertEqual(len(n4.incoming), 1)
self.assertIn(n2, n1.outgoing)
self.assertIn(n3, n1.outgoing)
self.assertIn(n1, n2.incoming)
self.assertIn(n1, n3.incoming)
self.assertIn(n3, n4.incoming)
def test_binding_binding(self):
p = cfg.Program()
node = p.NewCFGNode()
u = p.NewVariable()
v1 = u.AddBinding(None, source_set=[], where=node)
v2 = u.AddBinding("data", source_set=[], where=node)
v3 = u.AddBinding({1: 2}, source_set=[], where=node)
self.assertIsNone(v1.data)
self.assertEqual(v2.data, "data")
self.assertEqual(v3.data, {1: 2})
self.assertEqual(f"<binding of variable 0 to data {id(v2.data)}>", str(v2))
self.assertEqual(f"<binding of variable 0 to data {id(v3.data)}>", str(v3))
def test_cfg_node_str(self):
p = cfg.Program()
n1 = p.NewCFGNode()
n2 = p.NewCFGNode("n2")
v = p.NewVariable()
av = v.AddBinding("a", source_set=[], where=n1)
n3 = p.NewCFGNode("n3", condition=av)
self.assertEqual("<cfgnode 0 None>", str(n1))
self.assertEqual("<cfgnode 1 n2>", str(n2))
self.assertEqual("<cfgnode 2 n3 condition:0>", str(n3))
def test_get_attro(self):
p = cfg.Program()
node = p.NewCFGNode()
u = p.NewVariable()
data = [1, 2, 3]
a = u.AddBinding(data, source_set=[], where=node)
self.assertEqual(a.variable.bindings, [a])
(origin,) = a.origins # we expect exactly one origin
self.assertEqual(origin.where, node)
self.assertEqual(len(origin.source_sets), 1)
(source_set,) = origin.source_sets
self.assertEqual(list(source_set), [])
self.assertEqual(a.data, data)
def test_get_origins(self):
p = cfg.Program()
node = p.NewCFGNode()
u = p.NewVariable()
a = u.AddBinding(1, source_set=[], where=node)
b = u.AddBinding(2, source_set=[a], where=node)
c = u.AddBinding(3, source_set=[a, b], where=node)
expected_source_sets = [[], [a], [a, b]]
for binding, expected_source_set in zip([a, b, c], expected_source_sets):
(origin,) = binding.origins
self.assertEqual(origin.where, node)
(source_set,) = origin.source_sets
self.assertCountEqual(list(source_set), expected_source_set)
def test_variable_set(self):
p = cfg.Program()
node1 = p.NewCFGNode("n1")
node2 = node1.ConnectNew("n2")
d = p.NewVariable()
d.AddBinding("v1", source_set=[], where=node1)
d.AddBinding("v2", source_set=[], where=node2)
self.assertEqual(len(d.bindings), 2)
def test_has_source(self):
p = cfg.Program()
n0, n1, n2 = p.NewCFGNode("n0"), p.NewCFGNode("n1"), p.NewCFGNode("n2")
u = p.NewVariable()
u1 = u.AddBinding(0, source_set=[], where=n0)
v = p.NewVariable()
v1 = v.AddBinding(1, source_set=[], where=n1)
v2 = v.AddBinding(2, source_set=[u1], where=n1)
v3a = v.AddBinding(3, source_set=[], where=n1)
v3b = v.AddBinding(3, source_set=[u1], where=n2)
self.assertEqual(v3a, v3b)
v3 = v3a
self.assertTrue(v1.HasSource(v1))
self.assertTrue(v2.HasSource(v2))
self.assertTrue(v3.HasSource(v3))
self.assertFalse(v1.HasSource(u1))
self.assertTrue(v2.HasSource(u1))
self.assertTrue(v3.HasSource(u1))
def test_filter1(self):
# x.ab = A()
# ,---+------------.
# | n3 |
# x = X() | x.ab = B() |
# +------------+---+------------+------------+
# n1 n2 n4 n5 n6
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n2.ConnectNew("n3")
n4 = n2.ConnectNew("n4")
n5 = n3.ConnectNew("n5")
n4.ConnectTo(n5)
n6 = n5.ConnectNew("n6")
n5.ConnectTo(n6)
all_x = p.NewVariable()
x = all_x.AddBinding({}, source_set=[], where=n1)
ab = p.NewVariable()
x.data["ab"] = ab
a = ab.AddBinding("A", source_set=[], where=n3)
b = ab.AddBinding("B", source_set=[], where=n4)
p.entrypoint = n1
self.assertFalse(a.IsVisible(n1) or b.IsVisible(n1))
self.assertFalse(a.IsVisible(n2) or b.IsVisible(n2))
self.assertTrue(a.IsVisible(n3))
self.assertTrue(b.IsVisible(n4))
self.assertEqual(ab.Filter(n1), [])
self.assertEqual(ab.Filter(n2), [])
self.assertEqual(ab.FilteredData(n3), ["A"])
self.assertEqual(ab.FilteredData(n4), ["B"])
self.assertCountEqual(["A", "B"], ab.FilteredData(n5))
self.assertCountEqual(["A", "B"], ab.FilteredData(n6))
def test_can_have_combination(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
n4 = p.NewCFGNode("n4")
n2.ConnectTo(n4)
n3.ConnectTo(n4)
x = p.NewVariable()
y = p.NewVariable()
x1 = x.AddBinding("1", source_set=[], where=n2)
y2 = y.AddBinding("2", source_set=[], where=n3)
self.assertTrue(n4.CanHaveCombination([x1, y2]))
self.assertTrue(n4.CanHaveCombination([x1]))
self.assertTrue(n4.CanHaveCombination([y2]))
self.assertTrue(n3.CanHaveCombination([y2]))
self.assertTrue(n2.CanHaveCombination([x1]))
self.assertTrue(n1.CanHaveCombination([]))
self.assertFalse(n1.CanHaveCombination([x1]))
self.assertFalse(n1.CanHaveCombination([y2]))
self.assertFalse(n2.CanHaveCombination([x1, y2]))
self.assertFalse(n3.CanHaveCombination([x1, y2]))
def test_conflicting_bindings_from_condition(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n2.ConnectNew("n3")
x = p.NewVariable()
x_a = x.AddBinding("a", source_set=[], where=n1)
x_b = x.AddBinding("b", source_set=[], where=n1)
p.entrypoint = n1
n2.condition = x_a
self.assertFalse(n3.HasCombination([x_b]))
def test_condition_order(self):
p = cfg.Program()
x, y = p.NewVariable(), p.NewVariable()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n2.ConnectNew("n3")
n4 = n3.ConnectNew("n4")
n5 = n4.ConnectNew("n5")
n6 = n5.ConnectNew("n6")
p.entrypoint = n1
y_a = y.AddBinding("a", source_set=[], where=n1)
n3.condition = x.AddBinding("b", source_set=[], where=n2)
n5.condition = x.AddBinding("c", source_set=[], where=n4)
self.assertTrue(n6.HasCombination([y_a]))
def test_contained_if_conflict(self):
p = cfg.Program()
x = p.NewVariable()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
n4 = p.NewCFGNode("n4")
n2.ConnectTo(n4)
n3.ConnectTo(n4)
n5 = n4.ConnectNew("n5")
p.entrypoint = n1
x_a = x.AddBinding("a", source_set=[], where=n1)
n4.condition = x.AddBinding("b", source_set=[], where=n2)
# This is impossible since we have a condition on the way, enforcing x=b.
self.assertFalse(n5.HasCombination([x_a]))
def test_conflicting_conditions_on_path(self):
# This test case is rather academic - there's no obvious way to construct
# a Python program that actually creates the CFG below.
p = cfg.Program()
x, y, z = p.NewVariable(), p.NewVariable(), p.NewVariable()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
n4 = p.NewCFGNode("n4")
n2.ConnectTo(n4)
n3.ConnectTo(n4)
n5 = n4.ConnectNew("n5")
n6 = n5.ConnectNew("n6")
p.entrypoint = n1
n4.condition = x.AddBinding("a", source_set=[], where=n2)
n5.condition = y.AddBinding("a", source_set=[], where=n3)
z_a = z.AddBinding("a", source_set=[], where=n1)
# Impossible since we can only pass either n2 or n3.
self.assertFalse(n6.HasCombination([z_a]))
def test_conditions_block(self):
p = cfg.Program()
unreachable_node = p.NewCFGNode("unreachable_node")
y = p.NewVariable()
unsatisfiable_binding = y.AddBinding(
"2", source_set=[], where=unreachable_node
)
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2", condition=unsatisfiable_binding)
n3 = n2.ConnectNew("n3")
x = p.NewVariable()
b1 = x.AddBinding("1", source_set=[], where=n1)
self.assertFalse(n3.HasCombination([b1]))
n1.ConnectTo(n3)
self.assertTrue(n3.HasCombination([b1]))
self.assertFalse(n2.HasCombination([b1]))
def test_conditions_multiple_paths(self):
p = cfg.Program()
unreachable_node = p.NewCFGNode("unreachable_node")
y = p.NewVariable()
unsatisfiable_binding = y.AddBinding(
"2", source_set=[], where=unreachable_node
)
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2", condition=unsatisfiable_binding)
n3 = n2.ConnectNew("n3")
n4 = n2.ConnectNew("n4")
n4.ConnectTo(n3)
x = p.NewVariable()
b1 = x.AddBinding("1", source_set=[], where=n1)
self.assertFalse(n3.HasCombination([b1]))
self.assertFalse(n2.HasCombination([b1]))
def test_conditions_not_used_if_alternative_exist(self):
p = cfg.Program()
unreachable_node = p.NewCFGNode("unreachable_node")
y = p.NewVariable()
unsatisfiable_binding = y.AddBinding(
"2", source_set=[], where=unreachable_node
)
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2", condition=unsatisfiable_binding)
n3 = n2.ConnectNew("n3")
x = p.NewVariable()
b1 = x.AddBinding("1", source_set=[], where=n1)
self.assertFalse(n3.HasCombination([b1]))
def test_satisfiable_condition(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
x1 = x.AddBinding("1", source_set=[], where=n1)
n2 = n1.ConnectNew("n2")
y = p.NewVariable()
y2 = y.AddBinding("2", source_set=[], where=n2)
n3 = n2.ConnectNew("n3", condition=y2)
n4 = n3.ConnectNew("n4")
self.assertTrue(n4.HasCombination([x1]))
def test_unsatisfiable_condition(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
x1 = x.AddBinding("1", source_set=[], where=n1)
n2 = n1.ConnectNew("n2")
x2 = x.AddBinding("2", source_set=[], where=n2)
n3 = n2.ConnectNew("n3", condition=x2)
n4 = n3.ConnectNew("n4")
self.assertFalse(n4.HasCombination([x1]))
def test_no_node_on_all_paths(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
y = p.NewVariable()
y1 = y.AddBinding("y", source_set=[], where=n1)
n3 = n2.ConnectNew("n3")
n4 = n1.ConnectNew("n4")
n5 = n4.ConnectNew("n5")
n3.ConnectTo(n5)
x = p.NewVariable()
x1 = x.AddBinding("x", source_set=[], where=n2)
n3.condition = x1
n4.condition = x1
self.assertTrue(n5.HasCombination([y1]))
def test_condition_on_start_node(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = p.NewCFGNode("n3")
a = p.NewVariable().AddBinding("a", source_set=[], where=n3)
b = p.NewVariable().AddBinding("b", source_set=[], where=n1)
n2.condition = a
self.assertFalse(n2.HasCombination([b]))
self.assertTrue(n1.HasCombination([b]))
def test_condition_loop(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = p.NewCFGNode("n3")
a = p.NewVariable().AddBinding("a")
u1 = p.NewVariable().AddBinding("1", source_set=[], where=n3)
p.NewVariable().AddBinding("2", source_set=[], where=n3)
c = p.NewVariable().AddBinding("c", source_set=[u1], where=n1)
a.AddOrigin(n2, [c])
n2.condition = a
self.assertFalse(n2.HasCombination([c]))
def test_combinations(self):
# n1------->n2
# | |
# v v
# n3------->n4
# [n2] x = a; y = a
# [n3] x = b; y = b
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
n4 = n2.ConnectNew("n4")
n3.ConnectTo(n4)
x = p.NewVariable()
y = p.NewVariable()
xa = x.AddBinding("a", source_set=[], where=n2)
ya = y.AddBinding("a", source_set=[], where=n2)
xb = x.AddBinding("b", source_set=[], where=n3)
yb = y.AddBinding("b", source_set=[], where=n3)
p.entrypoint = n1
self.assertTrue(n4.HasCombination([xa, ya]))
self.assertTrue(n4.HasCombination([xb, yb]))
self.assertFalse(n4.HasCombination([xa, yb]))
self.assertFalse(n4.HasCombination([xb, ya]))
def test_conflicting(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
a = x.AddBinding("a", source_set=[], where=n1)
b = x.AddBinding("b", source_set=[], where=n1)
p.entrypoint = n1
# At n1, x can either be a or b, but not both.
self.assertTrue(n1.HasCombination([a]))
self.assertTrue(n1.HasCombination([b]))
self.assertFalse(n1.HasCombination([a, b]))
def test_loop(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n2.ConnectTo(n1)
x = p.NewVariable()
a = x.AddBinding("a")
b = x.AddBinding("b")
a.AddOrigin(n1, [b])
b.AddOrigin(n2, [a])
self.assertFalse(n2.HasCombination([b]))
def test_one_step_simultaneous(self):
# Like testSimultaneous, but woven through an additional node
# n1->n2->n3
# [n1] x = a or b
# [n2] y = x
# [n2] z = x
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
y = p.NewVariable()
z = p.NewVariable()
a = x.AddBinding("a", source_set=[], where=n1)
b = x.AddBinding("b", source_set=[], where=n1)
ya = y.AddBinding("ya", source_set=[a], where=n2)
yb = y.AddBinding("yb", source_set=[b], where=n2)
za = z.AddBinding("za", source_set=[a], where=n2)
zb = z.AddBinding("zb", source_set=[b], where=n2)
p.entrypoint = n1
self.assertTrue(n2.HasCombination([ya, za]))
self.assertTrue(n2.HasCombination([yb, zb]))
self.assertFalse(n2.HasCombination([ya, zb]))
self.assertFalse(n2.HasCombination([yb, za]))
def test_conflicting_bindings(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
x_a = x.AddBinding("a", source_set=[], where=n1)
x_b = x.AddBinding("b", source_set=[], where=n1)
p.entrypoint = n1
self.assertTrue(n1.HasCombination([x_a]))
self.assertTrue(n1.HasCombination([x_b]))
self.assertFalse(n1.HasCombination([x_a, x_b]))
self.assertFalse(n2.HasCombination([x_a, x_b]))
def test_mid_point(self):
p = cfg.Program()
x = p.NewVariable()
y = p.NewVariable()
n1 = p.NewCFGNode("n1")
x1 = x.AddBinding("1", source_set=[], where=n1)
y1 = y.AddBinding("1", source_set=[x1], where=n1)
n2 = n1.ConnectNew("n2")
x2 = x.AddBinding("2", source_set=[], where=n2)
n3 = n2.ConnectNew("n3")
self.assertTrue(n3.HasCombination([y1, x2]))
self.assertTrue(n3.HasCombination([x2, y1]))
def test_conditions_are_ordered(self):
# The error case in this test is non-deterministic. The test tries to verify
# that the list returned by _PathFinder.FindNodeBackwards is ordered from
# child to parent.
# The error case would be a random order or the reverse order.
# To guarantee that this test is working go to FindNodeBackwards and reverse
# the order of self._on_path before generating the returned list.
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x1 = p.NewVariable().AddBinding("1", source_set=[], where=n1)
n2 = n1.ConnectNew(
"n2", condition=p.NewVariable().AddBinding("1", source_set=[], where=n1)
)
n3 = n2.ConnectNew(
"n3", condition=p.NewVariable().AddBinding("1", source_set=[], where=n2)
)
n4 = n3.ConnectNew(
"n3", condition=p.NewVariable().AddBinding("1", source_set=[], where=n3)
)
# Strictly speaking n1, n2 and n3 would be enough to expose errors. n4 is
# added to increase the chance of a failure if the order is random.
self.assertTrue(n4.HasCombination([x1]))
def test_same_node_origin(self):
# [n1] x = a or b; y = x
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
y = p.NewVariable()
xa = x.AddBinding("xa", source_set=[], where=n1)
xb = x.AddBinding("xb", source_set=[], where=n1)
ya = y.AddBinding("ya", source_set=[xa], where=n1)
yb = y.AddBinding("yb", source_set=[xb], where=n1)
p.entrypoint = n1
self.assertTrue(n1.HasCombination([xa]))
self.assertTrue(n1.HasCombination([xb]))
self.assertTrue(n1.HasCombination([xa, ya]))
self.assertTrue(n1.HasCombination([xb, yb]))
# We don't check the other two combinations, because within one CFG node,
# bindings are treated as having any order, so the other combinations
# are possible, too:
# n1.HasCombination([xa, yb]) == True (because x = b; y = x; x = a)
# n1.HasCombination([xb, ya]) == True (because x = a; y = x; x = b)
def test_new_variable(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = p.NewCFGNode("n2")
x, y, z = "x", "y", "z"
variable = p.NewVariable(bindings=[x, y], source_set=[], where=n1)
variable.AddBinding(z, source_set=variable.bindings, where=n2)
self.assertCountEqual([x, y, z], [v.data for v in variable.bindings])
self.assertTrue(any(len(e.origins) for e in variable.bindings))
# Test that non-list iterables can be passed to NewVariable.
v2 = p.NewVariable((x, y), [], n1)
self.assertCountEqual([x, y], [v.data for v in v2.bindings])
v3 = p.NewVariable({x, y}, [], n1)
self.assertCountEqual([x, y], [v.data for v in v3.bindings])
v4 = p.NewVariable({x: y}, [], n1)
self.assertCountEqual([x], [v.data for v in v4.bindings])
def test_node_bindings(self):
p = cfg.Program()
n1 = p.NewCFGNode("node1")
n2 = n1.ConnectNew("node2")
self.assertEqual(n1.name, "node1")
self.assertEqual(n2.name, "node2")
u = p.NewVariable()
a1 = u.AddBinding(1, source_set=[], where=n1)
a2 = u.AddBinding(2, source_set=[], where=n1)
a3 = u.AddBinding(3, source_set=[], where=n1)
a4 = u.AddBinding(4, source_set=[], where=n1)
self.assertCountEqual([a1, a2, a3, a4], n1.bindings)
def test_program(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
u1 = p.NewVariable()
u2 = p.NewVariable()
a11 = u1.AddBinding(11, source_set=[], where=n1)
a12 = u1.AddBinding(12, source_set=[], where=n2)
a21 = u2.AddBinding(21, source_set=[], where=n1)
a22 = u2.AddBinding(22, source_set=[], where=n2)
self.assertCountEqual([n1, n2], p.cfg_nodes)
self.assertCountEqual([u1, u2], p.variables)
self.assertCountEqual([a11, a21], n1.bindings)
self.assertCountEqual([a12, a22], n2.bindings)
self.assertEqual(p.next_variable_id, 2)
def test_entry_point(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
a = x.AddBinding("a", source_set=[], where=n1)
a = x.AddBinding("b", source_set=[], where=n2)
p.entrypoint = n1
self.assertTrue(n2.HasCombination([a]))
def test_non_frozen_solving(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
a = x.AddBinding("a", source_set=[], where=n1)
a = x.AddBinding("b", source_set=[], where=n2)
p.entrypoint = n1
self.assertTrue(n2.HasCombination([a]))
def test_filter2(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = p.NewCFGNode("n2")
n1.ConnectTo(n2)
x = p.NewVariable()
a = x.AddBinding("a", source_set=[], where=n2)
p.entrypoint = n1
self.assertEqual(x.Filter(n1), [])
self.assertEqual(x.Filter(n2), [a])
def test_hidden_conflict1(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n1.ConnectNew("n3")
x = p.NewVariable()
y = p.NewVariable()
z = p.NewVariable()
x_a = x.AddBinding("a", source_set=[], where=n1)
x_b = x.AddBinding("b", source_set=[], where=n1)
y_a = y.AddBinding("a", source_set=[x_a], where=n1)
y_b = y.AddBinding("b", source_set=[x_b], where=n2)
z_ab1 = z.AddBinding("ab1", source_set=[x_a, x_b], where=n3)
z_ab2 = z.AddBinding("ab2", source_set=[y_a, x_b], where=n3)
z_ab3 = z.AddBinding("ab3", source_set=[y_b, x_a], where=n3)
z_ab4 = z.AddBinding("ab4", source_set=[y_a, y_b], where=n3)
p.entrypoint = n1
self.assertFalse(n2.HasCombination([y_a, x_b]))
self.assertFalse(n2.HasCombination([y_b, x_a]))
self.assertFalse(n3.HasCombination([z_ab1]))
self.assertFalse(n3.HasCombination([z_ab2]))
self.assertFalse(n3.HasCombination([z_ab3]))
self.assertFalse(n3.HasCombination([z_ab4]))
def test_hidden_conflict2(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
y = p.NewVariable()
x_a = x.AddBinding("a", source_set=[], where=n1)
x_b = x.AddBinding("b", source_set=[], where=n1)
y_b = y.AddBinding("b", source_set=[x_b], where=n1)
p.entrypoint = n1
self.assertFalse(n2.HasCombination([y_b, x_a]))
def test_empty_binding(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
a = x.AddBinding("a")
p.entrypoint = n1
self.assertEqual(x.Filter(n1), [])
self.assertEqual(x.Filter(n2), [])
a.AddOrigin(n2, [])
p.entrypoint = n1
self.assertEqual(x.Filter(n1), [])
self.assertEqual(x.Filter(n2), [a])
a.AddOrigin(n1, [a])
p.entrypoint = n1
self.assertEqual(x.Filter(n1), [a])
self.assertEqual(x.Filter(n2), [a])
def test_assign_to_new(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n2.ConnectNew("n3")
x = p.NewVariable()
ax = x.AddBinding("a", source_set=[], where=n1)
y = ax.AssignToNewVariable(n2)
(ay,) = y.bindings
z = y.AssignToNewVariable(n3)
(az,) = z.bindings
self.assertEqual([v.data for v in y.bindings], ["a"])
self.assertEqual([v.data for v in z.bindings], ["a"])
p.entrypoint = n1
self.assertTrue(n1.HasCombination([ax]))
self.assertTrue(n2.HasCombination([ax, ay]))
self.assertTrue(n3.HasCombination([ax, ay, az]))
self.assertFalse(n1.HasCombination([ax, ay]))
self.assertFalse(n2.HasCombination([ax, ay, az]))
def test_assign_to_new_no_node(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
ax = x.AddBinding("a", source_set=[], where=n1)
y = ax.AssignToNewVariable()
z = x.AssignToNewVariable()
(ox,) = x.bindings[0].origins
(oy,) = y.bindings[0].origins
(oz,) = z.bindings[0].origins
self.assertEqual(ox, oy, oz)
def test_paste_variable(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
ax = x.AddBinding("a", source_set=[], where=n1)
bx = x.AddBinding("b", source_set=[], where=n1)
y = p.NewVariable()
y.PasteVariable(x, n2)
ay, by = y.bindings
self.assertEqual([v.data for v in x.bindings], ["a", "b"])
self.assertEqual([v.data for v in y.bindings], ["a", "b"])
p.entrypoint = n1
self.assertTrue(n1.HasCombination([ax]))
self.assertTrue(n1.HasCombination([bx]))
self.assertFalse(n1.HasCombination([ay]))
self.assertFalse(n1.HasCombination([by]))
self.assertTrue(n2.HasCombination([ay]))
self.assertTrue(n2.HasCombination([by]))
def test_paste_at_same_node(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
x.AddBinding("a", source_set=[], where=n1)
x.AddBinding("b", source_set=[], where=n1)
y = p.NewVariable()
y.PasteVariable(x, n1)
ay, _ = y.bindings
self.assertEqual([v.data for v in x.bindings], ["a", "b"])
self.assertEqual([v.data for v in y.bindings], ["a", "b"])
(o,) = ay.origins
self.assertCountEqual([set()], o.source_sets)
(o,) = ay.origins
self.assertCountEqual([set()], o.source_sets)
def test_paste_with_additional_sources(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
x = p.NewVariable()
y = p.NewVariable()
z = p.NewVariable()
ax = x.AddBinding("a", source_set=[], where=n1)
by = y.AddBinding("b", source_set=[], where=n1)
z.PasteVariable(x, n2, {by})
(az,) = z.bindings
(origin,) = az.origins
(source_set,) = origin.source_sets
self.assertSetEqual(source_set, {ax, by})
def test_paste_at_same_node_with_additional_sources(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
y = p.NewVariable()
z = p.NewVariable()
_ = x.AddBinding("a", source_set=[], where=n1)
by = y.AddBinding("b", source_set=[], where=n1)
z.PasteVariable(x, n1, {by})
(az,) = z.bindings
(origin,) = az.origins
(source_set,) = origin.source_sets
self.assertSetEqual(source_set, {by})
def test_paste_binding(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
ax = x.AddBinding("a", source_set=[], where=n1)
y = p.NewVariable()
y.PasteBinding(ax)
self.assertEqual(x.data, y.data)
def test_id(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = p.NewCFGNode("n2")
x = p.NewVariable()
y = p.NewVariable()
self.assertIsInstance(x.id, int)
self.assertIsInstance(y.id, int)
self.assertLess(x.id, y.id)
self.assertIsInstance(n1.id, int)
self.assertIsInstance(n2.id, int)
self.assertLess(n1.id, n2.id)
def test_prune(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
n3 = n2.ConnectNew("n3")
n4 = n3.ConnectNew("n4")
n1.ConnectTo(n4)
x = p.NewVariable()
x.AddBinding(1, [], n1)
x.AddBinding(2, [], n2)
x.AddBinding(3, [], n3)
self.assertCountEqual([1], [v.data for v in x.Bindings(n1)])
self.assertCountEqual([2], [v.data for v in x.Bindings(n2)])
self.assertCountEqual([3], [v.data for v in x.Bindings(n3)])
self.assertCountEqual([1, 3], [v.data for v in x.Bindings(n4)])
self.assertCountEqual([1], x.Data(n1))
self.assertCountEqual([2], x.Data(n2))
self.assertCountEqual([3], x.Data(n3))
self.assertCountEqual([1, 3], x.Data(n4))
def test_prune_two_origins(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = p.NewCFGNode("n2")
n3 = p.NewCFGNode("n2")
n1.ConnectTo(n3)
n2.ConnectTo(n3)
x = p.NewVariable()
b = x.AddBinding(1)
b.AddOrigin(source_set=[], where=n1)
b.AddOrigin(source_set=[], where=n2)
self.assertEqual(len([v.data for v in x.Bindings(n3)]), 1)
def test_hidden_conflict3(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
z = p.NewVariable()
z_a = z.AddBinding("a", source_set=[], where=n1)
z_b = z.AddBinding("b", source_set=[], where=n1)
goals = []
for _ in range(5):
var = p.NewVariable()
v = var.AddBinding(".")
v.AddOrigin(source_set=[z_a], where=n1)
v.AddOrigin(source_set=[z_b], where=n1)
goals.append(v)
x = p.NewVariable()
x_b = x.AddBinding("a", source_set=[z_b], where=n1)
self.assertTrue(n2.HasCombination(goals + [x_b]))
def test_conflict_with_condition(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = n1.ConnectNew("n2")
z = p.NewVariable()
z_a = z.AddBinding("a", source_set=[], where=n1)
z_b = z.AddBinding("b", source_set=[], where=n1)
n1.condition = z_b
goals = []
for _ in range(5):
var = p.NewVariable()
v = var.AddBinding(".")
v.AddOrigin(source_set=[z_a], where=n1)
v.AddOrigin(source_set=[z_b], where=n1)
goals.append(v)
self.assertTrue(n2.HasCombination(goals))
def test_variable_properties(self):
p = cfg.Program()
n1 = p.NewCFGNode("n1")
n2 = p.NewCFGNode("n2")
n3 = p.NewCFGNode("n3")
v = p.NewVariable()
v.AddBinding("a", source_set=[], where=n1)
v.AddBinding("b", source_set=[], where=n2)
v.AddBinding("c", source_set=[], where=n3)
self.assertCountEqual(v.data, ["a", "b", "c"])
self.assertCountEqual(v.bindings, v.Bindings(None))
self.assertEqual(p, v.program)
def test_add_binding_iterables(self):
# source_set in Pytype is at times a tuple, list or set. They're all
# converted to SourceSets (essentially frozensets) when added to an Origin.
# This is more of a behavioral test than a specification test.
p = cfg.Program()
n1 = p.NewCFGNode("n1")
x = p.NewVariable()
x.AddBinding("a", source_set=(), where=n1)
x.AddBinding("b", source_set=[], where=n1)
x.AddBinding("c", source_set=set(), where=n1)
def test_calls_with_none(self):
# Several parts of the Python API have None as a default value for
# parameters. Make sure the C++ API can # also take None for those
# functions. These are mostly smoke tests.
p = cfg.Program()
n1 = p.NewCFGNode()
n2 = p.NewCFGNode(None)
self.assertEqual(n1.name, n2.name)
v1 = p.NewVariable()
v2 = p.NewVariable(None)
self.assertEqual(v1.bindings, [])
self.assertEqual(v1.bindings, v2.bindings)
v3 = p.NewVariable(None, None, None)
self.assertEqual(v1.bindings, v3.bindings)
n3 = n1.ConnectNew()
n4 = n1.ConnectNew(None)
self.assertEqual(n3.name, n4.name)
n5 = n1.ConnectNew(None, None)
self.assertEqual(n1.condition, n5.condition)
av = v1.AddBinding("a")
bv = v1.AddBinding("b", None, None)
v3 = av.AssignToNewVariable()
v1.PasteVariable(v2)
v1.PasteVariable(v2, None)
v1.PasteVariable(v2, None, None)
v2.PasteBinding(bv)
v2.PasteBinding(bv, None)
v2.PasteBinding(bv, None, None)
def test_program_default_data(self):
# Basic sanity check to make sure Program.default_data works.
p = cfg.Program()
self.assertIsNone(p.default_data)
p.default_data = 1
self.assertEqual(p.default_data, 1)
def test_condition_conflict(self):
# v1 = x or y or z # node_in
# condition = v1 is x # node_in
# if condition: # node_if
# assert v1 is x
# assert v1 is not y and v1 is not z
# else: # node_else
# assert v1 is not x
# assert v1 is y or v1 is z
p = cfg.Program()
node_in = p.NewCFGNode("node_in")
v1 = p.NewVariable()
bx = v1.AddBinding("x", [], node_in)
by = v1.AddBinding("y", [], node_in)
bz = v1.AddBinding("z", [], node_in)
condition_true = p.NewVariable().AddBinding(True, [bx], node_in)
condition_false = condition_true.variable.AddBinding(False, [by], node_in)
condition_false.AddOrigin(node_in, [bz])
b_if = p.NewVariable().AddBinding("if", [condition_true], node_in)
b_else = p.NewVariable().AddBinding("else", [condition_false], node_in)
node_if = node_in.ConnectNew("node_if", b_if)
node_else = node_in.ConnectNew("node_else", b_else)
self.assertTrue(b_if.IsVisible(node_if))
self.assertFalse(b_else.IsVisible(node_if))
self.assertFalse(b_if.IsVisible(node_else))
self.assertTrue(b_else.IsVisible(node_else))
def test_block_condition(self):
# v1 = x or y or z # node_in
# if v1 is x: # node_if
# v1 = w # node_block
# else: ... # node_else
# assert v1 is not x # node_out
p = cfg.Program()
node_in = p.NewCFGNode("node_in")
v1 = p.NewVariable()
bx = v1.AddBinding("x", [], node_in)
by = v1.AddBinding("y", [], node_in)
bz = v1.AddBinding("z", [], node_in)
b_if = p.NewVariable().AddBinding("if", [bx], node_in)
b_else = b_if.variable.AddBinding("else", [by], node_in)
b_else.AddOrigin(node_in, [bz])
node_if = node_in.ConnectNew("node_if", b_if)
node_else = node_in.ConnectNew("node_else", b_else)
node_block = node_if.ConnectNew("node_block")
v1.AddBinding("w", [], node_block)
node_out = node_block.ConnectNew("node_out")
node_else.ConnectTo(node_out)
b_out = p.NewVariable().AddBinding("x", [bx], node_out)
self.assertFalse(b_out.IsVisible(node_out))
def test_strict(self):
# Tests the existence of the strict keyword (but not its behavior).
p = cfg.Program()
node = p.NewCFGNode("root")
v = p.NewVariable()
b = v.AddBinding("x", [], node)
self.assertEqual(v.Filter(node, strict=False), [b])
self.assertEqual(v.FilteredData(node, strict=False), [b.data])
def test_binding_id(self):
p = cfg.Program()
v = p.NewVariable()
v.AddBinding("x", [], p.NewCFGNode("root"))
self.assertEqual(1, p.next_binding_id)
def test_paste_binding_with_new_data(self):
p = cfg.Program()
node = p.NewCFGNode("root")
v1 = p.NewVariable()
b1 = v1.AddBinding(["x"], [], node)
v2 = p.NewVariable()
b2 = v2.AddBinding(["y"], [b1], node)
v3 = p.NewVariable()
b3 = v3.PasteBindingWithNewData(b2, "z")
self.assertEqual(b3.data, "z")
self.assertEqual(b3.origins, b2.origins)
if __name__ == "__main__":
unittest.main()
| CFGTest |
python | pallets__jinja | src/jinja2/compiler.py | {
"start": 7665,
"end": 8217
} | class ____(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self) -> None:
self.filters: set[str] = set()
self.tests: set[str] = set()
def visit_Filter(self, node: nodes.Filter) -> None:
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node: nodes.Test) -> None:
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node: nodes.Block) -> None:
"""Stop visiting at blocks."""
| DependencyFinderVisitor |
python | sympy__sympy | sympy/plotting/pygletplot/plot_controller.py | {
"start": 168,
"end": 6941
} | class ____:
normal_mouse_sensitivity = 4.0
modified_mouse_sensitivity = 1.0
normal_key_sensitivity = 160.0
modified_key_sensitivity = 40.0
keymap = {
key.LEFT: 'left',
key.A: 'left',
key.NUM_4: 'left',
key.RIGHT: 'right',
key.D: 'right',
key.NUM_6: 'right',
key.UP: 'up',
key.W: 'up',
key.NUM_8: 'up',
key.DOWN: 'down',
key.S: 'down',
key.NUM_2: 'down',
key.Z: 'rotate_z_neg',
key.NUM_1: 'rotate_z_neg',
key.C: 'rotate_z_pos',
key.NUM_3: 'rotate_z_pos',
key.Q: 'spin_left',
key.NUM_7: 'spin_left',
key.E: 'spin_right',
key.NUM_9: 'spin_right',
key.X: 'reset_camera',
key.NUM_5: 'reset_camera',
key.NUM_ADD: 'zoom_in',
key.PAGEUP: 'zoom_in',
key.R: 'zoom_in',
key.NUM_SUBTRACT: 'zoom_out',
key.PAGEDOWN: 'zoom_out',
key.F: 'zoom_out',
key.RSHIFT: 'modify_sensitivity',
key.LSHIFT: 'modify_sensitivity',
key.F1: 'rot_preset_xy',
key.F2: 'rot_preset_xz',
key.F3: 'rot_preset_yz',
key.F4: 'rot_preset_perspective',
key.F5: 'toggle_axes',
key.F6: 'toggle_axe_colors',
key.F8: 'save_image'
}
def __init__(self, window, *, invert_mouse_zoom=False, **kwargs):
self.invert_mouse_zoom = invert_mouse_zoom
self.window = window
self.camera = window.camera
self.action = {
# Rotation around the view Y (up) vector
'left': False,
'right': False,
# Rotation around the view X vector
'up': False,
'down': False,
# Rotation around the view Z vector
'spin_left': False,
'spin_right': False,
# Rotation around the model Z vector
'rotate_z_neg': False,
'rotate_z_pos': False,
# Reset to the default rotation
'reset_camera': False,
# Performs camera z-translation
'zoom_in': False,
'zoom_out': False,
# Use alternative sensitivity (speed)
'modify_sensitivity': False,
# Rotation presets
'rot_preset_xy': False,
'rot_preset_xz': False,
'rot_preset_yz': False,
'rot_preset_perspective': False,
# axes
'toggle_axes': False,
'toggle_axe_colors': False,
# screenshot
'save_image': False
}
def update(self, dt):
z = 0
if self.action['zoom_out']:
z -= 1
if self.action['zoom_in']:
z += 1
if z != 0:
self.camera.zoom_relative(z/10.0, self.get_key_sensitivity()/10.0)
dx, dy, dz = 0, 0, 0
if self.action['left']:
dx -= 1
if self.action['right']:
dx += 1
if self.action['up']:
dy -= 1
if self.action['down']:
dy += 1
if self.action['spin_left']:
dz += 1
if self.action['spin_right']:
dz -= 1
if not self.is_2D():
if dx != 0:
self.camera.euler_rotate(dx*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[1]))
if dy != 0:
self.camera.euler_rotate(dy*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[0]))
if dz != 0:
self.camera.euler_rotate(dz*dt*self.get_key_sensitivity(),
*(get_direction_vectors()[2]))
else:
self.camera.mouse_translate(0, 0, dx*dt*self.get_key_sensitivity(),
-dy*dt*self.get_key_sensitivity())
rz = 0
if self.action['rotate_z_neg'] and not self.is_2D():
rz -= 1
if self.action['rotate_z_pos'] and not self.is_2D():
rz += 1
if rz != 0:
self.camera.euler_rotate(rz*dt*self.get_key_sensitivity(),
*(get_basis_vectors()[2]))
if self.action['reset_camera']:
self.camera.reset()
if self.action['rot_preset_xy']:
self.camera.set_rot_preset('xy')
if self.action['rot_preset_xz']:
self.camera.set_rot_preset('xz')
if self.action['rot_preset_yz']:
self.camera.set_rot_preset('yz')
if self.action['rot_preset_perspective']:
self.camera.set_rot_preset('perspective')
if self.action['toggle_axes']:
self.action['toggle_axes'] = False
self.camera.axes.toggle_visible()
if self.action['toggle_axe_colors']:
self.action['toggle_axe_colors'] = False
self.camera.axes.toggle_colors()
if self.action['save_image']:
self.action['save_image'] = False
self.window.plot.saveimage()
return True
def get_mouse_sensitivity(self):
if self.action['modify_sensitivity']:
return self.modified_mouse_sensitivity
else:
return self.normal_mouse_sensitivity
def get_key_sensitivity(self):
if self.action['modify_sensitivity']:
return self.modified_key_sensitivity
else:
return self.normal_key_sensitivity
def on_key_press(self, symbol, modifiers):
if symbol in self.keymap:
self.action[self.keymap[symbol]] = True
def on_key_release(self, symbol, modifiers):
if symbol in self.keymap:
self.action[self.keymap[symbol]] = False
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if buttons & LEFT:
if self.is_2D():
self.camera.mouse_translate(x, y, dx, dy)
else:
self.camera.spherical_rotate((x - dx, y - dy), (x, y),
self.get_mouse_sensitivity())
if buttons & MIDDLE:
self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy,
self.get_mouse_sensitivity()/20.0)
if buttons & RIGHT:
self.camera.mouse_translate(x, y, dx, dy)
def on_mouse_scroll(self, x, y, dx, dy):
self.camera.zoom_relative([1, -1][self.invert_mouse_zoom]*dy,
self.get_mouse_sensitivity())
def is_2D(self):
functions = self.window.plot._functions
for i in functions:
if len(functions[i].i_vars) > 1 or len(functions[i].d_vars) > 2:
return False
return True
| PlotController |
python | altair-viz__altair | altair/vegalite/v6/schema/mixins.py | {
"start": 42104,
"end": 50021
} | class ____(SchemaBase):
"""
BoxPlotDef schema wrapper.
Parameters
----------
box : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
clip : bool
Whether a composite mark be clipped to the enclosing group's width and height.
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
extent : float, Literal['min-max']
The extent of the whiskers. Available options include:
* ``"min-max"``: min and max are the lower and upper whiskers respectively.
* A number representing multiple of the interquartile range. This number will be
multiplied by the IQR to determine whisker boundary, which spans from the smallest
data to the largest data within the range *[Q1 - k * IQR, Q3 + k * IQR]* where
*Q1* and *Q3* are the first and third quartiles while *IQR* is the interquartile
range (*Q3-Q1*).
**Default value:** ``1.5``.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
median : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
opacity : float
The opacity (value between [0,1]) of the mark.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
Orientation of the box plot. This is normally automatically determined based on
types of fields on x and y channels. However, an explicit ``orient`` be specified
when the orientation is ambiguous.
**Default value:** ``"vertical"``.
outliers : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
rule : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
size : float
Size of the box and median tick of a box plot
ticks : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
"""
_schema = {"$ref": "#/definitions/BoxPlotDef"}
def __init__(
self,
box: Optional[bool | SchemaBase | Map] = Undefined,
clip: Optional[bool] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
extent: Optional[float | Literal["min-max"]] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
median: Optional[bool | SchemaBase | Map] = Undefined,
opacity: Optional[float] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outliers: Optional[bool | SchemaBase | Map] = Undefined,
rule: Optional[bool | SchemaBase | Map] = Undefined,
size: Optional[float] = Undefined,
ticks: Optional[bool | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
box=box,
clip=clip,
color=color,
extent=extent,
invalid=invalid,
median=median,
opacity=opacity,
orient=orient,
outliers=outliers,
rule=rule,
size=size,
ticks=ticks,
**kwds,
)
| _BoxPlotDef |
python | streamlit__streamlit | lib/streamlit/runtime/state/common.py | {
"start": 5527,
"end": 7760
} | class ____(Generic[T_co]):
"""Result returned by the `register_widget` family of functions/methods.
Should be usable by widget code to determine what value to return, and
whether to update the UI.
Parameters
----------
value : T_co
The widget's current value, or, in cases where the true widget value
could not be determined, an appropriate fallback value.
This value should be returned by the widget call.
value_changed : bool
True if the widget's value is different from the value most recently
returned from the frontend.
Implies an update to the frontend is needed.
"""
value: T_co
value_changed: bool
@classmethod
def failure(
cls, deserializer: WidgetDeserializer[T_co]
) -> RegisterWidgetResult[T_co]:
"""The canonical way to construct a RegisterWidgetResult in cases
where the true widget value could not be determined.
"""
return cls(value=deserializer(None), value_changed=False)
def user_key_from_element_id(element_id: str) -> str | None:
"""Return the user key portion of a element id, or None if the id does not
have a user key.
TODO This will incorrectly indicate no user key if the user actually provides
"None" as a key, but we can't avoid this kind of problem while storing the
string representation of the no-user-key sentinel as part of the element id.
"""
user_key: str | None = element_id.split("-", maxsplit=2)[-1]
return None if user_key == "None" else user_key
def is_element_id(key: str) -> bool:
"""True if the given session_state key has the structure of a element ID."""
return key.startswith(GENERATED_ELEMENT_ID_PREFIX)
def is_keyed_element_id(key: str) -> bool:
"""True if the given session_state key has the structure of a element ID
with a user_key.
"""
return is_element_id(key) and not key.endswith("-None")
def require_valid_user_key(key: str) -> None:
"""Raise an Exception if the given user_key is invalid."""
if is_element_id(key):
raise StreamlitAPIException(
f"Keys beginning with {GENERATED_ELEMENT_ID_PREFIX} are reserved."
)
| RegisterWidgetResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-typesense/destination_typesense/destination.py | {
"start": 1023,
"end": 3304
} | class ____(Destination):
def write(
self,
config: Mapping[str, Any],
configured_catalog: ConfiguredAirbyteCatalog,
input_messages: Iterable[AirbyteMessage],
) -> Iterable[AirbyteMessage]:
client = get_client(config=config)
for configured_stream in configured_catalog.streams:
steam_name = configured_stream.stream.name
if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite:
try:
client.collections[steam_name].delete()
except Exception:
pass
client.collections.create({"name": steam_name, "fields": [{"name": ".*", "type": "auto"}]})
writer = TypesenseWriter(client, config.get("batch_size"))
for message in input_messages:
if message.type == Type.STATE:
writer.flush()
yield message
elif message.type == Type.RECORD:
record = message.record
writer.queue_write_operation(record.stream, record.data)
else:
continue
writer.flush()
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
logger.debug("TypeSense Destination Config Check")
try:
client = get_client(config=config)
client.collections.create({"name": "_airbyte", "fields": [{"name": "title", "type": "string"}]})
writer = TypesenseWriter(client, config.get("batch_size", 10000))
writer.queue_write_operation("_airbyte", {"id": "1", "title": "The Hunger Games"})
writer.flush()
time.sleep(3)
client.collections["_airbyte"].documents["1"].retrieve()
status = AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
status = AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
finally:
try:
client = get_client(config=config)
client.collections["_airbyte"].delete()
except Exception:
logger.warning("Failed to delete _airbyte collection")
return status
| DestinationTypesense |
python | modin-project__modin | modin/tests/pandas/extensions/conftest.py | {
"start": 1423,
"end": 2211
} | class ____(BaseFactory):
@classmethod
def prepare(cls):
cls.io_cls = Test1IO
@pytest.fixture
def Backend1():
factories.Test1_Storage_FormatOnTest1_EngineFactory = Test1Factory
if "Backend1" not in Backend.choices:
StorageFormat.add_option("Test1_storage_format")
Engine.add_option("Test1_engine")
Backend.register_backend(
"Backend1",
Execution(storage_format="Test1_Storage_Format", engine="Test1_Engine"),
)
return "Backend1"
@pytest.fixture(
# sort the set of non-extendable attributes to make the sequence of test
# cases deterministic for pytest-xdist.
params=sorted(_NON_EXTENDABLE_ATTRIBUTES),
)
def non_extendable_attribute_name(request) -> str:
return request.param
| Test1Factory |
python | pytorch__pytorch | benchmarks/dynamo/dist_util.py | {
"start": 1170,
"end": 1412
} | class ____(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.net = nn.Sequential(
nn.Linear(a, b),
nn.ReLU(),
)
def forward(self, x):
return self.net(x)
| MyModule |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 7410,
"end": 7594
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ENABLED", "NO_POLICY")
| EnterpriseEnabledSettingValue |
python | django__django | tests/utils_tests/test_duration.py | {
"start": 2522,
"end": 3405
} | class ____(unittest.TestCase):
def test_simple(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_days(self):
duration = datetime.timedelta(days=1, hours=1, minutes=3, seconds=5)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_microseconds(self):
duration = datetime.timedelta(hours=1, minutes=3, seconds=5, microseconds=12345)
self.assertEqual(parse_duration(duration_iso_string(duration)), duration)
def test_negative(self):
duration = datetime.timedelta(days=-1, hours=1, minutes=3, seconds=5)
self.assertEqual(
parse_duration(duration_iso_string(duration)).total_seconds(),
duration.total_seconds(),
)
| TestParseISODurationRoundtrip |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingFalsy1.py | {
"start": 3998,
"end": 4034
} | class ____(TypedDict):
d1: int
| TD1 |
python | getsentry__sentry | tests/sentry/utils/test_committers.py | {
"start": 816,
"end": 1857
} | class ____(TestCase):
def setUp(self) -> None:
self.repo = Repository.objects.create(
organization_id=self.organization.id, name=self.organization.id
)
def create_commit_author(self, name=None, email=None):
return CommitAuthor.objects.create(
organization_id=self.organization.id,
name=name or f"Test Author {uuid4().hex[:8]}",
email=email or f"test{uuid4().hex[:8]}@example.com",
)
def create_commit(self, author=None):
return Commit.objects.create(
organization_id=self.organization.id,
repository_id=self.repo.id,
key=uuid4().hex,
author=author,
)
def create_commitfilechange(self, commit=None, filename=None, type=None):
return CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=(commit or self.create_commit()).id,
filename=filename or "foo.bar",
type=type or "M",
)
| CommitTestCase |
python | openai__openai-python | src/openai/types/responses/response_function_web_search_param.py | {
"start": 1000,
"end": 1346
} | class ____(TypedDict, total=False):
pattern: Required[str]
"""The pattern or text to search for within the page."""
type: Required[Literal["find"]]
"""The action type."""
url: Required[str]
"""The URL of the page searched for the pattern."""
Action: TypeAlias = Union[ActionSearch, ActionOpenPage, ActionFind]
| ActionFind |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 376165,
"end": 376778
} | class ____(VegaLiteSchema):
"""
FacetMapping schema wrapper.
Parameters
----------
column : dict, :class:`FacetFieldDef`
A field definition for the horizontal facet of trellis plots.
row : dict, :class:`FacetFieldDef`
A field definition for the vertical facet of trellis plots.
"""
_schema = {"$ref": "#/definitions/FacetMapping"}
def __init__(
self,
column: Optional[SchemaBase | Map] = Undefined,
row: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(column=column, row=row, **kwds)
| FacetMapping |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 32817,
"end": 36845
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`XCLIPVisionEncoderLayer`].
Args:
config: XCLIPConfig
"""
def __init__(self, config: XCLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([XCLIPVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| XCLIPVisionEncoder |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/batchtospace_op_test.py | {
"start": 10800,
"end": 12502
} | class ____(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_shape, crops, crops_dtype):
block_shape = np.array(block_shape)
crops = constant_op.constant(
np.array(crops).reshape((len(block_shape), 2)), crops_dtype)
with self.cached_session():
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.batch_to_space_nd(tf_x, block_shape, crops)
epsilon = 1e-5
((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
def _compare(self, input_shape, block_shape, crops, crops_dtype):
input_shape = list(input_shape)
input_shape[0] *= np.prod(block_shape)
x = np.random.normal(
0, 1, np.prod(input_shape)).astype(np.float32).reshape(input_shape)
self._checkGrad(x, block_shape, crops, crops_dtype)
# Don't use very large numbers as dimensions here as the result is tensor
# with cartesian product of the dimensions.
@test_util.run_deprecated_v1
def testSmall(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmall2(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([2, 4, 3, 2], [2, 2], [[0, 0], [0, 0]], dtype)
@test_util.run_deprecated_v1
def testSmallCrop1x1(self):
for dtype in [dtypes.int64, dtypes.int32]:
self._compare([1, 2, 3, 5], [2, 2], [[1, 1], [1, 1]], dtype)
if __name__ == "__main__":
test.main()
| BatchToSpaceNDGradientTest |
python | openai__openai-python | src/openai/types/evals/runs/output_item_list_response.py | {
"start": 2165,
"end": 2937
} | class ____(BaseModel):
error: EvalAPIError
"""An object representing an error response from the Eval API."""
finish_reason: str
"""The reason why the sample generation was finished."""
input: List[SampleInput]
"""An array of input messages."""
max_completion_tokens: int
"""The maximum number of tokens allowed for completion."""
model: str
"""The model used for generating the sample."""
output: List[SampleOutput]
"""An array of output messages."""
seed: int
"""The seed used for generating the sample."""
temperature: float
"""The sampling temperature used."""
top_p: float
"""The top_p value used for sampling."""
usage: SampleUsage
"""Token usage details for the sample."""
| Sample |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass11.py | {
"start": 326,
"end": 386
} | class ____(A):
pass
# This should generate an error.
B()
| B |
python | doocs__leetcode | solution/0200-0299/0217.Contains Duplicate/Solution.py | {
"start": 0,
"end": 136
} | class ____:
def containsDuplicate(self, nums: List[int]) -> bool:
return any(a == b for a, b in pairwise(sorted(nums)))
| Solution |
python | readthedocs__readthedocs.org | readthedocs/gold/tests/test_views.py | {
"start": 219,
"end": 1110
} | class ____(PaymentMixin, TestCase):
def setUp(self):
super().setUp()
self.user = get(User)
def test_csp_headers(self):
"""
Test CSP headers aren't altered.
This view originally altered the CSP directives based on whether we were
using the new dashboard. We weren't using inline scripts in this view
however, so this was reverted. The tests remain for now, but aren't
super useful and will break when we change `script-src` in base settings.
"""
self.client.force_login(self.user)
csp_header = "Content-Security-Policy"
script_src_regex = re.compile(r".*\s+script-src [^;]*'unsafe-inline'")
url = reverse("gold_detail")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIsNone(script_src_regex.match(resp[csp_header]))
| TestViews |
python | scikit-learn__scikit-learn | sklearn/model_selection/_plot.py | {
"start": 4163,
"end": 19725
} | class ____(_BaseCurveDisplay):
"""Learning Curve visualization.
It is recommended to use
:meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to
create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance.
All parameters are stored as attributes.
Read more in the :ref:`User Guide <visualizations>` for general information
about the visualization API and
:ref:`detailed documentation <learning_curve>` regarding the learning
curve visualization.
.. versionadded:: 1.2
Parameters
----------
train_sizes : ndarray of shape (n_unique_ticks,)
Numbers of training examples that has been used to generate the
learning curve.
train_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on test set.
score_name : str, default=None
The name of the score used in `learning_curve`. It will override the name
inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
`negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
string or a callable, we infer the name. We replace `_` by spaces and capitalize
the first letter. We remove `neg_` and replace it by `"Negative"` if
`negate_score` is `False` or just remove it otherwise.
Attributes
----------
ax_ : matplotlib Axes
Axes with the learning curve.
figure_ : matplotlib Figure
Figure containing the learning curve.
errorbar_ : list of matplotlib Artist or None
When the `std_display_style` is `"errorbar"`, this is a list of
`matplotlib.container.ErrorbarContainer` objects. If another style is
used, `errorbar_` is `None`.
lines_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.lines.Line2D` objects corresponding to the mean train and
test scores. If another style is used, `line_` is `None`.
fill_between_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.collections.PolyCollection` objects. If another style is
used, `fill_between_` is `None`.
See Also
--------
sklearn.model_selection.learning_curve : Compute the learning curve.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> tree = DecisionTreeClassifier(random_state=0)
>>> train_sizes, train_scores, test_scores = learning_curve(
... tree, X, y)
>>> display = LearningCurveDisplay(train_sizes=train_sizes,
... train_scores=train_scores, test_scores=test_scores, score_name="Score")
>>> display.plot()
<...>
>>> plt.show()
"""
def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
self.train_sizes = train_sizes
self.train_scores = train_scores
self.test_scores = test_scores
self.score_name = score_name
def plot(
self,
ax=None,
*,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
"""
self._plot_curve(
self.train_sizes,
ax=ax,
negate_score=negate_score,
score_name=score_name,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
self.ax_.set_xlabel("Number of samples in the training set")
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
groups=None,
train_sizes=np.linspace(0.1, 1.0, 5),
cv=None,
scoring=None,
exploit_incremental_learning=False,
n_jobs=None,
pre_dispatch="all",
verbose=0,
shuffle=False,
random_state=None,
error_score=np.nan,
fit_params=None,
ax=None,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Create a learning curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <learning_curve>` regarding the learning curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
train_sizes : array-like of shape (n_ticks,), \
default=np.linspace(0.1, 1.0, 5)
Relative or absolute numbers of training examples that will be used
to generate the learning curve. If the dtype is float, it is
regarded as a fraction of the maximum size of the training set
(that is determined by the selected validation method), i.e. it has
to be within (0, 1]. Otherwise it is interpreted as absolute sizes
of the training sets. Note that for classification the number of
samples usually have to be big enough to contain at least one
sample from each class.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
The scoring method to use when calculating the learning curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
exploit_incremental_learning : bool, default=False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
shuffle : bool, default=False
Whether to shuffle training data before taking prefixes of it
based on`train_sizes`.
random_state : int, RandomState instance or None, default=None
Used when `shuffle` is True. Pass an int for reproducible
output across multiple function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import LearningCurveDisplay
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> tree = DecisionTreeClassifier(random_state=0)
>>> LearningCurveDisplay.from_estimator(tree, X, y)
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
score_name = _validate_score_name(score_name, scoring, negate_score)
train_sizes, train_scores, test_scores = learning_curve(
estimator,
X,
y,
groups=groups,
train_sizes=train_sizes,
cv=cv,
scoring=scoring,
exploit_incremental_learning=exploit_incremental_learning,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
verbose=verbose,
shuffle=shuffle,
random_state=random_state,
error_score=error_score,
return_times=False,
params=fit_params,
)
viz = cls(
train_sizes=train_sizes,
train_scores=train_scores,
test_scores=test_scores,
score_name=score_name,
)
return viz.plot(
ax=ax,
negate_score=negate_score,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
| LearningCurveDisplay |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/association_proxy/association_proxy_two.py | {
"start": 570,
"end": 1022
} | class ____(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column(String(64))
kw: Mapped[list[Keyword]] = relationship(
secondary=lambda: user_keyword_table
)
def __init__(self, name: str):
self.name = name
# proxy the 'keyword' attribute from the 'kw' relationship
keywords: AssociationProxy[list[str]] = association_proxy("kw", "keyword")
| User |
python | sympy__sympy | sympy/stats/symbolic_multivariate_probability.py | {
"start": 3799,
"end": 6730
} | class ____(Variance, MatrixExpr):
"""
Variance of a random matrix probability expression. Also known as
Covariance matrix, auto-covariance matrix, dispersion matrix,
or variance-covariance matrix.
Examples
========
>>> from sympy.stats import VarianceMatrix
>>> from sympy.stats.rv import RandomMatrixSymbol
>>> from sympy import symbols, MatrixSymbol
>>> k = symbols("k")
>>> A, B = MatrixSymbol("A", k, k), MatrixSymbol("B", k, k)
>>> X, Y = RandomMatrixSymbol("X", k, 1), RandomMatrixSymbol("Y", k, 1)
>>> VarianceMatrix(X)
VarianceMatrix(X)
>>> VarianceMatrix(X).shape
(k, k)
To expand the variance in its expression, use ``expand()``:
>>> VarianceMatrix(A*X).expand()
A*VarianceMatrix(X)*A.T
>>> VarianceMatrix(A*X + B*Y).expand()
2*A*CrossCovarianceMatrix(X, Y)*B.T + A*VarianceMatrix(X)*A.T + B*VarianceMatrix(Y)*B.T
"""
def __new__(cls, arg, condition=None):
arg = _sympify(arg)
if 1 not in arg.shape:
raise ShapeError("Expression is not a vector")
shape = (arg.shape[0], arg.shape[0]) if arg.shape[1] == 1 else (arg.shape[1], arg.shape[1])
if condition:
obj = Expr.__new__(cls, arg, condition)
else:
obj = Expr.__new__(cls, arg)
obj._shape = shape
obj._condition = condition
return obj
@property
def shape(self):
return self._shape
def expand(self, **hints):
arg = self.args[0]
condition = self._condition
if not is_random(arg):
return ZeroMatrix(*self.shape)
if isinstance(arg, RandomSymbol):
return self
elif isinstance(arg, Add):
rv = []
for a in arg.args:
if is_random(a):
rv.append(a)
variances = Add(*(Variance(xv, condition).expand() for xv in rv))
map_to_covar = lambda x: 2*Covariance(*x, condition=condition).expand()
covariances = Add(*map(map_to_covar, itertools.combinations(rv, 2)))
return variances + covariances
elif isinstance(arg, (Mul, MatMul)):
nonrv = []
rv = []
for a in arg.args:
if is_random(a):
rv.append(a)
else:
nonrv.append(a)
if len(rv) == 0:
return ZeroMatrix(*self.shape)
# Avoid possible infinite loops with MatMul:
if len(nonrv) == 0:
return self
# Variance of many multiple matrix products is not implemented:
if len(rv) > 1:
return self
return Mul.fromiter(nonrv)*Variance(Mul.fromiter(rv),
condition)*(Mul.fromiter(nonrv)).transpose()
# this expression contains a RandomSymbol somehow:
return self
| VarianceMatrix |
python | readthedocs__readthedocs.org | readthedocs/organizations/views/private.py | {
"start": 6072,
"end": 6445
} | class ____(
PrivateViewMixin,
UpdateChangeReasonPostView,
OrganizationTeamView,
DeleteViewWithMessage,
):
http_method_names = ["post"]
success_message = _("Team deleted")
def get_success_url(self):
return reverse_lazy(
"organization_team_list",
args=[self.get_organization().slug],
)
| DeleteOrganizationTeam |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 51459,
"end": 54501
} | class ____(CheckTestCase):
def test_autocomplete_e036(self):
class Admin(ModelAdmin):
autocomplete_fields = "name"
self.assertIsInvalid(
Admin,
Band,
msg="The value of 'autocomplete_fields' must be a list or tuple.",
id="admin.E036",
invalid_obj=Admin,
)
def test_autocomplete_e037(self):
class Admin(ModelAdmin):
autocomplete_fields = ("nonexistent",)
self.assertIsInvalid(
Admin,
ValidationTestModel,
msg=(
"The value of 'autocomplete_fields[0]' refers to 'nonexistent', "
"which is not a field of 'modeladmin.ValidationTestModel'."
),
id="admin.E037",
invalid_obj=Admin,
)
def test_autocomplete_e38(self):
class Admin(ModelAdmin):
autocomplete_fields = ("name",)
self.assertIsInvalid(
Admin,
ValidationTestModel,
msg=(
"The value of 'autocomplete_fields[0]' must be a foreign "
"key or a many-to-many field."
),
id="admin.E038",
invalid_obj=Admin,
)
def test_autocomplete_e039(self):
class Admin(ModelAdmin):
autocomplete_fields = ("band",)
self.assertIsInvalid(
Admin,
Song,
msg=(
'An admin for model "Band" has to be registered '
"to be referenced by Admin.autocomplete_fields."
),
id="admin.E039",
invalid_obj=Admin,
)
def test_autocomplete_e040(self):
class NoSearchFieldsAdmin(ModelAdmin):
pass
class AutocompleteAdmin(ModelAdmin):
autocomplete_fields = ("featuring",)
site = AdminSite()
site.register(Band, NoSearchFieldsAdmin)
self.assertIsInvalid(
AutocompleteAdmin,
Song,
msg=(
'NoSearchFieldsAdmin must define "search_fields", because '
"it's referenced by AutocompleteAdmin.autocomplete_fields."
),
id="admin.E040",
invalid_obj=AutocompleteAdmin,
admin_site=site,
)
def test_autocomplete_is_valid(self):
class SearchFieldsAdmin(ModelAdmin):
search_fields = "name"
class AutocompleteAdmin(ModelAdmin):
autocomplete_fields = ("featuring",)
site = AdminSite()
site.register(Band, SearchFieldsAdmin)
self.assertIsValid(AutocompleteAdmin, Song, admin_site=site)
def test_autocomplete_is_onetoone(self):
class UserAdmin(ModelAdmin):
search_fields = ("name",)
class Admin(ModelAdmin):
autocomplete_fields = ("best_friend",)
site = AdminSite()
site.register(User, UserAdmin)
self.assertIsValid(Admin, ValidationTestModel, admin_site=site)
| AutocompleteFieldsTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_symbol_annotations/lifecycle.py | {
"start": 15109,
"end": 28398
} | class ____:
breaking_version: str
hidden: bool
additional_warn_text: Optional[str]
subject: Optional[str]
@overload
def deprecated(
__obj: T_Annotatable,
*,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
subject: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> T_Annotatable: ...
@overload
def deprecated(
__obj: None = ...,
*,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
subject: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> Callable[[T_Annotatable], T_Annotatable]: ...
def deprecated(
__obj: Optional[T_Annotatable] = None,
*,
breaking_version: str,
additional_warn_text: Optional[str] = None,
subject: Optional[str] = None,
emit_runtime_warning: bool = True,
) -> Union[T_Annotatable, Callable[[T_Annotatable], T_Annotatable]]:
"""Mark an object as deprecated. This appends some metadata to the object that causes it to be
rendered with a "deprecated" tag and associated warning in the docs.
If `emit_runtime_warning` is True, a warning will also be emitted when the function is called,
having the same text as is displayed in the docs. For consistency between docs and runtime
warnings, this decorator is preferred to manual calls to `deprecation_warning`.
Args:
breaking_version (str): The version at which the deprecated function will be removed.
additional_warn_text (Optional[str]): Additional text to display after the deprecation warning.
Typically this should suggest a newer API.
subject (Optional[str]): The subject of the deprecation warning. Defaults to a string
representation of the decorated object. This is useful when marking usage of
a deprecated API inside an otherwise non-deprecated function, so
that it can be easily cleaned up later. It should only be used with
`emit_runtime_warning=False`, as we don't want to warn users when a
deprecated API is used internally.
emit_runtime_warning (bool): Whether to emit a warning when the function is called.
Usage:
.. code-block:: python
@deprecated(breaking_version="2.0", additional_warn_text="Use my_new_function instead")
def my_deprecated_function(my_arg):
...
@deprecated(breaking_version="2.0", additional_warn_text="Use MyNewClass instead")
class MyDeprecatedClass:
...
@deprecated(breaking_version="2.0", subject="some_deprecated_function", emit_runtime_warning=False)
def not_deprecated_function():
...
some_deprecated_function()
...
"""
if __obj is None:
return partial(
deprecated,
subject=subject,
emit_runtime_warning=emit_runtime_warning,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
)
else:
target = _get_annotation_target(__obj)
setattr(
target,
_DEPRECATED_ATTR_NAME,
DeprecatedInfo(
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
subject=subject,
hidden=False,
),
)
if emit_runtime_warning:
stack_level = _get_warning_stacklevel(__obj)
subject = subject or _get_subject(__obj)
warning_fn = partial(
deprecation_warning,
subject=subject,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
stacklevel=stack_level,
)
return apply_pre_call_decorator(__obj, warning_fn)
else:
return __obj
def is_deprecated(obj: Annotatable) -> bool:
target = _get_annotation_target(obj)
return hasattr(target, _DEPRECATED_ATTR_NAME)
def get_deprecated_info(obj: Annotatable) -> DeprecatedInfo:
target = _get_annotation_target(obj)
return getattr(target, _DEPRECATED_ATTR_NAME)
# ########################
# ##### DEPRECATED PARAM
# ########################
_DEPRECATED_PARAM_ATTR_NAME: Final[str] = "_deprecated_params"
@overload
def deprecated_param(
__obj: T_Annotatable,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> T_Annotatable: ...
@overload
def deprecated_param(
__obj: None = ...,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> Callable[[T_Annotatable], T_Annotatable]: ...
def deprecated_param(
__obj: Optional[T_Annotatable] = None,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = None,
emit_runtime_warning: bool = True,
) -> T_Annotatable:
"""Mark a parameter of a class initializer or function/method as deprecated. This appends some
metadata to the decorated object that causes the specified argument to be rendered with a
"deprecated" tag and associated warning in the docs.
If `emit_runtime_warning` is True, a warning will also be emitted when the function is called
and a non-None value is passed for the parameter. For consistency between docs and runtime
warnings, this decorator is preferred to manual calls to `deprecation_warning`. Note that the
warning will only be emitted if the value is passed as a keyword argument.
Args:
param (str): The name of the parameter to deprecate.
breaking_version (str): The version at which the deprecated function will be removed.
additional_warn_text (str): Additional text to display after the deprecation warning.
Typically this should suggest a newer API.
emit_runtime_warning (bool): Whether to emit a warning when the function is called.
hidden (bool): Whether or not this is a hidden parameters. Hidden parameters are only
passed via kwargs and are hidden from the type signature. This makes it so
that this hidden parameter does not appear in typeaheads. In order to provide
high quality error messages we also provide the helper function
only_allow_hidden_params_in_kwargs to ensure there are high quality
error messages if the user passes an unsupported keyword argument.
"""
if __obj is None:
return partial( # type: ignore
deprecated_param,
param=param,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
emit_runtime_warning=emit_runtime_warning,
)
else:
return attach_deprecation_info_and_wrap(
__obj,
param=param,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
emit_runtime_warning=emit_runtime_warning,
hidden=False,
)
def _param_is_used(*_, __param: str, **kwargs):
return kwargs.get(__param) is not None
def attach_deprecation_info_and_wrap(
obj: T_Annotatable,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = None,
emit_runtime_warning: bool = True,
hidden: bool = False,
) -> T_Annotatable:
if not hidden:
check.invariant(
_annotatable_has_param(obj, param),
f"Attempted to mark undefined parameter `{param}` deprecated.",
)
target = _get_annotation_target(obj)
if not hasattr(target, _DEPRECATED_PARAM_ATTR_NAME):
setattr(target, _DEPRECATED_PARAM_ATTR_NAME, {})
getattr(target, _DEPRECATED_PARAM_ATTR_NAME)[param] = DeprecatedInfo(
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
hidden=hidden,
subject=None,
)
if not emit_runtime_warning:
return obj
warning_fn = partial(
deprecation_warning,
_get_subject(obj, param=param),
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
stacklevel=3,
)
return apply_pre_call_decorator(
obj,
warning_fn,
condition=partial(_param_is_used, __param=param),
)
@overload
def hidden_param(
__obj: T_Annotatable,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> T_Annotatable: ...
@overload
def hidden_param(
__obj: None = ...,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = ...,
emit_runtime_warning: bool = ...,
) -> Callable[[T_Annotatable], T_Annotatable]: ...
def hidden_param(
__obj: Optional[T_Annotatable] = None,
*,
param: str,
breaking_version: str,
additional_warn_text: Optional[str] = None,
emit_runtime_warning: bool = True,
) -> T_Annotatable:
"""Hidden parameters are only passed via kwargs and are hidden from the
type signature. This makes it so that this hidden parameter does not
appear in typeaheads. In order to provide high quality error messages
we also provide the helper function only_allow_hidden_params_in_kwargs
to ensure there are high quality error messages if the user passes
an unsupported keyword argument.
Args:
breaking_version (str): The version at which the deprecated function will be removed.
additional_warn_text (Optional[str]): Additional text to display after the deprecation warning.
Typically this should suggest a newer API.
subject (Optional[str]): The subject of the deprecation warning. Defaults to a string
representation of the decorated object. This is useful when marking usage of
a deprecated API inside an otherwise non-deprecated function, so
that it can be easily cleaned up later. It should only be used with
`emit_runtime_warning=False`, as we don't want to warn users when a
deprecated API is used internally.
emit_runtime_warning (bool): Whether to emit a warning when the function is called.
Usage:
.. code-block:: python
@hidden_param(breaking_version="2.0", additional_warn_text="Use my_new_function instead")
def func_with_hidden_args(**kwargs):
only_allow_hidden_params_in_kwargs(func_with_hidden_args, kwargs)
"""
if __obj is None:
return partial( # type: ignore
hidden_param,
param=param,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
emit_runtime_warning=emit_runtime_warning,
)
else:
return attach_deprecation_info_and_wrap(
__obj,
param=param,
breaking_version=breaking_version,
additional_warn_text=additional_warn_text,
emit_runtime_warning=emit_runtime_warning,
hidden=True,
)
def has_deprecated_params(obj: Annotatable) -> bool:
return hasattr(_get_annotation_target(obj), _DEPRECATED_PARAM_ATTR_NAME)
def get_deprecated_params(obj: Annotatable) -> Mapping[str, DeprecatedInfo]:
return getattr(_get_annotation_target(obj), _DEPRECATED_PARAM_ATTR_NAME)
def is_deprecated_param(obj: Annotatable, param_name: str) -> bool:
target = _get_annotation_target(obj)
return param_name in getattr(target, _DEPRECATED_PARAM_ATTR_NAME, {})
def get_deprecated_param_info(obj: Annotatable, param_name: str) -> DeprecatedInfo:
target = _get_annotation_target(obj)
return getattr(target, _DEPRECATED_PARAM_ATTR_NAME)[param_name]
def only_allow_hidden_params_in_kwargs(annotatable: Annotatable, kwargs: Mapping[str, Any]) -> None:
deprecated_params = (
get_deprecated_params(annotatable) if has_deprecated_params(annotatable) else {}
)
for param in kwargs:
if param not in deprecated_params:
raise TypeError(f"{annotatable.__name__} got an unexpected keyword argument '{param}'")
check.invariant(
deprecated_params[param].hidden,
f"Unexpected non-hidden deprecated parameter '{param}' in kwargs. Should never get here.",
)
# ########################
# ##### HELPERS
# ########################
def _get_warning_stacklevel(obj: Annotatable):
"""Get the stacklevel to use for warnings that are attached to a target via decorator.
The goal is to have the warning point to the line where the function in the
underlying object is actually invoked. This isn't straightforward
because some objects have complicated logic in between `__call__` and
the site at which a wrapped function containing the warning is actually
called. Can be determined through trial and error.
"""
if is_resource_def(obj):
return 5
else:
return 3
def _annotatable_has_param(obj: Annotatable, param: str) -> bool:
target_fn = get_decorator_target(obj)
return param in inspect.signature(target_fn).parameters
| DeprecatedInfo |
python | TheAlgorithms__Python | data_structures/queues/priority_queue_using_list.py | {
"start": 201,
"end": 2636
} | class ____:
"""
Tasks can be added to a Priority Queue at any time and in any order but when Tasks
are removed then the Task with the highest priority is removed in FIFO order. In
code we will use three levels of priority with priority zero Tasks being the most
urgent (high priority) and priority 2 tasks being the least urgent.
Examples
>>> fpq = FixedPriorityQueue()
>>> fpq.enqueue(0, 10)
>>> fpq.enqueue(1, 70)
>>> fpq.enqueue(0, 100)
>>> fpq.enqueue(2, 1)
>>> fpq.enqueue(2, 5)
>>> fpq.enqueue(1, 7)
>>> fpq.enqueue(2, 4)
>>> fpq.enqueue(1, 64)
>>> fpq.enqueue(0, 128)
>>> print(fpq)
Priority 0: [10, 100, 128]
Priority 1: [70, 7, 64]
Priority 2: [1, 5, 4]
>>> fpq.dequeue()
10
>>> fpq.dequeue()
100
>>> fpq.dequeue()
128
>>> fpq.dequeue()
70
>>> fpq.dequeue()
7
>>> print(fpq)
Priority 0: []
Priority 1: [64]
Priority 2: [1, 5, 4]
>>> fpq.dequeue()
64
>>> fpq.dequeue()
1
>>> fpq.dequeue()
5
>>> fpq.dequeue()
4
>>> fpq.dequeue()
Traceback (most recent call last):
...
data_structures.queues.priority_queue_using_list.UnderFlowError: All queues are empty
>>> print(fpq)
Priority 0: []
Priority 1: []
Priority 2: []
""" # noqa: E501
def __init__(self):
self.queues = [
[],
[],
[],
]
def enqueue(self, priority: int, data: int) -> None:
"""
Add an element to a queue based on its priority.
If the priority is invalid ValueError is raised.
If the queue is full an OverFlowError is raised.
"""
try:
if len(self.queues[priority]) >= 100:
raise OverflowError("Maximum queue size is 100")
self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def dequeue(self) -> int:
"""
Return the highest priority element in FIFO order.
If the queue is empty then an under flow exception is raised.
"""
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__(self) -> str:
return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
| FixedPriorityQueue |
python | scipy__scipy | scipy/io/matlab/_mio5.py | {
"start": 30468,
"end": 33989
} | class ____:
''' Class for writing mat5 files '''
@docfiller
def __init__(self, file_stream,
do_compression=False,
unicode_strings=False,
global_vars=None,
long_field_names=False,
oned_as='row'):
''' Initialize writer for matlab 5 format files
Parameters
----------
%(do_compression)s
%(unicode_strings)s
global_vars : None or sequence of strings, optional
Names of variables to be marked as global for matlab
%(long_fields)s
%(oned_as)s
'''
self.file_stream = file_stream
self.do_compression = do_compression
self.unicode_strings = unicode_strings
if global_vars:
self.global_vars = global_vars
else:
self.global_vars = []
self.long_field_names = long_field_names
self.oned_as = oned_as
self._matrix_writer = None
def write_file_header(self):
# write header
hdr = np.zeros((), NDT_FILE_HDR)
hdr['description'] = (f'MATLAB 5.0 MAT-file Platform: {os.name}, '
f'Created on: {time.asctime()}')
hdr['version'] = 0x0100
hdr['endian_test'] = np.ndarray(shape=(),
dtype='S2',
buffer=np.uint16(0x4d49))
self.file_stream.write(hdr.tobytes())
def put_variables(self, mdict, write_header=None):
''' Write variables in `mdict` to stream
Parameters
----------
mdict : mapping
mapping with method ``items`` returns name, contents pairs where
``name`` which will appear in the matlab workspace in file load, and
``contents`` is something writeable to a matlab file, such as a NumPy
array.
write_header : {None, True, False}, optional
If True, then write the matlab file header before writing the
variables. If None (the default) then write the file header
if we are at position 0 in the stream. By setting False
here, and setting the stream position to the end of the file,
you can append variables to a matlab file
'''
# write header if requested, or None and start of file
if write_header is None:
write_header = self.file_stream.tell() == 0
if write_header:
self.write_file_header()
self._matrix_writer = VarWriter5(self)
for name, var in mdict.items():
if name[0] == '_':
msg = (f"Starting field name with a "
f"underscore ({name}) is ignored")
warnings.warn(msg, MatWriteWarning, stacklevel=2)
continue
is_global = name in self.global_vars
if self.do_compression:
stream = BytesIO()
self._matrix_writer.file_stream = stream
self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
out_str = zlib.compress(stream.getvalue())
tag = np.empty((), NDT_TAG_FULL)
tag['mdtype'] = miCOMPRESSED
tag['byte_count'] = len(out_str)
self.file_stream.write(tag.tobytes())
self.file_stream.write(out_str)
else: # not compressing
self._matrix_writer.write_top(var, name.encode('latin1'), is_global)
| MatFile5Writer |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 74833,
"end": 75379
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float)
self.relu = torch.nn.ReLU()
self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float)
def forward(self, x):
x = self.fc1(x)
x = self.relu(x)
x = torch.add(x, 5)
x = self.fc2(x)
self.relu = torch.nn.ReLU()
return x
def get_example_inputs(self) -> tuple[Any, ...]:
return (torch.rand(1, 3, 5, 5),)
| ConvReluAddModel |
python | astropy__astropy | astropy/uncertainty/tests/test_functions.py | {
"start": 1124,
"end": 1913
} | class ____(ArraySetup):
def test_concatenate(self):
# Concatenate needs consistent shapes.
db = self.db[np.newaxis]
concat_a_b = np.concatenate((self.da, db), axis=0)
expected_distr = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
assert_array_equal(concat_a_b.distribution, expected_distr)
def test_concatenate_not_all_distribution(self):
concat_c_a = np.concatenate((self.c, self.da), axis=1)
assert isinstance(concat_c_a, Distribution)
c_bcst = np.broadcast_to(
self.c[..., np.newaxis], self.c.shape + (self.da.n_samples,), subok=True
)
expected_distr = np.concatenate((c_bcst, self.a), axis=1)
assert_array_equal(concat_c_a.distribution, expected_distr)
| TestConcatenation |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 3998,
"end": 4196
} | class ____(BaseModel):
properties: dict[str, ParamProperty]
type: Literal["object"] = "object"
required: list[str] | None = None
additionalProperties: bool | None = None
| FunctionParams |
python | doocs__leetcode | solution/3200-3299/3249.Count the Number of Good Nodes/Solution.py | {
"start": 0,
"end": 656
} | class ____:
def countGoodNodes(self, edges: List[List[int]]) -> int:
def dfs(a: int, fa: int) -> int:
pre = -1
cnt = ok = 1
for b in g[a]:
if b != fa:
cur = dfs(b, a)
cnt += cur
if pre < 0:
pre = cur
elif pre != cur:
ok = 0
nonlocal ans
ans += ok
return cnt
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
g[b].append(a)
ans = 0
dfs(0, -1)
return ans
| Solution |
python | pydata__xarray | xarray/computation/apply_ufunc.py | {
"start": 1800,
"end": 47494
} | class ____:
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple, ...]
Core dimension names on each input variable.
output_core_dims : tuple[tuple, ...]
Core dimension names on each output variable.
"""
__slots__ = (
"_all_core_dims",
"_all_input_core_dims",
"_all_output_core_dims",
"input_core_dims",
"output_core_dims",
)
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims
)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims
)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims
return self._all_core_dims
@property
def dims_map(self):
return {
core_dim: f"dim{n}" for n, core_dim in enumerate(sorted(self.all_core_dims))
}
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (
self.input_core_dims == other.input_core_dims
and self.output_core_dims == other.output_core_dims
)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return f"{type(self).__name__}({list(self.input_core_dims)!r}, {list(self.output_core_dims)!r})"
def __str__(self):
comma_separated = ",".join
lhs = comma_separated(
f"({comma_separated(dims)})" for dims in self.input_core_dims
)
rhs = comma_separated(
f"({comma_separated(dims)})" for dims in self.output_core_dims
)
return f"{lhs}->{rhs}"
def to_gufunc_string(self, exclude_dims=frozenset()):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
Also creates unique names for input_core_dims contained in exclude_dims.
"""
input_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims
]
output_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims
]
# enumerate input_core_dims contained in exclude_dims to make them unique
if exclude_dims:
exclude_dims = [self.dims_map[dim] for dim in exclude_dims]
counter: Counter = Counter()
def _enumerate(dim):
if dim in exclude_dims:
n = counter[dim]
counter.update([dim])
dim = f"{dim}_{n}"
return dim
input_core_dims = [
[_enumerate(dim) for dim in arg] for arg in input_core_dims
]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
def _get_coords_list(args: Iterable[Any]) -> list[Coordinates]:
coords_list = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coords_list.append(coords)
return coords_list
def build_output_coords_and_indexes(
    args: Iterable[Any],
    signature: _UFuncSignature,
    exclude_dims: AbstractSet = frozenset(),
    combine_attrs: CombineAttrsOptions = "override",
) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
    """Build output coordinates and indexes for an operation.

    Parameters
    ----------
    args : Iterable
        List of raw operation arguments. Any valid types for xarray operations
        are OK, e.g., scalars, Variable, DataArray, Dataset.
    signature : _UfuncSignature
        Core dimensions signature for the operation.
    exclude_dims : set, optional
        Dimensions excluded from the operation. Coordinates along these
        dimensions are dropped.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
        "override"} or callable, default: "drop"
        A callable or a string indicating how to combine attrs of the objects being
        merged:

        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.

        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.

    Returns
    -------
    Dictionaries of Variable and Index objects with merged coordinates.
    """
    coords_list = _get_coords_list(args)

    if len(coords_list) == 1 and not exclude_dims:
        # we can skip the expensive merge
        (unpacked_coords,) = coords_list
        merged_vars = dict(unpacked_coords.variables)
        merged_indexes = dict(unpacked_coords.xindexes)
    else:
        merged_vars, merged_indexes = merge_coordinates_without_align(
            coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs
        )

    output_coords = []
    output_indexes = []
    for output_dims in signature.output_core_dims:
        dropped_dims = signature.all_input_core_dims - set(output_dims)
        if dropped_dims:
            # a coordinate spanning a dropped core dim would be invalid on
            # the output, so keep only coords fully disjoint from them
            filtered_coords = {
                k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)
            }
            filtered_indexes = filter_indexes_from_coords(
                merged_indexes, set(filtered_coords)
            )
        else:
            filtered_coords = merged_vars
            filtered_indexes = merged_indexes
        output_coords.append(filtered_coords)
        output_indexes.append(filtered_indexes)

    return output_coords, output_indexes
def apply_dataarray_vfunc(
    func,
    *args,
    signature: _UFuncSignature,
    join: JoinOptions = "inner",
    exclude_dims=frozenset(),
    keep_attrs="override",
) -> tuple[DataArray, ...] | DataArray:
    """Apply a variable level function over DataArray, Variable and/or ndarray
    objects.

    ``func`` receives the bare ``Variable`` of each DataArray argument and
    must return one Variable per output in ``signature``; the results are
    re-wrapped as DataArrays with merged coordinates, indexes and attrs.
    """
    from xarray.core.dataarray import DataArray

    if len(args) > 1:
        # align indexes across arguments first; dims listed in exclude_dims
        # are deliberately left unaligned
        args = tuple(
            deep_align(
                args,
                join=join,
                copy=False,
                exclude=exclude_dims,
                raise_on_invalid=False,
            )
        )

    objs = _all_of_type(args, DataArray)

    # the output name comes from the first DataArray unless attrs are dropped
    if keep_attrs == "drop":
        name = result_name(args)
    else:
        first_obj = _first_of_type(args, DataArray)
        name = first_obj.name
    result_coords, result_indexes = build_output_coords_and_indexes(
        args, signature, exclude_dims, combine_attrs=keep_attrs
    )

    data_vars = [getattr(a, "variable", a) for a in args]
    result_var = func(*data_vars)

    out: tuple[DataArray, ...] | DataArray
    if signature.num_outputs > 1:
        out = tuple(
            DataArray(
                variable, coords=coords, indexes=indexes, name=name, fastpath=True
            )
            for variable, coords, indexes in zip(
                result_var, result_coords, result_indexes, strict=True
            )
        )
    else:
        (coords,) = result_coords
        (indexes,) = result_indexes
        out = DataArray(
            result_var, coords=coords, indexes=indexes, name=name, fastpath=True
        )

    # merged attrs are applied uniformly to every output
    attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
    if isinstance(out, tuple):
        for da in out:
            da.attrs = attrs
    else:
        out.attrs = attrs

    return out
def ordered_set_union(all_keys: list[Iterable]) -> Iterable:
    """Union of all key collections, keeping first-seen order."""
    union: dict = {}
    for keys in all_keys:
        union.update(dict.fromkeys(keys))
    return union.keys()


def ordered_set_intersection(all_keys: list[Iterable]) -> Iterable:
    """Keys present in every collection, ordered as in the first one."""
    common = set(all_keys[0]).intersection(*all_keys[1:])
    return [key for key in all_keys[0] if key in common]


def assert_and_return_exact_match(all_keys):
    """Require every key collection to equal the first, and return it."""
    reference = all_keys[0]
    for keys in all_keys[1:]:
        if keys != reference:
            raise ValueError(
                "exact match required for all data variable names, "
                f"but {list(keys)} != {list(reference)}: {set(keys) ^ set(reference)} are not in both."
            )
    return reference


# joiner strategies keyed by the `how` argument of join_dict_keys
_JOINERS: dict[str, Callable] = {
    "inner": ordered_set_intersection,
    "outer": ordered_set_union,
    "left": operator.itemgetter(0),
    "right": operator.itemgetter(-1),
    "exact": assert_and_return_exact_match,
}


def join_dict_keys(objects: Iterable[Mapping | Any], how: str = "inner") -> Iterable:
    """Join the key sets of every mapping in ``objects`` per strategy ``how``."""
    joiner = _JOINERS[how]
    all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")]
    return joiner(all_keys)
def collect_dict_values(
    objects: Iterable[Mapping | Any], keys: Iterable, fill_value: object = None
) -> list[list]:
    """For each key, gather one value per object.

    Dict-like objects contribute ``obj.get(key, fill_value)``; anything else
    contributes the object itself, unchanged, for every key.
    """
    collected = []
    for key in keys:
        row = []
        for obj in objects:
            if is_dict_like(obj):
                row.append(obj.get(key, fill_value))
            else:
                row.append(obj)
        collected.append(row)
    return collected
def _as_variables_or_variable(arg) -> Variable | tuple[Variable]:
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars: Mapping[Any, tuple[Variable, ...]], num_outputs: int
) -> tuple[dict[Hashable, Variable], ...]:
out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs))
for name, values in result_vars.items():
for value, results_dict in zip(values, out, strict=True):
results_dict[name] = value
return out
def _check_core_dims(signature, variable_args, name):
"""
Check if an arg has all the core dims required by the signature.
Slightly awkward design, of returning the error message. But we want to
give a detailed error message, which requires inspecting the variable in
the inner loop.
"""
missing = []
for i, (core_dims, variable_arg) in enumerate(
zip(signature.input_core_dims, variable_args, strict=True)
):
# Check whether all the dims are on the variable. Note that we need the
# `hasattr` to check for a dims property, to protect against the case where
# a numpy array is passed in.
if hasattr(variable_arg, "dims") and set(core_dims) - set(variable_arg.dims):
missing += [[i, variable_arg, core_dims]]
if missing:
message = ""
for i, variable_arg, core_dims in missing:
message += f"Missing core dims {set(core_dims) - set(variable_arg.dims)} from arg number {i + 1} on a variable named `{name}`:\n{variable_arg}\n\n"
message += "Either add the core dimension, or if passing a dataset alternatively pass `on_missing_core_dim` as `copy` or `drop`. "
return message
return True
def apply_dict_of_variables_vfunc(
    func,
    *args,
    signature: _UFuncSignature,
    join="inner",
    fill_value=None,
    on_missing_core_dim: MissingCoreDimOptions = "raise",
):
    """Apply a variable level function over dicts of DataArray, DataArray,
    Variable and ndarray objects.

    Variable names are joined per ``join``; missing entries are filled with
    ``fill_value``. Variables lacking a required core dim are handled per
    ``on_missing_core_dim`` ("raise" / "copy" / "drop").
    """
    args = tuple(_as_variables_or_variable(arg) for arg in args)
    names = join_dict_keys(args, how=join)
    grouped_by_name = collect_dict_values(args, names, fill_value)

    result_vars = {}
    for name, variable_args in zip(names, grouped_by_name, strict=True):
        # _check_core_dims returns True, or a descriptive error string
        core_dim_present = _check_core_dims(signature, variable_args, name)
        if core_dim_present is True:
            result_vars[name] = func(*variable_args)
        elif on_missing_core_dim == "raise":
            raise ValueError(core_dim_present)
        elif on_missing_core_dim == "copy":
            # pass the first argument's variable through unchanged
            result_vars[name] = variable_args[0]
        elif on_missing_core_dim == "drop":
            pass  # silently omit this variable from the result
        else:
            raise ValueError(
                f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}"
            )

    if signature.num_outputs > 1:
        # split {name: (out0, out1, ...)} into one dict per output
        return _unpack_dict_tuples(result_vars, signature.num_outputs)
    else:
        return result_vars
def _fast_dataset(
    variables: dict[Hashable, Variable],
    coord_variables: Mapping[Hashable, Variable],
    indexes: dict[Hashable, Index],
) -> Dataset:
    """Create a dataset as quickly as possible.

    Beware: the `variables` dict is modified INPLACE.
    """
    from xarray.core.dataset import Dataset

    # fold coordinates into the variables dict; coord_names records which
    # keys are coordinates so _construct_direct can skip validation
    variables.update(coord_variables)
    coord_names = set(coord_variables)
    return Dataset._construct_direct(variables, coord_names, indexes=indexes)
def apply_dataset_vfunc(
    func,
    *args,
    signature: _UFuncSignature,
    join="inner",
    dataset_join="exact",
    fill_value=_NO_FILL_VALUE,
    exclude_dims=frozenset(),
    keep_attrs="override",
    on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Dataset | tuple[Dataset, ...]:
    """Apply a variable level function over Dataset, dict of DataArray,
    DataArray, Variable and/or ndarray objects.
    """
    from xarray.core.dataset import Dataset

    # joins other than inner/exact can introduce missing variables, which
    # require an explicit fill value from the caller
    if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:
        raise TypeError(
            "to apply an operation to datasets with different "
            "data variables with apply_ufunc, you must supply the "
            "dataset_fill_value argument."
        )

    objs = _all_of_type(args, Dataset)

    if len(args) > 1:
        # align indexes across arguments (dims in exclude_dims stay unaligned)
        args = tuple(
            deep_align(
                args,
                join=join,
                copy=False,
                exclude=exclude_dims,
                raise_on_invalid=False,
            )
        )

    list_of_coords, list_of_indexes = build_output_coords_and_indexes(
        args, signature, exclude_dims, combine_attrs=keep_attrs
    )
    args = tuple(getattr(arg, "data_vars", arg) for arg in args)

    result_vars = apply_dict_of_variables_vfunc(
        func,
        *args,
        signature=signature,
        join=dataset_join,
        fill_value=fill_value,
        on_missing_core_dim=on_missing_core_dim,
    )

    out: Dataset | tuple[Dataset, ...]
    if signature.num_outputs > 1:
        # one Dataset per declared output, each with its own coords/indexes
        out = tuple(
            itertools.starmap(
                _fast_dataset,
                zip(result_vars, list_of_coords, list_of_indexes, strict=True),
            )
        )
    else:
        (coord_vars,) = list_of_coords
        (indexes,) = list_of_indexes
        out = _fast_dataset(result_vars, coord_vars, indexes=indexes)

    # merged attrs are applied uniformly to every output
    attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
    if isinstance(out, tuple):
        for ds in out:
            ds.attrs = attrs
    else:
        out.attrs = attrs

    return out
def _iter_over_selections(obj, dim, values):
    """Iterate over selections of an xarray object in the provided order.

    Yields ``obj.sel(dim=value)`` for each value; when a value is missing
    from ``obj`` a dummy stand-in is yielded instead.
    """
    from xarray.core.groupby import _dummy_copy

    dummy = None
    for value in values:
        try:
            obj_sel = obj.sel(**{dim: value})
        except (KeyError, IndexError):
            # value absent from obj: substitute a dummy, allocated lazily
            # on first miss and reused afterwards
            if dummy is None:
                dummy = _dummy_copy(obj)
            obj_sel = dummy
        yield obj_sel
def apply_groupby_func(func, *args):
    """Apply a dataset or datarray level function over GroupBy, Dataset,
    DataArray, Variable and/or ndarray objects.

    Iterates over the groups of the first GroupBy argument, applying ``func``
    to the matching piece of every argument, then recombines the results.
    """
    from xarray.core.groupby import GroupBy, peek_at

    groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
    assert groupbys, "must have at least one groupby to iterate over"
    first_groupby = groupbys[0]
    (grouper,) = first_groupby.groupers
    # all GroupBy arguments must share the same grouping for joint iteration
    if any(not grouper.group.equals(gb.groupers[0].group) for gb in groupbys[1:]):  # type: ignore[union-attr]
        raise ValueError(
            "apply_ufunc can only perform operations over "
            "multiple GroupBy objects at once if they are all "
            "grouped the same way"
        )

    grouped_dim = grouper.name
    unique_values = grouper.unique_coord.values

    # build one iterator per argument yielding the piece matching each group
    iterators = []
    for arg in args:
        iterator: Iterator[Any]
        if isinstance(arg, GroupBy):
            iterator = (value for _, value in arg)
        elif hasattr(arg, "dims") and grouped_dim in arg.dims:
            if isinstance(arg, Variable):
                raise ValueError(
                    "groupby operations cannot be performed with "
                    "xarray.Variable objects that share a dimension with "
                    "the grouped dimension"
                )
            iterator = _iter_over_selections(arg, grouped_dim, unique_values)
        else:
            # scalars / unrelated objects repeat unchanged for every group
            iterator = itertools.repeat(arg)
        iterators.append(iterator)

    applied: Iterator = itertools.starmap(func, zip(*iterators, strict=False))
    # peek at the first result to decide between single and tuple outputs
    applied_example, applied = peek_at(applied)
    combine = first_groupby._combine  # type: ignore[attr-defined]
    if isinstance(applied_example, tuple):
        combined = tuple(combine(output) for output in zip(*applied, strict=True))
    else:
        combined = combine(applied)
    return combined
def unified_dim_sizes(
variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
) -> dict[Hashable, int]:
dim_sizes: dict[Hashable, int] = {}
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError(
"broadcasting cannot handle duplicate "
f"dimensions on a variable: {list(var.dims)}"
)
for dim, size in zip(var.dims, var.shape, strict=True):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension "
f"{dim}: {dim_sizes[dim]} vs {size}"
)
return dim_sizes
# Reusable full-axis slice constant (used by broadcast_compat_data when
# assembling indexing keys, avoiding a fresh slice(None) per dimension).
SLICE_NONE = slice(None)
def broadcast_compat_data(
    variable: Variable,
    broadcast_dims: tuple[Hashable, ...],
    core_dims: tuple[Hashable, ...],
) -> Any:
    # Return the variable's bare array, transposed/expanded so its dims
    # appear in the order broadcast_dims + core_dims. Raises ValueError if
    # the variable carries a dim that is a core dim of another argument.
    data = variable.data

    old_dims = variable.dims
    new_dims = broadcast_dims + core_dims

    if new_dims == old_dims:
        # optimize for the typical case
        return data

    set_old_dims = set(old_dims)
    set_new_dims = set(new_dims)
    unexpected_dims = [d for d in old_dims if d not in set_new_dims]

    if unexpected_dims:
        raise ValueError(
            "operand to apply_ufunc encountered unexpected "
            f"dimensions {unexpected_dims!r} on an input variable: these are core "
            "dimensions on other input or output variables"
        )

    # for consistency with numpy, keep broadcast dimensions to the left
    old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
    reordered_dims = old_broadcast_dims + core_dims
    if reordered_dims != old_dims:
        order = tuple(old_dims.index(d) for d in reordered_dims)
        data = duck_array_ops.transpose(data, order)

    if new_dims != reordered_dims:
        # insert size-1 axes for dims the variable lacks
        key_parts: list[slice | None] = []
        for dim in new_dims:
            if dim in set_old_dims:
                key_parts.append(SLICE_NONE)
            elif key_parts:
                # no need to insert new axes at the beginning that are already
                # handled by broadcasting
                key_parts.append(np.newaxis)
        data = data[tuple(key_parts)]

    return data
def _vectorize(func, signature, output_dtypes, exclude_dims):
if signature.all_core_dims:
func = np.vectorize(
func,
otypes=output_dtypes,
signature=signature.to_gufunc_string(exclude_dims),
)
else:
func = np.vectorize(func, otypes=output_dtypes)
return func
def apply_variable_ufunc(
    func,
    *args,
    signature: _UFuncSignature,
    exclude_dims=frozenset(),
    dask="forbidden",
    output_dtypes=None,
    vectorize=False,
    keep_attrs="override",
    dask_gufunc_kwargs=None,
) -> Variable | tuple[Variable, ...]:
    """Apply a ndarray level function over Variable and/or ndarray objects."""
    from xarray.core.formatting import short_array_repr
    from xarray.core.variable import as_compatible_data

    # sizes of all non-core (broadcast) dimensions across the inputs
    dim_sizes = unified_dim_sizes(
        (a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
    )
    broadcast_dims = tuple(
        dim for dim in dim_sizes if dim not in signature.all_core_dims
    )
    output_dims = [broadcast_dims + out for out in signature.output_core_dims]

    # extract bare arrays with each input's core dims moved to the end
    input_data = [
        (
            broadcast_compat_data(arg, broadcast_dims, core_dims)
            if isinstance(arg, Variable)
            else arg
        )
        for arg, core_dims in zip(args, signature.input_core_dims, strict=True)
    ]

    if any(is_chunked_array(array) for array in input_data):
        if dask == "forbidden":
            raise ValueError(
                "apply_ufunc encountered a chunked array on an "
                "argument, but handling for chunked arrays has not "
                "been enabled. Either set the ``dask`` argument "
                "or load your data into memory first with "
                "``.load()`` or ``.compute()``"
            )
        elif dask == "parallelized":
            chunkmanager = get_chunked_array_type(*input_data)

            numpy_func = func

            if dask_gufunc_kwargs is None:
                dask_gufunc_kwargs = {}
            else:
                # copy so the caller's dict is not mutated below
                dask_gufunc_kwargs = dask_gufunc_kwargs.copy()

            allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None)
            if allow_rechunk is None:
                for n, (data, core_dims) in enumerate(
                    zip(input_data, signature.input_core_dims, strict=True)
                ):
                    if is_chunked_array(data):
                        # core dimensions cannot span multiple chunks
                        for axis, dim in enumerate(core_dims, start=-len(core_dims)):
                            if len(data.chunks[axis]) != 1:
                                raise ValueError(
                                    f"dimension {dim} on {n}th function argument to "
                                    "apply_ufunc with dask='parallelized' consists of "
                                    "multiple chunks, but is also a core dimension. To "
                                    "fix, either rechunk into a single array chunk along "
                                    f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or "
                                    "pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` "
                                    "but beware that this may significantly increase memory usage."
                                )
                dask_gufunc_kwargs["allow_rechunk"] = True

            # rename user-supplied output_sizes keys to gufunc-safe dim names
            output_sizes = dask_gufunc_kwargs.pop("output_sizes", {})
            if output_sizes:
                output_sizes_renamed = {}
                for key, value in output_sizes.items():
                    if key not in signature.all_output_core_dims:
                        raise ValueError(
                            f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims"
                        )
                    output_sizes_renamed[signature.dims_map[key]] = value
                dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed

            # any output core dim not present on the inputs (or excluded)
            # must have an explicit size
            for key in signature.all_output_core_dims:
                if (
                    key not in signature.all_input_core_dims or key in exclude_dims
                ) and key not in output_sizes:
                    raise ValueError(
                        f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'"
                    )

            def func(*arrays):
                # closure delegating to the chunk manager's gufunc machinery
                res = chunkmanager.apply_gufunc(
                    numpy_func,
                    signature.to_gufunc_string(exclude_dims),
                    *arrays,
                    vectorize=vectorize,
                    output_dtypes=output_dtypes,
                    **dask_gufunc_kwargs,
                )

                return res

        elif dask == "allowed":
            pass
        else:
            raise ValueError(
                f"unknown setting for chunked array handling in apply_ufunc: {dask}"
            )
    elif vectorize:
        func = _vectorize(
            func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
        )

    result_data = func(*input_data)

    if signature.num_outputs == 1:
        result_data = (result_data,)
    elif (
        not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
    ):
        raise ValueError(
            f"applied function does not have the number of "
            f"outputs specified in the ufunc signature. "
            f"Received a {type(result_data)} with {len(result_data)} elements. "
            f"Expected a tuple of {signature.num_outputs} elements:\n\n"
            f"{limit_lines(repr(result_data), limit=10)}"
        )

    objs = _all_of_type(args, Variable)
    attrs = merge_attrs(
        [obj.attrs for obj in objs],
        combine_attrs=keep_attrs,
    )

    # wrap each raw output array in a Variable, validating ndim and sizes
    output: list[Variable] = []
    for dims, data in zip(output_dims, result_data, strict=True):
        data = as_compatible_data(data)
        if data.ndim != len(dims):
            raise ValueError(
                "applied function returned data with an unexpected "
                f"number of dimensions. Received {data.ndim} dimension(s) but "
                f"expected {len(dims)} dimensions with names {dims!r}, from:\n\n"
                f"{short_array_repr(data)}"
            )

        var = Variable(dims, data, fastpath=True)
        for dim, new_size in var.sizes.items():
            if dim in dim_sizes and new_size != dim_sizes[dim]:
                raise ValueError(
                    f"size of dimension '{dim}' on inputs was unexpectedly "
                    f"changed by applied function from {dim_sizes[dim]} to {new_size}. Only "
                    "dimensions specified in ``exclude_dims`` with "
                    "xarray.apply_ufunc are allowed to change size. "
                    "The data returned was:\n\n"
                    f"{short_array_repr(data)}"
                )

        var.attrs = attrs
        output.append(var)

    if signature.num_outputs == 1:
        return output[0]
    else:
        return tuple(output)
def apply_array_ufunc(func, *args, dask="forbidden"):
    """Apply a ndarray level function over ndarray objects.

    Chunked (dask) inputs are only accepted when ``dask="allowed"``; the
    "parallelized" mode requires at least one xarray argument and is
    rejected here.
    """
    if any(is_chunked_array(arg) for arg in args):
        if dask == "forbidden":
            raise ValueError(
                "apply_ufunc encountered a dask array on an "
                "argument, but handling for dask arrays has not "
                "been enabled. Either set the ``dask`` argument "
                "or load your data into memory first with "
                "``.load()`` or ``.compute()``"
            )
        if dask == "parallelized":
            raise ValueError(
                "cannot use dask='parallelized' for apply_ufunc "
                "unless at least one input is an xarray object"
            )
        if dask != "allowed":
            raise ValueError(f"unknown setting for dask array handling: {dask}")
    return func(*args)
def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] | None = None,
output_core_dims: Sequence[Sequence] | None = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: JoinOptions = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool | str | None = None,
kwargs: Mapping | None = None,
dask: Literal["forbidden", "allowed", "parallelized"] = "forbidden",
output_dtypes: Sequence | None = None,
output_sizes: Mapping[Any, int] | None = None,
meta: Any = None,
dask_gufunc_kwargs: dict[str, Any] | None = None,
on_missing_core_dim: MissingCoreDimOptions = "raise",
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, \
numpy.ndarray, dask.array.Array or scalar
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : sequence of sequence, optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : list of tuple, optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", "override"} or bool, optional
- 'drop' or False: empty attrs on returned xarray object.
- 'identical': all attrs must be the same on every object.
- 'no_conflicts': attrs from all objects are combined, any that have the same name must also have the same value.
- 'drop_conflicts': attrs from all objects are combined, any that have the same name but different values are dropped.
- 'override' or True: skip comparing and copy attrs from the first object to the result.
kwargs : dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
``func`` natively supports dask arrays.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
arguments are supported. Only use this option if ``func`` does not natively
support dask arrays (e.g. converts them to numpy arrays).
dask_gufunc_kwargs : dict, optional
Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
and ``meta``.
output_dtypes : list of dtype, optional
Optional list of output dtypes. Only used if ``dask='parallelized'`` or
``vectorize=True``.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
parameter. It will be removed as direct parameter in a future version.
meta : optional
Size-0 object representing the type of array wrapped by dask array. Passed on to
:py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
``dask_gufunc_kwargs`` parameter . It will be removed as direct parameter
a future version.
on_missing_core_dim : {"raise", "copy", "drop"}, default: "raise"
How to handle missing core dimensions on input variables.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Notes
-----
This function is designed for the more common case where ``func`` can work on numpy
arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
it is possible to use :py:func:`xarray.map_blocks`.
Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x**2 + y**2)
... return xr.apply_ufunc(func, a, b)
...
You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)> Size: 24B
array([1.41421356, 2.82842712, 4.24264069])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(3, 4)
np.float64(5.0)
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)> Size: 24B
array([1., 2., 3.])
Coordinates:
* x (x) float64 24B 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension:
>>> def mean(obj, dim):
... # note: apply always moves core dimensions to the end
... return apply_ufunc(
... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
... )
...
Inner product over a specific dimension (like :py:func:`dot`):
>>> def _inner(x, y):
... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
... return result[..., 0, 0]
...
>>> def inner_product(a, b, dim):
... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
...
Stack objects along a new dimension (like :py:func:`concat`):
>>> def stack(objects, dim, new_coord):
... # note: this version does not stack coordinates
... func = lambda *x: np.stack(x, axis=-1)
... result = apply_ufunc(
... func,
... *objects,
... output_core_dims=[[dim]],
... join="outer",
... dataset_fill_value=np.nan
... )
... result[dim] = new_coord
... return result
...
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors:
>>> import scipy.stats
>>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
... return apply_ufunc(
... scipy.stats.wasserstein_distance,
... first_samples,
... second_samples,
... input_core_dims=[[dim], [dim]],
... vectorize=True,
... )
...
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in ``apply_ufunc``. You may find helper functions such as
:py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
See Also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
dask.array.apply_gufunc
xarray.map_blocks
Notes
-----
:ref:`dask.automatic-parallelization`
User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
:doc:`xarray-tutorial:advanced/apply_ufunc/apply_ufunc`
Advanced Tutorial on applying numpy function using :py:func:`apply_ufunc`
References
----------
.. [1] https://numpy.org/doc/stable/reference/ufuncs.html
.. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
"""
from xarray.core.dataarray import DataArray
from xarray.core.groupby import GroupBy
from xarray.core.variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
f"input_core_dims must be None or a tuple with the length same to "
f"the number of arguments. "
f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, "
f" but number of args is {len(args)}."
)
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims:
if not isinstance(exclude_dims, set):
raise TypeError(
f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead."
)
if not exclude_dims <= signature.all_core_dims:
raise ValueError(
f"each dimension in `exclude_dims` must also be a "
f"core dimension in the function signature. "
f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension"
)
# handle dask_gufunc_kwargs
if dask == "parallelized":
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
# todo: remove warnings after deprecation cycle
if meta is not None:
warnings.warn(
"``meta`` should be given in the ``dask_gufunc_kwargs`` parameter."
" It will be removed as direct parameter in a future version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("meta", meta)
if output_sizes is not None:
warnings.warn(
"``output_sizes`` should be given in the ``dask_gufunc_kwargs`` "
"parameter. It will be removed as direct parameter in a future "
"version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
if kwargs:
if "where" in kwargs and isinstance(kwargs["where"], DataArray):
kwargs["where"] = kwargs["where"].data # type:ignore[index]
func = functools.partial(func, **kwargs)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
if isinstance(keep_attrs, bool):
keep_attrs = "override" if keep_attrs else "drop"
variables_vfunc = functools.partial(
apply_variable_ufunc,
func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
# feed groupby-apply_ufunc through apply_groupby_func
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(
apply_ufunc,
func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
return apply_groupby_func(this_apply, *args)
# feed datasets apply_variable_ufunc through apply_dataset_vfunc
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
on_missing_core_dim=on_missing_core_dim,
)
# feed DataArray apply_variable_ufunc through apply_dataarray_vfunc
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
)
# feed Variables directly through apply_variable_ufunc
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
# feed anything else through apply_array_ufunc
return apply_array_ufunc(func, *args, dask=dask)
| _UFuncSignature |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 157961,
"end": 163117
} | class ____(Response):
"""
Response of tasks.delete endpoint.
:param deleted: Indicates whether the task was deleted
:type deleted: bool
:param updated_children: Number of child tasks whose parent property was
updated
:type updated_children: int
:param updated_models: Number of models whose task property was updated
:type updated_models: int
:param events: Response from events.delete_for_task
:type events: dict
:param urls: The urls of the files that were uploaded by this task. Returned if
the 'return_file_urls' was set to 'true'
:type urls: TaskUrls
"""
_service = "tasks"
_action = "delete"
_version = "2.20"
_schema = {
"definitions": {
"task_urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"properties": {
"deleted": {
"description": "Indicates whether the task was deleted",
"type": ["boolean", "null"],
},
"events": {
"additionalProperties": True,
"description": "Response from events.delete_for_task",
"type": ["object", "null"],
},
"updated_children": {
"description": "Number of child tasks whose parent property was updated",
"type": ["integer", "null"],
},
"updated_models": {
"description": "Number of models whose task property was updated",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by this task. Returned if the 'return_file_urls' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
},
},
"type": "object",
}
def __init__(
self,
deleted: Optional[bool] = None,
updated_children: Optional[int] = None,
updated_models: Optional[int] = None,
events: Optional[dict] = None,
urls: Any = None,
**kwargs: Any
) -> None:
super(DeleteResponse, self).__init__(**kwargs)
self.deleted = deleted
self.updated_children = updated_children
self.updated_models = updated_models
self.events = events
self.urls = urls
@schema_property("deleted")
def deleted(self) -> Optional[bool]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[bool]) -> None:
if value is None:
self._property_deleted = None
return
self.assert_isinstance(value, "deleted", (bool,))
self._property_deleted = value
@schema_property("updated_children")
def updated_children(self) -> Optional[int]:
return self._property_updated_children
@updated_children.setter
def updated_children(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_children = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_children", six.integer_types)
self._property_updated_children = value
@schema_property("updated_models")
def updated_models(self) -> Optional[int]:
return self._property_updated_models
@updated_models.setter
def updated_models(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_models", six.integer_types)
self._property_updated_models = value
@schema_property("events")
def events(self) -> Optional[dict]:
return self._property_events
@events.setter
def events(self, value: Optional[dict]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (dict,))
self._property_events = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = TaskUrls.from_dict(value)
else:
self.assert_isinstance(value, "urls", TaskUrls)
self._property_urls = value
| DeleteResponse |
python | kamyu104__LeetCode-Solutions | Python/count-integers-with-even-digit-sum.py | {
"start": 396,
"end": 782
} | class ____(object):
def countEven(self, num):
"""
:type num: int
:rtype: int
"""
def parity(x):
result = 0
while x:
result += x%10
x //= 10
return result%2
return sum(parity(x) == 0 for x in xrange(1, num+1))
# Time: O(nlogn)
# Space: O(logn)
# brute force
| Solution2 |
python | MongoEngine__mongoengine | mongoengine/base/metaclasses.py | {
"start": 466,
"end": 8999
} | class ____(type):
"""Metaclass for all documents."""
# TODO lower complexity of this method
def __new__(mcs, name, bases, attrs):
flattened_bases = mcs._get_bases(bases)
super_new = super().__new__
# If a base class just call super
metaclass = attrs.get("my_metaclass")
if metaclass and issubclass(metaclass, DocumentMetaclass):
return super_new(mcs, name, bases, attrs)
attrs["_is_document"] = attrs.get("_is_document", False)
attrs["_cached_reference_fields"] = []
# EmbeddedDocuments could have meta data for inheritance
if "meta" in attrs:
attrs["_meta"] = attrs.pop("meta")
# EmbeddedDocuments should inherit meta data
if "_meta" not in attrs:
meta = MetaDict()
for base in flattened_bases[::-1]:
# Add any mixin metadata from plain objects
if hasattr(base, "meta"):
meta.merge(base.meta)
elif hasattr(base, "_meta"):
meta.merge(base._meta)
attrs["_meta"] = meta
attrs["_meta"][
"abstract"
] = False # 789: EmbeddedDocument shouldn't inherit abstract
# If allow_inheritance is True, add a "_cls" string field to the attrs
if attrs["_meta"].get("allow_inheritance"):
StringField = _import_class("StringField")
attrs["_cls"] = StringField()
# Handle document Fields
# Merge all fields from subclasses
doc_fields = {}
for base in flattened_bases[::-1]:
if hasattr(base, "_fields"):
doc_fields.update(base._fields)
# Standard object mixin - merge in any Fields
if not hasattr(base, "_meta"):
base_fields = {}
for attr_name, attr_value in base.__dict__.items():
if not isinstance(attr_value, BaseField):
continue
attr_value.name = attr_name
if not attr_value.db_field:
attr_value.db_field = attr_name
base_fields[attr_name] = attr_value
doc_fields.update(base_fields)
# Discover any document fields
field_names = {}
for attr_name, attr_value in attrs.items():
if not isinstance(attr_value, BaseField):
continue
attr_value.name = attr_name
if not attr_value.db_field:
attr_value.db_field = attr_name
doc_fields[attr_name] = attr_value
# Count names to ensure no db_field redefinitions
field_names[attr_value.db_field] = (
field_names.get(attr_value.db_field, 0) + 1
)
# Ensure no duplicate db_fields
duplicate_db_fields = [k for k, v in field_names.items() if v > 1]
if duplicate_db_fields:
msg = "Multiple db_fields defined for: %s " % ", ".join(duplicate_db_fields)
raise InvalidDocumentError(msg)
# Set _fields and db_field maps
attrs["_fields"] = doc_fields
attrs["_db_field_map"] = {
k: getattr(v, "db_field", k) for k, v in doc_fields.items()
}
attrs["_reverse_db_field_map"] = {
v: k for k, v in attrs["_db_field_map"].items()
}
attrs["_fields_ordered"] = tuple(
i[1]
for i in sorted((v.creation_counter, v.name) for v in doc_fields.values())
)
#
# Set document hierarchy
#
superclasses = ()
class_name = [name]
for base in flattened_bases:
if not getattr(base, "_is_base_cls", True) and not getattr(
base, "_meta", {}
).get("abstract", True):
# Collate hierarchy for _cls and _subclasses
class_name.append(base.__name__)
if hasattr(base, "_meta"):
# Warn if allow_inheritance isn't set and prevent
# inheritance of classes where inheritance is set to False
allow_inheritance = base._meta.get("allow_inheritance")
if not allow_inheritance and not base._meta.get("abstract"):
raise ValueError(
"Document %s may not be subclassed. "
'To enable inheritance, use the "allow_inheritance" meta attribute.'
% base.__name__
)
# Get superclasses from last base superclass
document_bases = [b for b in flattened_bases if hasattr(b, "_class_name")]
if document_bases:
superclasses = document_bases[0]._superclasses
superclasses += (document_bases[0]._class_name,)
_cls = ".".join(reversed(class_name))
attrs["_class_name"] = _cls
attrs["_superclasses"] = superclasses
attrs["_subclasses"] = (_cls,)
attrs["_types"] = attrs["_subclasses"] # TODO depreciate _types
# Create the new_class
new_class = super_new(mcs, name, bases, attrs)
# Set _subclasses
for base in document_bases:
if _cls not in base._subclasses:
base._subclasses += (_cls,)
base._types = base._subclasses # TODO depreciate _types
(
Document,
EmbeddedDocument,
DictField,
CachedReferenceField,
) = mcs._import_classes()
if issubclass(new_class, Document):
new_class._collection = None
# Add class to the _document_registry
_DocumentRegistry.register(new_class)
# Handle delete rules
for field in new_class._fields.values():
f = field
if f.owner_document is None:
f.owner_document = new_class
delete_rule = getattr(f, "reverse_delete_rule", DO_NOTHING)
if isinstance(f, CachedReferenceField):
if issubclass(new_class, EmbeddedDocument):
raise InvalidDocumentError(
"CachedReferenceFields is not allowed in EmbeddedDocuments"
)
if f.auto_sync:
f.start_listener()
f.document_type._cached_reference_fields.append(f)
if isinstance(f, ComplexBaseField) and hasattr(f, "field"):
delete_rule = getattr(f.field, "reverse_delete_rule", DO_NOTHING)
if isinstance(f, DictField) and delete_rule != DO_NOTHING:
msg = (
"Reverse delete rules are not supported "
"for %s (field: %s)" % (field.__class__.__name__, field.name)
)
raise InvalidDocumentError(msg)
f = field.field
if delete_rule != DO_NOTHING:
if issubclass(new_class, EmbeddedDocument):
msg = (
"Reverse delete rules are not supported for "
"EmbeddedDocuments (field: %s)" % field.name
)
raise InvalidDocumentError(msg)
f.document_type.register_delete_rule(new_class, field.name, delete_rule)
if (
field.name
and hasattr(Document, field.name)
and EmbeddedDocument not in new_class.mro()
):
msg = "%s is a document method and not a valid field name" % field.name
raise InvalidDocumentError(msg)
return new_class
@classmethod
def _get_bases(mcs, bases):
if isinstance(bases, BasesTuple):
return bases
seen = []
bases = mcs.__get_bases(bases)
unique_bases = (b for b in bases if not (b in seen or seen.append(b)))
return BasesTuple(unique_bases)
@classmethod
def __get_bases(mcs, bases):
for base in bases:
if base is object:
continue
yield base
yield from mcs.__get_bases(base.__bases__)
@classmethod
def _import_classes(mcs):
Document = _import_class("Document")
EmbeddedDocument = _import_class("EmbeddedDocument")
DictField = _import_class("DictField")
CachedReferenceField = _import_class("CachedReferenceField")
return Document, EmbeddedDocument, DictField, CachedReferenceField
| DocumentMetaclass |
python | keras-team__keras | keras/src/layers/preprocessing/pipeline_test.py | {
"start": 164,
"end": 619
} | class ____(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
| CanaryLayer |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/configs.py | {
"start": 1072,
"end": 1374
} | class ____(_StageConfigBase):
model_source: Optional[str] = Field(
default=None, description="Model source/identifier for this stage."
)
chat_template: Optional[str] = Field(default=None)
chat_template_kwargs: Optional[Dict[str, Any]] = Field(default=None)
| ChatTemplateStageConfig |
python | cython__cython | runtests.py | {
"start": 68694,
"end": 75658
} | class ____(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
source_dirs = ['Cython', 'Demos', 'docs', 'pyximport', 'tests']
import pycodestyle
@pycodestyle.register_check
def breakpoint_check(physical_line):
if 'breakpoint()' not in physical_line:
return None
idx = physical_line.find('breakpoint()')
return idx, "Z001 Stray 'breakpoint' call"
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
total_errors = 0
# checks for .py files
paths = []
for codedir in source_dirs:
paths += glob.iglob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
total_errors += result.total_errors
# checks for non-Python source files
paths = []
for codedir in ['Cython', 'Demos', 'pyximport']: # source_dirs:
paths += glob.iglob(os.path.join(self.cython_dir, codedir + "/**/*.p[yx][xdi]"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file, select=[
'E711',
'E713',
'E714',
#'E501',
'W291',
'W292',
'E502',
'E703',
# whitespace
'W1',
'W2',
'W3',
#'E211',
'E223',
'E224',
#'E227',
'E228',
'E242',
#'E261',
'E273',
'E274',
#'E275',
# indentation
'E101',
'E111',
'E112',
#'E113',
'E117',
'E121',
'E125',
'E129',
])
print("") # Fix the first line of the report.
result = style.check_files(paths)
total_errors += result.total_errors
# checks for non-Python test source files
paths = []
for codedir in ['tests']: # source_dirs:
paths += glob.iglob(os.path.join(self.cython_dir, codedir + "/**/*.p[yx][xdi]"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file, select=[
#'E711',
#'E713',
#'E714',
#'E501',
#'E502',
#'E703',
# whitespace
#'W1',
#'W2',
#'W3',
#'W291',
'W292',
#'E211',
'E223',
'E224',
#'E227',
#'E228',
'E242',
#'E261',
'E273',
'E274',
#'E275',
# indentation
'E101',
#'E111',
'E112',
'E113',
#'E117',
#'E121',
#'E125',
#'E129',
],
exclude=[
"*badindent*",
"*tabspace*",
],
)
print("") # Fix the first line of the report.
result = style.check_files(paths)
total_errors += result.total_errors
self.assertEqual(total_errors, 0, "Found code style errors.")
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
from importlib import import_module
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = import_module(modulename)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
import doctest
from importlib import import_module
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = import_module(modulename)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
| TestCodeFormat |
python | django__django | tests/invalid_models_tests/test_relative_fields.py | {
"start": 45622,
"end": 48639
} | class ____(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey("Target", models.CASCADE),
)
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey("Another", models.CASCADE),
relative=models.ForeignKey("Target", models.CASCADE),
)
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField("Another"),
relative=models.ForeignKey("Target", models.CASCADE),
)
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(), relative=models.ManyToManyField("Target")
)
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey("Another", models.CASCADE),
relative=models.ManyToManyField("Target"),
)
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField("Another"),
relative=models.ManyToManyField("Target"),
)
def _test_reverse_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
self.assertEqual(
Model.check(),
[
Error(
"Reverse query name for 'invalid_models_tests.Model.rel' "
"clashes with field name 'invalid_models_tests.Target.model'.",
hint=(
"Rename field 'invalid_models_tests.Target.model', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.rel'."
),
obj=Model._meta.get_field("rel"),
id="fields.E303",
),
],
)
@modify_settings(INSTALLED_APPS={"append": "basic"})
@isolate_apps("basic", "invalid_models_tests")
def test_no_clash_across_apps_without_accessor(self):
class Target(models.Model):
class Meta:
app_label = "invalid_models_tests"
class Model(models.Model):
m2m = models.ManyToManyField(Target, related_name="+")
class Meta:
app_label = "basic"
def _test():
# Define model with the same name.
class Model(models.Model):
m2m = models.ManyToManyField(Target, related_name="+")
class Meta:
app_label = "invalid_models_tests"
self.assertEqual(Model.check(), [])
_test()
self.assertEqual(Model.check(), [])
@isolate_apps("invalid_models_tests")
| ReverseQueryNameClashTests |
python | TheAlgorithms__Python | data_structures/trie/trie.py | {
"start": 340,
"end": 3614
} | class ____:
def __init__(self) -> None:
self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
self.is_leaf = False
def insert_many(self, words: list[str]) -> None:
"""
Inserts a list of words into the Trie
:param words: list of string words
:return: None
"""
for word in words:
self.insert(word)
def insert(self, word: str) -> None:
"""
Inserts a word into the Trie
:param word: word to be inserted
:return: None
"""
curr = self
for char in word:
if char not in curr.nodes:
curr.nodes[char] = TrieNode()
curr = curr.nodes[char]
curr.is_leaf = True
def find(self, word: str) -> bool:
"""
Tries to find word in a Trie
:param word: word to look for
:return: Returns True if word is found, False otherwise
"""
curr = self
for char in word:
if char not in curr.nodes:
return False
curr = curr.nodes[char]
return curr.is_leaf
def delete(self, word: str) -> None:
"""
Deletes a word in a Trie
:param word: word to delete
:return: None
"""
def _delete(curr: TrieNode, word: str, index: int) -> bool:
if index == len(word):
# If word does not exist
if not curr.is_leaf:
return False
curr.is_leaf = False
return len(curr.nodes) == 0
char = word[index]
char_node = curr.nodes.get(char)
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
delete_curr = _delete(char_node, word, index + 1)
if delete_curr:
del curr.nodes[char]
return len(curr.nodes) == 0
return delete_curr
_delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
"""
Prints all the words in a Trie
:param node: root node of Trie
:param word: Word variable should be empty at start
:return: None
"""
if node.is_leaf:
print(word, end=" ")
for key, value in node.nodes.items():
print_words(value, word + key)
def test_trie() -> bool:
words = "banana bananas bandana band apple all beast".split()
root = TrieNode()
root.insert_many(words)
# print_words(root, "")
assert all(root.find(word) for word in words)
assert root.find("banana")
assert not root.find("bandanas")
assert not root.find("apps")
assert root.find("apple")
assert root.find("all")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def print_results(msg: str, passes: bool) -> None:
print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
assert test_trie()
def main() -> None:
"""
>>> pytests()
"""
print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| TrieNode |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_univariate_selection.py | {
"start": 28332,
"end": 31023
} | class ____(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
Features with p-values less than `alpha` are selected.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
chi2 : Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFpr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
"""
_parameter_constraints: dict = {
**_BaseFilter._parameter_constraints,
"alpha": [Interval(Real, 0, 1, closed="both")],
}
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
return self.pvalues_ < self.alpha
| SelectFpr |
python | getsentry__sentry | tests/sentry/sudo/test_utils.py | {
"start": 1918,
"end": 3571
} | class ____(BaseTestCase):
def test_untouched(self) -> None:
self.assertFalse(has_sudo_privileges(self.request))
def test_granted(self) -> None:
self.login()
grant_sudo_privileges(self.request)
self.assertTrue(has_sudo_privileges(self.request))
def test_revoked(self) -> None:
self.login()
grant_sudo_privileges(self.request)
revoke_sudo_privileges(self.request)
self.assertFalse(has_sudo_privileges(self.request))
def test_cookie_and_token_match(self) -> None:
self.login()
def get_signed_cookie(key, salt="", max_age=None) -> str:
return "abc123"
self.request.session[COOKIE_NAME] = "abc123"
self.request.get_signed_cookie = get_signed_cookie
self.assertTrue(has_sudo_privileges(self.request))
def test_cookie_and_token_mismatch(self) -> None:
self.login()
def get_signed_cookie(key, salt="", max_age=None) -> str:
return "nope"
self.request.session[COOKIE_NAME] = "abc123"
self.request.get_signed_cookie = get_signed_cookie
self.assertFalse(has_sudo_privileges(self.request))
def test_cookie_bad_signature(self) -> None:
self.login()
def get_signed_cookie(key, salt="", max_age=None):
raise BadSignature
self.request.session[COOKIE_NAME] = "abc123"
self.request.get_signed_cookie = get_signed_cookie
self.assertFalse(has_sudo_privileges(self.request))
def test_missing_keys(self) -> None:
self.login()
self.assertFalse(has_sudo_privileges(self.request))
| HasSudoPrivilegesTestCase |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-path-with-edge-reversals.py | {
"start": 79,
"end": 977
} | class ____(object):
def minCost(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
def dijkstra():
best = [float("inf")]*len(adj)
best[0] = 0
min_heap = [(best[0], 0)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr != best[u]:
continue
if u == len(adj)-1:
return curr
for v, w in adj[u]:
if not (best[v] > curr+w):
continue
best[v] = curr+w
heapq.heappush(min_heap, (best[v], v))
return -1
adj = [[] for _ in xrange(n)]
for u, v, w in edges:
adj[u].append((v, w))
adj[v].append((u, 2*w))
return dijkstra()
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/athena/resources.py | {
"start": 3669,
"end": 3776
} | class ____(AthenaClient):
"""This class was used by the function-style Athena resource."""
| AthenaResource |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 119779,
"end": 119987
} | class ____(Double[_N]):
"""The SQL DOUBLE type.
.. versionadded:: 2.0
.. seealso::
:class:`_types.Double` - documentation for the base type.
"""
__visit_name__ = "DOUBLE"
| DOUBLE |
python | pytorch__pytorch | torch/nn/modules/padding.py | {
"start": 21808,
"end": 23907
} | class ____(_ReplicationPadNd):
r"""Pads the input tensor using replication of the input boundary.
For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
Args:
padding (int, tuple): the size of the padding. If is `int`, uses the same
padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
:math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
Note that the output dimensions must remain positive.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
:math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`
:math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`
Examples::
>>> m = nn.ReplicationPad2d(2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
>>> input
tensor([[[[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]]])
>>> m(input)
tensor([[[[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[0., 0., 0., 1., 2., 2., 2.],
[3., 3., 3., 4., 5., 5., 5.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.],
[6., 6., 6., 7., 8., 8., 8.]]]])
>>> # using different paddings for different sides
>>> m = nn.ReplicationPad2d((1, 1, 2, 0))
>>> m(input)
tensor([[[[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[0., 0., 1., 2., 2.],
[3., 3., 4., 5., 5.],
[6., 6., 7., 8., 8.]]]])
"""
# pyrefly: ignore [bad-override]
padding: tuple[int, int, int, int]
def __init__(self, padding: _size_4_t) -> None:
super().__init__()
self.padding = _quadruple(padding)
| ReplicationPad2d |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trace.py | {
"start": 5417,
"end": 5526
} | class ____(TypedDict):
orphan_errors: list[TraceError]
transactions: list[FullResponse]
| SerializedTrace |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_resource_pool.py | {
"start": 383,
"end": 7818
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'generation': 'int',
'name': 'str',
'resource_slice_count': 'int'
}
attribute_map = {
'generation': 'generation',
'name': 'name',
'resource_slice_count': 'resourceSliceCount'
}
def __init__(self, generation=None, name=None, resource_slice_count=None, local_vars_configuration=None): # noqa: E501
"""V1beta1ResourcePool - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._generation = None
self._name = None
self._resource_slice_count = None
self.discriminator = None
self.generation = generation
self.name = name
self.resource_slice_count = resource_slice_count
@property
def generation(self):
"""Gets the generation of this V1beta1ResourcePool. # noqa: E501
Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted. Combined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state. # noqa: E501
:return: The generation of this V1beta1ResourcePool. # noqa: E501
:rtype: int
"""
return self._generation
@generation.setter
def generation(self, generation):
"""Sets the generation of this V1beta1ResourcePool.
Generation tracks the change in a pool over time. Whenever a driver changes something about one or more of the resources in a pool, it must change the generation in all ResourceSlices which are part of that pool. Consumers of ResourceSlices should only consider resources from the pool with the highest generation number. The generation may be reset by drivers, which should be fine for consumers, assuming that all ResourceSlices in a pool are updated to match or deleted. Combined with ResourceSliceCount, this mechanism enables consumers to detect pools which are comprised of multiple ResourceSlices and are in an incomplete state. # noqa: E501
:param generation: The generation of this V1beta1ResourcePool. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and generation is None: # noqa: E501
raise ValueError("Invalid value for `generation`, must not be `None`") # noqa: E501
self._generation = generation
@property
def name(self):
"""Gets the name of this V1beta1ResourcePool. # noqa: E501
Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required. It must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable. # noqa: E501
:return: The name of this V1beta1ResourcePool. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1ResourcePool.
Name is used to identify the pool. For node-local devices, this is often the node name, but this is not required. It must not be longer than 253 characters and must consist of one or more DNS sub-domains separated by slashes. This field is immutable. # noqa: E501
:param name: The name of this V1beta1ResourcePool. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def resource_slice_count(self):
"""Gets the resource_slice_count of this V1beta1ResourcePool. # noqa: E501
ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero. Consumers can use this to check whether they have seen all ResourceSlices belonging to the same pool. # noqa: E501
:return: The resource_slice_count of this V1beta1ResourcePool. # noqa: E501
:rtype: int
"""
return self._resource_slice_count
@resource_slice_count.setter
def resource_slice_count(self, resource_slice_count):
"""Sets the resource_slice_count of this V1beta1ResourcePool.
ResourceSliceCount is the total number of ResourceSlices in the pool at this generation number. Must be greater than zero. Consumers can use this to check whether they have seen all ResourceSlices belonging to the same pool. # noqa: E501
:param resource_slice_count: The resource_slice_count of this V1beta1ResourcePool. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and resource_slice_count is None: # noqa: E501
raise ValueError("Invalid value for `resource_slice_count`, must not be `None`") # noqa: E501
self._resource_slice_count = resource_slice_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1ResourcePool):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1ResourcePool):
return True
return self.to_dict() != other.to_dict()
| V1beta1ResourcePool |
python | lazyprogrammer__machine_learning_examples | cnn_class2/tf_resnet_first_layers.py | {
"start": 1854,
"end": 1960
} | class ____:
def forward(self, X):
return tf.nn.relu(X)
def get_params(self):
return []
| ReLULayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/bigquery_to_mysql.py | {
"start": 1198,
"end": 3662
} | class ____(BigQueryToSqlBaseOperator):
"""
Fetch data from a BigQuery table (alternatively fetch selected columns) and insert it into a MySQL table.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryToMySqlOperator`
:param mysql_table: target MySQL table, use dot notation to target a
specific database. It is deprecated: use target_table_name instead. (templated)
:param target_table_name: target MySQL table. It takes precedence over mysql_table. (templated)
:param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`.
.. warning::
The `mysql_table` parameter has been deprecated. Use `target_table_name` instead.
"""
template_fields: Sequence[str] = (*BigQueryToSqlBaseOperator.template_fields, "dataset_id", "table_id")
def __init__(
self,
*,
mysql_table: str | None = None,
target_table_name: str | None = None,
mysql_conn_id: str = "mysql_default",
dataset_id: str | None = None,
table_id: str | None = None,
**kwargs,
) -> None:
if mysql_table is not None:
warnings.warn(
"The `mysql_table` parameter has been deprecated. Use `target_table_name` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if target_table_name is not None:
raise ValueError(
f"Cannot set both arguments: mysql_table={mysql_table!r} and "
f"target_table_name={target_table_name!r}."
)
target_table_name = mysql_table
super().__init__(
target_table_name=target_table_name, dataset_id=dataset_id, table_id=table_id, **kwargs
)
self.mysql_conn_id = mysql_conn_id
@cached_property
def mysql_hook(self) -> MySqlHook:
return MySqlHook(schema=self.database, mysql_conn_id=self.mysql_conn_id)
def get_sql_hook(self) -> MySqlHook:
return self.mysql_hook
def execute(self, context):
# Set source_project_dataset_table here, after hooks are initialized and project_id is available
project_id = self.bigquery_hook.project_id
self.source_project_dataset_table = f"{project_id}.{self.dataset_id}.{self.table_id}"
return super().execute(context)
| BigQueryToMySqlOperator |
python | google__pytype | pytype_extensions/instrumentation_for_testing_test.py | {
"start": 224,
"end": 357
} | class ____:
def __init__(self):
raise RuntimeError("Meant to be inaccessible")
def Mul100(self, i):
return i * 100
| NoCtor |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofworkv2.py | {
"start": 27527,
"end": 43578
} | class ____(UOWTest):
def teardown_test(self):
engines.testing_reaper.rollback_all()
# mysql can't handle delete from nodes
# since it doesn't deal with the FKs correctly,
# so wipe out the parent_id first
with testing.db.begin() as conn:
conn.execute(self.tables.nodes.update().values(parent_id=None))
def test_one_to_many_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node, nodes, properties={"children": relationship(Node)}
)
sess = fixture_session()
n2, n3 = Node(data="n2"), Node(data="n3")
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{"parent_id": None, "data": "n1"},
),
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data) RETURNING nodes.id",
lambda ctx: [
{"parent_id": n1.id, "data": "n2"},
{"parent_id": n1.id, "data": "n3"},
],
),
],
[
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n2"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n3"},
),
),
],
),
)
def test_one_to_many_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node, nodes, properties={"children": relationship(Node)}
)
sess = fixture_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_one_to_many_delete_parent(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node, nodes, properties={"children": relationship(Node)}
)
sess = fixture_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2, n3])
sess.add(n1)
sess.flush()
sess.delete(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id "
"WHERE nodes.id = :nodes_id",
lambda ctx: [
{"nodes_id": n3.id, "parent_id": None},
{"nodes_id": n2.id, "parent_id": None},
],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_many_to_one_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
sess = fixture_session()
n1 = Node(data="n1")
n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
sess.add_all([n2, n3])
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
{"parent_id": None, "data": "n1"},
),
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data) RETURNING nodes.id",
lambda ctx: [
{"parent_id": n1.id, "data": "n2"},
{"parent_id": n1.id, "data": "n3"},
],
),
],
[
AllOf(
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n2"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n3"},
),
),
],
),
)
def test_many_to_one_delete_all(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
sess = fixture_session()
n1 = Node(data="n1")
n2, n3 = Node(data="n2", parent=n1), Node(data="n3", parent=n1)
sess.add_all([n2, n3])
sess.flush()
sess.delete(n1)
sess.delete(n2)
sess.delete(n3)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": n2.id}, {"id": n3.id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": n1.id},
),
)
def test_many_to_one_set_null_unloaded(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
with fixture_session() as sess:
n1 = Node(data="n1")
n2 = Node(data="n2", parent=n1)
sess.add_all([n1, n2])
sess.commit()
with fixture_session() as sess:
n2 = sess.query(Node).filter_by(data="n2").one()
n2.parent = None
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"UPDATE nodes SET parent_id=:parent_id WHERE "
"nodes.id = :nodes_id",
lambda ctx: {"parent_id": None, "nodes_id": n2.id},
),
)
def test_cycle_rowswitch(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node, nodes, properties={"children": relationship(Node)}
)
sess = fixture_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n3.id = n2.id
n1.children.append(n3)
sess.flush()
def test_bidirectional_mutations_one(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=nodes.c.id)
)
},
)
sess = fixture_session()
n2, n3 = Node(data="n2", children=[]), Node(data="n3", children=[])
n1 = Node(data="n1", children=[n2])
sess.add(n1)
sess.flush()
sess.delete(n2)
n1.children.append(n3)
sess.flush()
sess.delete(n1)
sess.delete(n3)
sess.flush()
def test_bidirectional_multilevel_save(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=nodes.c.id)
)
},
)
sess = fixture_session()
n1 = Node(data="n1")
n1.children.append(Node(data="n11"))
n12 = Node(data="n12")
n1.children.append(n12)
n1.children.append(Node(data="n13"))
n1.children[1].children.append(Node(data="n121"))
n1.children[1].children.append(Node(data="n122"))
n1.children[1].children.append(Node(data="n123"))
sess.add(n1)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": None, "data": "n1"},
),
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data) RETURNING nodes.id",
lambda ctx: [
{"parent_id": n1.id, "data": "n11"},
{"parent_id": n1.id, "data": "n12"},
{"parent_id": n1.id, "data": "n13"},
],
),
],
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n11"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n12"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n1.id, "data": "n13"},
),
],
),
Conditional(
testing.db.dialect.insert_executemany_returning,
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data) RETURNING nodes.id",
lambda ctx: [
{"parent_id": n12.id, "data": "n121"},
{"parent_id": n12.id, "data": "n122"},
{"parent_id": n12.id, "data": "n123"},
],
),
],
[
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n121"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n122"},
),
CompiledSQL(
"INSERT INTO nodes (parent_id, data) VALUES "
"(:parent_id, :data)",
lambda ctx: {"parent_id": n12.id, "data": "n123"},
),
],
),
)
def test_singlecycle_flush_size(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node, nodes, properties={"children": relationship(Node)}
)
with fixture_session() as sess:
n1 = Node(data="ed")
sess.add(n1)
self._assert_uow_size(sess, 2)
sess.flush()
n1.data = "jack"
self._assert_uow_size(sess, 2)
sess.flush()
n2 = Node(data="foo")
sess.add(n2)
sess.flush()
n1.children.append(n2)
self._assert_uow_size(sess, 3)
sess.commit()
sess = fixture_session(autoflush=False)
n1 = sess.query(Node).first()
n1.data = "ed"
self._assert_uow_size(sess, 2)
n1.children
self._assert_uow_size(sess, 2)
def test_delete_unloaded_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={"parent": relationship(Node, remote_side=nodes.c.id)},
)
parent = Node()
c1, c2 = Node(parent=parent), Node(parent=parent)
session = fixture_session()
session.add_all([c1, c2])
session.add(parent)
session.flush()
pid = parent.id
c1id = c1.id
c2id = c2.id
session.expire(parent)
session.expire(c1)
session.expire(c2)
session.delete(c1)
session.delete(c2)
session.delete(parent)
# testing that relationships
# are loaded even if all ids/references are
# expired
self.assert_sql_execution(
testing.db,
session.flush,
AllOf(
# ensure all three m2os are loaded.
# the selects here are in fact unexpiring
# each row - the m2o comes from the identity map.
CompiledSQL(
"SELECT nodes.id, nodes.parent_id, nodes.data "
"FROM nodes "
"WHERE nodes.id = :pk_1",
lambda ctx: {"pk_1": pid},
),
CompiledSQL(
"SELECT nodes.id, nodes.parent_id, nodes.data "
"FROM nodes "
"WHERE nodes.id = :pk_1",
lambda ctx: {"pk_1": c1id},
),
CompiledSQL(
"SELECT nodes.id, nodes.parent_id, nodes.data "
"FROM nodes "
"WHERE nodes.id = :pk_1",
lambda ctx: {"pk_1": c2id},
),
AllOf(
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: [{"id": c1id}, {"id": c2id}],
),
CompiledSQL(
"DELETE FROM nodes WHERE nodes.id = :id",
lambda ctx: {"id": pid},
),
),
),
)
| SingleCycleTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.