language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_inductor/codegen/multi_kernel.py | {
"start": 11316,
"end": 18906
} | class ____:
"""
This class is called at run time to actually run the kernel
"""
def __init__(self, multi_kernel_name, kernels, arg_index):
assert len(kernels) >= 1
self._kernels = kernels
self.multi_kernel_name = multi_kernel_name
self.disable_cache = os.environ.get(
"TORCHINDUCTOR_DISABLE_MULTI_KERNEL_CACHE"
) == "1" or is_metric_table_enabled("persistent_red_perf")
self.picked_kernel = None
self.arg_index = arg_index
if config.triton.multi_kernel > 1:
# manually force a subkernel to ease perf testing
picked_by_config = config.triton.multi_kernel - 2
assert picked_by_config < len(self._kernels)
# pyrefly: ignore [bad-assignment]
self.picked_kernel = picked_by_config
elif not self.disable_cache:
self.load_cache()
self._recorded = False
def cache_file_path(self):
key = code_hash(
",".join(
[
f"{k.fn.cache_key}{k.size_hints!r}{k.triton_meta!r}"
for k in self.kernels
]
)
)
_, _, path = get_path(key, "picked_kernel")
return pathlib.Path(path)
def load_cache(self):
assert self.picked_kernel is None
path = self.cache_file_path()
if path.exists():
with path.open() as fd:
# pyrefly: ignore [bad-assignment]
self.picked_kernel = int(fd.read())
# pyrefly: ignore [unsupported-operation]
assert self.picked_kernel >= 0 and self.picked_kernel < len(
self._kernels
)
log.debug(
"Load picked kernel %d from cache file %s", self.picked_kernel, path
)
def store_cache(self):
assert self.picked_kernel is not None
path = self.cache_file_path()
path.parent.mkdir(parents=True, exist_ok=True)
write_atomic(path, str(self.picked_kernel))
log.debug("Store picked kernel %d to cache file %s", self.picked_kernel, path)
@property
def kernels(self):
"""
Read results from future.
This should be called after parallel compilation is done.
In case you call this before compilation is done,
it may slow down the parallel compilation.
"""
for i, kernel in enumerate(self._kernels):
if isinstance(kernel, CodeCacheFuture):
self._kernels[i] = kernel.result()
return self._kernels
def benchmark_sub_kernels(self, *args, **kwargs):
"""
Benchmark all the sub kernels and return the execution time
(in milliseconds) for each of time.
Unit test may mock this method to force a specific kernel to
be picked.
"""
def wrap_fn(kernel, index):
def inner():
filtered_args = self._get_filtered_args(args, index)
args_clone, kwargs_clone = kernel.clone_args(*filtered_args, **kwargs)
return kernel.run(*args_clone, **kwargs_clone)
return inner
return [
benchmarker.benchmark(
wrap_fn(kernel, index),
# Currently the kernel type must be a CachingAutotuner
device=kernel.device_props.type,
rep=40,
)
for index, kernel in enumerate(self.kernels)
]
def _get_filtered_args(self, args, index):
"""
We pass in all arguments to all kernels into the MultiKernelCall
so when invoking a particular kernel we need to filter to only the
arguments for that specific kernel.
"""
# This is sometimes invoked at runtime where V.graph is
# a NullHandler
if hasattr(V.graph, "cpp_wrapper") and V.graph.cpp_wrapper:
# for cpp-wrapper, we should not filter the args since
# we already have chosen a single kernel and arg set.
return args
return [item for s in self.arg_index[index] for item in args[s]]
# record_choice and lookup_choice are helper functions for cpp-wrapper
# codegen. The first pass use record_choice to keep the choice and
# the second pass do lookup by calling lookup_choice.
#
# An alternative that reused the multi-kernel cache does not work well
# since during codegen of the second pass, it's very hard to know the
# path for the cache file. Also reading the cache file need do some IO
# which can be slower.
@staticmethod
def record_choice(multi_kernel_name: str, picked_kernel_name: str):
"""
Record the multi-kernel choice for cpp-wrapper after autotuning
We should do nothing if this function is not called during codegen.
"""
from torch._inductor.graph import GraphLowering
if not isinstance(V.graph, GraphLowering):
return
if not V.graph.record_multi_kernel_choice:
return
V.graph.multi_kernel_to_choice[multi_kernel_name] = picked_kernel_name
@staticmethod
def lookup_choice(multi_kernel_name: str) -> str:
# this should always been done during cpp-wrapper codegen
assert (
V.graph.record_multi_kernel_choice
and multi_kernel_name in V.graph.multi_kernel_to_choice
)
# there should be no miss
return V.graph.multi_kernel_to_choice[multi_kernel_name]
def run(self, *args, **kwargs):
if self.picked_kernel is None:
timings = self.benchmark_sub_kernels(*args, **kwargs)
self.picked_kernel = timings.index(min(timings))
k0 = self.kernels[0]
log.debug(
"pick %dth sub-kernel in %s. Size hints %s. Reduction hint %s. Timings %s",
self.picked_kernel,
[k.inductor_meta.get("kernel_name") for k in self.kernels],
k0.size_hints,
k0.inductor_meta.get("reduction_hint"),
timings,
)
get_metric_table("persistent_red_perf").add_row(
functools.partial(self._metrics_table_row, timings)
)
if not self.disable_cache:
self.store_cache()
if not self._recorded:
self._recorded = True
picked_kernel_name = self.kernels[self.picked_kernel].inductor_meta.get(
"kernel_name"
)
assert picked_kernel_name is not None
self.record_choice(self.multi_kernel_name, picked_kernel_name)
run = self.kernels[self.picked_kernel].run # type: ignore[method-assign]
filtered_args = self._get_filtered_args(args, self.picked_kernel)
run(*filtered_args, **kwargs)
def _metrics_table_row(self, timings):
def get_kernel_path(k):
return k.fn.fn.__code__.co_filename
k0 = self.kernels[0]
row = {
"size_hints": k0.size_hints,
"reduction_hint": k0.inductor_meta.get("reduction_hint"),
}
max_kernels = 4
assert len(timings) <= max_kernels
for i in range(max_kernels):
if i < len(self.kernels):
row[f"kernel{i}_path"] = get_kernel_path(self.kernels[i])
row[f"kernel{i}_latency"] = timings[i]
else:
row[f"kernel{i}_path"] = ""
row[f"kernel{i}_latency"] = ""
return row
| MultiKernelCall |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_bincount_ops_test.py | {
"start": 18303,
"end": 20261
} | class ____(test_util.TensorFlowTestCase):
def test_dense_input_ragged_weights_fails(self):
x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_sparse_input_ragged_weights_fails(self):
x = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_dense_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_sparse_weights_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = sparse_ops.from_dense(
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
def test_ragged_input_different_shape_fails(self):
x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
weights = ragged_factory_ops.constant([[6, 0.5, 2], [], [10, 0.25, 5, 3]])
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must have the same row splits"):
self.evaluate(sparse_ops.sparse_bincount(x, weights=weights, axis=-1))
if __name__ == "__main__":
test.main()
| TestSparseCountFailureModes |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 2737,
"end": 3283
} | class ____(ProcessSpiderInputSpiderWithoutErrback):
name = "ProcessSpiderInputSpiderWithErrback"
async def start(self):
yield Request(
self.mockserver.url("/status?n=200"), self.parse, errback=self.errback
)
def errback(self, failure):
self.logger.info("Got a Failure on the Request errback")
return {"from": "errback"}
# ================================================================================
# (2) exceptions from a spider callback (generator)
| ProcessSpiderInputSpiderWithErrback |
python | sympy__sympy | sympy/tensor/array/expressions/array_expressions.py | {
"start": 12945,
"end": 27862
} | class ____(_CodegenArrayAbstract):
r"""
Class to represent permutation of axes of arrays.
Examples
========
>>> from sympy.tensor.array import permutedims
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> cg = permutedims(M, [1, 0])
The object ``cg`` represents the transposition of ``M``, as the permutation
``[1, 0]`` will act on its indices by switching them:
`M_{ij} \Rightarrow M_{ji}`
This is evident when transforming back to matrix form:
>>> from sympy.tensor.array.expressions.from_array_to_matrix import convert_array_to_matrix
>>> convert_array_to_matrix(cg)
M.T
>>> N = MatrixSymbol("N", 3, 2)
>>> cg = permutedims(N, [1, 0])
>>> cg.shape
(2, 3)
There are optional parameters that can be used as alternative to the permutation:
>>> from sympy.tensor.array.expressions import ArraySymbol, PermuteDims
>>> M = ArraySymbol("M", (1, 2, 3, 4, 5))
>>> expr = PermuteDims(M, index_order_old="ijklm", index_order_new="kijml")
>>> expr
PermuteDims(M, (0 2 1)(3 4))
>>> expr.shape
(3, 1, 2, 5, 4)
Permutations of tensor products are simplified in order to achieve a
standard form:
>>> from sympy.tensor.array import tensorproduct
>>> M = MatrixSymbol("M", 4, 5)
>>> tp = tensorproduct(M, N)
>>> tp.shape
(4, 5, 3, 2)
>>> perm1 = permutedims(tp, [2, 3, 1, 0])
The args ``(M, N)`` have been sorted and the permutation has been
simplified, the expression is equivalent:
>>> perm1.expr.args
(N, M)
>>> perm1.shape
(3, 2, 5, 4)
>>> perm1.permutation
(2 3)
The permutation in its array form has been simplified from
``[2, 3, 1, 0]`` to ``[0, 1, 3, 2]``, as the arguments of the tensor
product `M` and `N` have been switched:
>>> perm1.permutation.array_form
[0, 1, 3, 2]
We can nest a second permutation:
>>> perm2 = permutedims(perm1, [1, 0, 2, 3])
>>> perm2.shape
(2, 3, 5, 4)
>>> perm2.permutation.array_form
[1, 0, 3, 2]
"""
def __new__(cls, expr, permutation=None, index_order_old=None, index_order_new=None, **kwargs):
from sympy.combinatorics import Permutation
expr = _sympify(expr)
expr_rank = get_rank(expr)
permutation = cls._get_permutation_from_arguments(permutation, index_order_old, index_order_new, expr_rank)
permutation = Permutation(permutation)
permutation_size = permutation.size
if permutation_size != expr_rank:
raise ValueError("Permutation size must be the length of the shape of expr")
canonicalize = kwargs.pop("canonicalize", False)
obj = Basic.__new__(cls, expr, permutation)
obj._subranks = [get_rank(expr)]
shape = get_shape(expr)
if shape is None:
obj._shape = None
else:
obj._shape = tuple(shape[permutation(i)] for i in range(len(shape)))
if canonicalize:
return obj._canonicalize()
return obj
def _canonicalize(self):
expr = self.expr
permutation = self.permutation
if isinstance(expr, PermuteDims):
subexpr = expr.expr
subperm = expr.permutation
permutation = permutation * subperm
expr = subexpr
if isinstance(expr, ArrayContraction):
expr, permutation = self._PermuteDims_denestarg_ArrayContraction(expr, permutation)
if isinstance(expr, ArrayTensorProduct):
expr, permutation = self._PermuteDims_denestarg_ArrayTensorProduct(expr, permutation)
if isinstance(expr, (ZeroArray, ZeroMatrix)):
return ZeroArray(*[expr.shape[i] for i in permutation.array_form])
plist = permutation.array_form
if plist == sorted(plist):
return expr
return self.func(expr, permutation, canonicalize=False)
@property
def expr(self):
return self.args[0]
@property
def permutation(self):
return self.args[1]
@classmethod
def _PermuteDims_denestarg_ArrayTensorProduct(cls, expr, permutation):
# Get the permutation in its image-form:
perm_image_form = _af_invert(permutation.array_form)
args = list(expr.args)
# Starting index global position for every arg:
cumul = list(accumulate([0] + expr.subranks))
# Split `perm_image_form` into a list of list corresponding to the indices
# of every argument:
perm_image_form_in_components = [perm_image_form[cumul[i]:cumul[i+1]] for i in range(len(args))]
# Create an index, target-position-key array:
ps = [(i, sorted(comp)) for i, comp in enumerate(perm_image_form_in_components)]
# Sort the array according to the target-position-key:
# In this way, we define a canonical way to sort the arguments according
# to the permutation.
ps.sort(key=lambda x: x[1])
# Read the inverse-permutation (i.e. image-form) of the args:
perm_args_image_form = [i[0] for i in ps]
# Apply the args-permutation to the `args`:
args_sorted = [args[i] for i in perm_args_image_form]
# Apply the args-permutation to the array-form of the permutation of the axes (of `expr`):
perm_image_form_sorted_args = [perm_image_form_in_components[i] for i in perm_args_image_form]
new_permutation = Permutation(_af_invert([j for i in perm_image_form_sorted_args for j in i]))
return _array_tensor_product(*args_sorted), new_permutation
@classmethod
def _PermuteDims_denestarg_ArrayContraction(cls, expr, permutation):
if not isinstance(expr, ArrayContraction):
return expr, permutation
if not isinstance(expr.expr, ArrayTensorProduct):
return expr, permutation
args = expr.expr.args
subranks = [get_rank(arg) for arg in expr.expr.args]
contraction_indices = expr.contraction_indices
contraction_indices_flat = [j for i in contraction_indices for j in i]
cumul = list(accumulate([0] + subranks))
# Spread the permutation in its array form across the args in the corresponding
# tensor-product arguments with free indices:
permutation_array_blocks_up = []
image_form = _af_invert(permutation.array_form)
counter = 0
for i in range(len(subranks)):
current = []
for j in range(cumul[i], cumul[i+1]):
if j in contraction_indices_flat:
continue
current.append(image_form[counter])
counter += 1
permutation_array_blocks_up.append(current)
# Get the map of axis repositioning for every argument of tensor-product:
index_blocks = [list(range(cumul[i], cumul[i+1])) for i, e in enumerate(expr.subranks)]
index_blocks_up = expr._push_indices_up(expr.contraction_indices, index_blocks)
inverse_permutation = permutation**(-1)
index_blocks_up_permuted = [[inverse_permutation(j) for j in i if j is not None] for i in index_blocks_up]
# Sorting key is a list of tuple, first element is the index of `args`, second element of
# the tuple is the sorting key to sort `args` of the tensor product:
sorting_keys = list(enumerate(index_blocks_up_permuted))
sorting_keys.sort(key=lambda x: x[1])
# Now we can get the permutation acting on the args in its image-form:
new_perm_image_form = [i[0] for i in sorting_keys]
# Apply the args-level permutation to various elements:
new_index_blocks = [index_blocks[i] for i in new_perm_image_form]
new_index_perm_array_form = _af_invert([j for i in new_index_blocks for j in i])
new_args = [args[i] for i in new_perm_image_form]
new_contraction_indices = [tuple(new_index_perm_array_form[j] for j in i) for i in contraction_indices]
new_expr = _array_contraction(_array_tensor_product(*new_args), *new_contraction_indices)
new_permutation = Permutation(_af_invert([j for i in [permutation_array_blocks_up[k] for k in new_perm_image_form] for j in i]))
return new_expr, new_permutation
@classmethod
def _check_permutation_mapping(cls, expr, permutation):
subranks = expr.subranks
index2arg = [i for i, arg in enumerate(expr.args) for j in range(expr.subranks[i])]
permuted_indices = [permutation(i) for i in range(expr.subrank())]
new_args = list(expr.args)
arg_candidate_index = index2arg[permuted_indices[0]]
current_indices = []
new_permutation = []
inserted_arg_cand_indices = set()
for i, idx in enumerate(permuted_indices):
if index2arg[idx] != arg_candidate_index:
new_permutation.extend(current_indices)
current_indices = []
arg_candidate_index = index2arg[idx]
current_indices.append(idx)
arg_candidate_rank = subranks[arg_candidate_index]
if len(current_indices) == arg_candidate_rank:
new_permutation.extend(sorted(current_indices))
local_current_indices = [j - min(current_indices) for j in current_indices]
i1 = index2arg[i]
new_args[i1] = _permute_dims(new_args[i1], Permutation(local_current_indices))
inserted_arg_cand_indices.add(arg_candidate_index)
current_indices = []
new_permutation.extend(current_indices)
# TODO: swap args positions in order to simplify the expression:
# TODO: this should be in a function
args_positions = list(range(len(new_args)))
# Get possible shifts:
maps = {}
cumulative_subranks = [0] + list(accumulate(subranks))
for i in range(len(subranks)):
s = {index2arg[new_permutation[j]] for j in range(cumulative_subranks[i], cumulative_subranks[i+1])}
if len(s) != 1:
continue
elem = next(iter(s))
if i != elem:
maps[i] = elem
# Find cycles in the map:
lines = []
current_line = []
while maps:
if len(current_line) == 0:
k, v = maps.popitem()
current_line.append(k)
else:
k = current_line[-1]
if k not in maps:
current_line = []
continue
v = maps.pop(k)
if v in current_line:
lines.append(current_line)
current_line = []
continue
current_line.append(v)
for line in lines:
for i, e in enumerate(line):
args_positions[line[(i + 1) % len(line)]] = e
# TODO: function in order to permute the args:
permutation_blocks = [[new_permutation[cumulative_subranks[i] + j] for j in range(e)] for i, e in enumerate(subranks)]
new_args = [new_args[i] for i in args_positions]
new_permutation_blocks = [permutation_blocks[i] for i in args_positions]
new_permutation2 = [j for i in new_permutation_blocks for j in i]
return _array_tensor_product(*new_args), Permutation(new_permutation2) # **(-1)
@classmethod
def _check_if_there_are_closed_cycles(cls, expr, permutation):
args = list(expr.args)
subranks = expr.subranks
cyclic_form = permutation.cyclic_form
cumulative_subranks = [0] + list(accumulate(subranks))
cyclic_min = [min(i) for i in cyclic_form]
cyclic_max = [max(i) for i in cyclic_form]
cyclic_keep = []
for i, cycle in enumerate(cyclic_form):
flag = True
for j in range(len(cumulative_subranks) - 1):
if cyclic_min[i] >= cumulative_subranks[j] and cyclic_max[i] < cumulative_subranks[j+1]:
# Found a sinkable cycle.
args[j] = _permute_dims(args[j], Permutation([[k - cumulative_subranks[j] for k in cycle]]))
flag = False
break
if flag:
cyclic_keep.append(cycle)
return _array_tensor_product(*args), Permutation(cyclic_keep, size=permutation.size)
def nest_permutation(self):
r"""
DEPRECATED.
"""
ret = self._nest_permutation(self.expr, self.permutation)
if ret is None:
return self
return ret
@classmethod
def _nest_permutation(cls, expr, permutation):
if isinstance(expr, ArrayTensorProduct):
return _permute_dims(*cls._check_if_there_are_closed_cycles(expr, permutation))
elif isinstance(expr, ArrayContraction):
# Invert tree hierarchy: put the contraction above.
cycles = permutation.cyclic_form
newcycles = ArrayContraction._convert_outer_indices_to_inner_indices(expr, *cycles)
newpermutation = Permutation(newcycles)
new_contr_indices = [tuple(newpermutation(j) for j in i) for i in expr.contraction_indices]
return _array_contraction(PermuteDims(expr.expr, newpermutation), *new_contr_indices)
elif isinstance(expr, ArrayAdd):
return _array_add(*[PermuteDims(arg, permutation) for arg in expr.args])
return None
def as_explicit(self):
expr = self.expr
if hasattr(expr, "as_explicit"):
expr = expr.as_explicit()
return permutedims(expr, self.permutation)
@classmethod
def _get_permutation_from_arguments(cls, permutation, index_order_old, index_order_new, dim):
if permutation is None:
if index_order_new is None or index_order_old is None:
raise ValueError("Permutation not defined")
return PermuteDims._get_permutation_from_index_orders(index_order_old, index_order_new, dim)
else:
if index_order_new is not None:
raise ValueError("index_order_new cannot be defined with permutation")
if index_order_old is not None:
raise ValueError("index_order_old cannot be defined with permutation")
return permutation
@classmethod
def _get_permutation_from_index_orders(cls, index_order_old, index_order_new, dim):
if len(set(index_order_new)) != dim:
raise ValueError("wrong number of indices in index_order_new")
if len(set(index_order_old)) != dim:
raise ValueError("wrong number of indices in index_order_old")
if len(set.symmetric_difference(set(index_order_new), set(index_order_old))) > 0:
raise ValueError("index_order_new and index_order_old must have the same indices")
permutation = [index_order_old.index(i) for i in index_order_new]
return permutation
| PermuteDims |
python | huggingface__transformers | tests/models/cohere/test_tokenization_cohere.py | {
"start": 872,
"end": 15227
} | class ____(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = CohereTokenizer
from_pretrained_vocab_key = "tokenizer_file"
from_pretrained_id = "hf-internal-testing/tiny-random-CohereForCausalLM"
special_tokens_map = {
"bos_token": "<BOS_TOKEN>",
"eos_token": "<|END_OF_TURN_TOKEN|>",
"unk_token": "<UNK>",
"pad_token": "<PAD>",
}
integration_expected_tokens = ['T', 'h', 'is', 'Ġis', 'Ġa', 'Ġt', 'est', 'Ġ', 'Ł', 'ĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġb', 'orn', 'Ġin', 'Ġ', '9', '2', '0', '0', '0', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġf', 'als', 'é', '.', 'Ċ', 'ç', 'Ķ', 'Ł', 'æ', '´', '»', 'ç', 'ļ', 'Ħ', 'ç', 'ľ', 'Ł', 'è', '°', 'Ľ', 'æ', 'ĺ', '¯', 'Ċ', 'H', 'i', 'Ġ', 'ĠH', 'ell', 'o', 'Ċ', 'H', 'i', 'Ġ', 'Ġ', 'ĠH', 'ell', 'o', 'Ċ', 'Ċ', 'ĠĊ', 'Ġ', 'ĠĊ', 'ĠH', 'ell', 'o', 'Ċ', '<', 's', '>', 'Ċ', 'h', 'i', '<', 's', '>', 't', 'he', 're', 'Ċ', 'T', 'he', 'Ġfollow', 'ing', 'Ġst', 'r', 'ing', 'Ġsh', 'ould', 'Ġbe', 'Ġpro', 'per', 'ly', 'Ġen', 'c', 'od', 'ed', ':', 'ĠH', 'ell', 'o', '.', 'Ċ', 'B', 'ut', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à', '¸', 'Ľ', 'à', '¸', 'µ', 'Ġ', 'Ġ', 'Ġ', 'ird', 'Ġ', 'Ġ', 'Ġ', 'à', '¸', 'Ķ', 'Ċ', 'H', 'ey', 'Ġh', 'ow', 'Ġare', 'Ġy', 'ou', 'Ġdo', 'ing'] # fmt: skip
integration_expected_token_ids = [60, 80, 223, 307, 204, 202, 333, 167, 199, 192, 178, 166, 49, 265, 227, 712, 229, 167, 33, 26, 24, 24, 24, 20, 233, 524, 307, 222, 632, 1018, 22, 166, 160, 188, 199, 159, 120, 127, 160, 194, 172, 160, 196, 199, 161, 116, 195, 159, 192, 115, 166, 48, 81, 167, 289, 420, 87, 166, 48, 81, 167, 167, 289, 420, 87, 166, 166, 259, 167, 259, 289, 420, 87, 166, 36, 91, 38, 166, 80, 81, 36, 91, 38, 92, 203, 210, 166, 60, 203, 765, 231, 292, 90, 231, 396, 458, 299, 348, 474, 271, 551, 75, 339, 212, 34, 289, 420, 87, 22, 166, 42, 293, 167, 813, 233, 167, 153, 124, 195, 153, 124, 121, 167, 167, 167, 813, 167, 167, 167, 153, 124, 188, 166, 48, 634, 240, 291, 394, 411, 243, 793, 231] # fmt: skip
expected_tokens_from_ids = ['T', 'h', 'is', 'Ġis', 'Ġa', 'Ġt', 'est', 'Ġ', 'Ł', 'ĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġb', 'orn', 'Ġin', 'Ġ', '9', '2', '0', '0', '0', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġf', 'als', 'é', '.', 'Ċ', 'ç', 'Ķ', 'Ł', 'æ', '´', '»', 'ç', 'ļ', 'Ħ', 'ç', 'ľ', 'Ł', 'è', '°', 'Ľ', 'æ', 'ĺ', '¯', 'Ċ', 'H', 'i', 'Ġ', 'ĠH', 'ell', 'o', 'Ċ', 'H', 'i', 'Ġ', 'Ġ', 'ĠH', 'ell', 'o', 'Ċ', 'Ċ', 'ĠĊ', 'Ġ', 'ĠĊ', 'ĠH', 'ell', 'o', 'Ċ', '<', 's', '>', 'Ċ', 'h', 'i', '<', 's', '>', 't', 'he', 're', 'Ċ', 'T', 'he', 'Ġfollow', 'ing', 'Ġst', 'r', 'ing', 'Ġsh', 'ould', 'Ġbe', 'Ġpro', 'per', 'ly', 'Ġen', 'c', 'od', 'ed', ':', 'ĠH', 'ell', 'o', '.', 'Ċ', 'B', 'ut', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à', '¸', 'Ľ', 'à', '¸', 'µ', 'Ġ', 'Ġ', 'Ġ', 'ird', 'Ġ', 'Ġ', 'Ġ', 'à', '¸', 'Ķ', 'Ċ', 'H', 'ey', 'Ġh', 'ow', 'Ġare', 'Ġy', 'ou', 'Ġdo', 'ing'] # fmt: skip
integration_expected_decoded_text = "This is a test ���\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
# This gives CPU OOM on a single-gpu runner (~60G RAM). On multi-gpu runner, it has ~180G RAM which is enough.
@require_torch_multi_accelerator
def test_torch_encode_plus_sent_to_model(self):
super().test_torch_encode_plus_sent_to_model()
def test_encodings_from_sample_data(self):
"""
Assert that the created tokens are the same than the hard-coded ones
"""
tokenizer = self.get_tokenizer()
INPUT_SENTENCES = ["The quick brown fox<|END_OF_TURN_TOKEN|>", "jumps over the lazy dog<|END_OF_TURN_TOKEN|>"]
TARGET_TOKENS = [
[5, 60, 203, 746, 666, 980, 571, 222, 87, 96, 8],
[5, 82, 332, 88, 91, 544, 206, 257, 930, 97, 239, 435, 8],
]
computed_tokens = tokenizer(INPUT_SENTENCES)["input_ids"]
self.assertListEqual(TARGET_TOKENS, computed_tokens)
INPUT_SENTENCES_W_BOS = [
"<BOS_TOKEN>The quick brown fox<|END_OF_TURN_TOKEN|>",
"<BOS_TOKEN>jumps over the lazy dog<|END_OF_TURN_TOKEN|>",
]
decoded_tokens = tokenizer.decode(computed_tokens)
self.assertListEqual(decoded_tokens, INPUT_SENTENCES_W_BOS)
def test_pretrained_model_lists(self):
# No `max_model_input_sizes` for Cohere model
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
@require_jinja
def test_tokenization_for_chat(self):
tokenizer = self.get_tokenizer()
test_chats = [
[{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
[
{"role": "system", "content": "You are a helpful chatbot."},
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Nice to meet you."},
],
]
tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
# fmt: off
expected_tokens = [
[5, 36, 99, 59, 60, 41, 58, 60, 71, 55, 46, 71, 60, 61, 58, 54, 71, 60, 55, 51, 45, 54, 99, 38, 36, 99, 59, 65, 59, 60, 45, 53, 71, 60, 55, 51, 45, 54, 99, 38, 65, 243, 394, 204, 336, 84, 88, 887, 374, 216, 74, 286, 22, 8, 36, 99, 59, 60, 41, 58, 60, 71, 55, 46, 71, 60, 61, 58, 54, 71, 60, 55, 51, 45, 54, 99, 38, 36, 99, 61, 59, 45, 58, 71, 60, 55, 51, 45, 54, 99, 38, 48, 420, 87, 9, 8],
[5, 36, 99, 59, 60, 41, 58, 60, 71, 55, 46, 71, 60, 61, 58, 54, 71, 60, 55, 51, 45, 54, 99, 38, 36, 99, 59, 65,
59, 60, 45, 53, 71, 60, 55, 51, 45, 54, 99, 38, 65, 243, 394, 204, 336, 84, 88, 887, 374, 216, 74, 286, 22, 8,
36, 99, 59, 60, 41, 58, 60, 71, 55, 46, 71, 60, 61, 58, 54, 71, 60, 55, 51, 45, 54, 99, 38, 36, 99, 61, 59,
45, 58, 71, 60, 55, 51, 45, 54, 99, 38, 48, 420, 87, 9, 8, 36, 99, 59, 60, 41, 58, 60, 71, 55, 46, 71, 60, 61,
58, 54, 71, 60, 55, 51, 45, 54, 99, 38, 36, 99, 43, 48, 41, 60, 42, 55, 60, 71, 60, 55, 51, 45, 54, 99, 38,
54, 567, 235, 693, 276, 411, 243, 22, 8]
]
# fmt: on
for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
self.assertListEqual(tokenized_chat, expected_tokens)
@require_jinja
def test_tokenization_for_tool_use(self):
tokenizer = self.get_tokenizer()
conversation = [{"role": "user", "content": "Whats the biggest penguin in the world?"}]
tools = [
{
"name": "internet_search",
"description": "Returns a list of relevant document snippets for a textual query retrieved from the internet",
"parameter_definitions": {
"query": {"description": "Query to search the internet with", "type": "str", "required": True}
},
},
{
"name": "directly_answer",
"description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history",
"parameter_definitions": {},
},
]
tool_use_prompt = tokenizer.apply_tool_use_template(
conversation,
tools=tools,
tokenize=False,
add_generation_prompt=True,
)
expected_prompt = '''<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.
## Available Tools
Here is a list of tools that you have available to you:
```python
def internet_search(query: str) -> List[Dict]:
"""Returns a list of relevant document snippets for a textual query retrieved from the internet
Args:
query (str): Query to search the internet with
"""
pass
```
```python
def directly_answer() -> List[Dict]:
"""Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history
"""
pass
```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:
```json
[
{
"tool_name": title of the tool in the specification,
"parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters
}
]```<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>'''
self.assertEqual(tool_use_prompt, expected_prompt)
@require_jinja
def test_tokenization_for_grounded_generation(self):
tokenizer = self.get_tokenizer()
conversation = [{"role": "user", "content": "Whats the biggest penguin in the world?"}]
documents = [
{"title": "Tall penguins", "text": "Emperor penguins are the tallest growing up to 122 cm in height."},
{"title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."},
]
grounded_generation_prompt = tokenizer.apply_grounded_generation_template(
conversation,
documents=documents,
citation_mode="accurate", # or "fast"
tokenize=False,
add_generation_prompt=True,
)
expected_prompt = """<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble
The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral.
# System Preamble
## Basic Rules
You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.
# User Preamble
## Task and Context
You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.
## Style Guide
Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><results>
Document: 0
title: Tall penguins
text: Emperor penguins are the tallest growing up to 122 cm in height.
Document: 1
title: Penguin habitats
text: Emperor penguins only live in Antarctica.
</results><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line.
Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'.
Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'.
Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.
Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols <co: doc> and </co: doc> to indicate when a fact comes from a document in the search result, e.g <co: 0>my fact</co: 0> for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"""
self.assertEqual(grounded_generation_prompt, expected_prompt)
def test_add_prefix_space_fast(self):
tokenizer_w_prefix = self.get_tokenizer(add_prefix_space=True)
tokenizer_wo_prefix = self.get_tokenizer(add_prefix_space=False)
tokens_w_prefix = tokenizer_w_prefix.tokenize("Hey")
tokens_wo_prefix = tokenizer_wo_prefix.tokenize("Hey")
self.assertNotEqual(tokens_w_prefix, tokens_wo_prefix)
| CohereTokenizationTest |
python | tensorflow__tensorflow | tensorflow/python/framework/python_op_gen_annotation_test.py | {
"start": 945,
"end": 1808
} | class ____(googletest.TestCase):
def test_type_annotation_not_empty_for_internal_op(self):
for internal_op in [
data_flow_ops.dynamic_stitch,
gen_nn_ops._fused_batch_norm,
gen_math_ops.add,
]:
sig = inspect.signature(internal_op)
for key in sig.parameters:
if key == "name":
continue
assert sig.parameters[key].annotation != inspect.Signature.empty
def test_type_annotation_empty_for_imported_op(self):
for imported_op in [
data_flow_ops.DynamicStitch,
gen_nn_ops.FusedBatchNorm,
gen_math_ops.Add,
]:
sig = inspect.signature(imported_op)
for key in sig.parameters:
if key == "name":
continue
assert sig.parameters[key].annotation == inspect.Signature.empty
if __name__ == "__main__":
googletest.main()
| PythonOpGetTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 853340,
"end": 854627
} | class ____(sgqlc.types.Type):
"""The value of a reviewers field in a Project item."""
__schema__ = github_schema
__field_names__ = ("field", "reviewers")
field = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field")
"""The field that contains this value."""
reviewers = sgqlc.types.Field(
"RequestedReviewerConnection",
graphql_name="reviewers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""The reviewers for this field.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
| ProjectV2ItemFieldReviewerValue |
python | kamyu104__LeetCode-Solutions | Python/find-the-minimum-area-to-cover-all-ones-i.py | {
"start": 41,
"end": 542
} | class ____(object):
def minimumArea(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
min_r, max_r, min_c, max_c = len(grid), -1, len(grid[0]), -1
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if grid[i][j] == 0:
continue
min_r, max_r, min_c, max_c = min(min_r, i), max(max_r, i), min(min_c, j), max(max_c, j)
return (max_r-min_r+1)*(max_c-min_c+1)
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis45.py | {
"start": 315,
"end": 1790
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis45.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [108178048, 108321408]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis(
{
"name": "XXX",
"name_font": {"rotation": 270, "baseline": -1},
"num_font": {"rotation": 270, "baseline": -1},
}
)
chart.set_y_axis(
{
"name": "YYY",
"name_font": {"rotation": 270, "baseline": -1},
"num_font": {"rotation": 270, "baseline": -1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_repr_returned.py | {
"start": 1029,
"end": 1126
} | class ____:
""" Uninferable return value """
__repr__ = lambda self: Missing
| AmbiguousRepr |
python | numpy__numpy | numpy/_core/tests/test_casting_unittests.py | {
"start": 2134,
"end": 4546
} | class ____(enum.IntEnum):
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
same_value = 64
same_value_dtypes = tuple(type(np.dtype(c)) for c in "?bhilqBHILQefdgFDG")
def _get_cancast_table():
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = = = = = = = = = = = = = . =
H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
e . . . . . . . . . . . # = = = = = = = = = = . .
f . . . . . . . . . . . ~ # = = = = = = = = = . .
d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
F . . . . . . . . . . . . . . . # = = = = = = . .
D . . . . . . . . . . . . . . . ~ # = = = = = . .
G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
S . . . . . . . . . . . . . . . . . . # = = = . .
U . . . . . . . . . . . . . . . . . . . # = = . .
V . . . . . . . . . . . . . . . . . . . . # = . .
O . . . . . . . . . . . . . . . . . . . . = # . .
M . . . . . . . . . . . . . . . . . . . . = = # .
m . . . . . . . . . . . . . . . . . . . . = = . #
""").strip().split("\n")
dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
cancast = {}
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
# Of the types checked, numeric cast support same-value
if from_dt in same_value_dtypes and to_dt in same_value_dtypes:
cancast[from_dt][to_dt] |= Casting.same_value
return cancast
CAST_TABLE = _get_cancast_table()
| Casting |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 5787,
"end": 11937
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = config.num_attention_heads // config.head_ratio
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
self.num_attention_heads = 1
else:
self.num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.conv_kernel_size = config.conv_kernel_size
if config.hidden_size % self.num_attention_heads != 0:
raise ValueError("hidden_size should be divisible by num_attention_heads")
self.attention_head_size = (config.hidden_size // self.num_attention_heads) // 2
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.key_conv_attn_layer = SeparableConv1D(
config, config.hidden_size, self.all_head_size, self.conv_kernel_size
)
self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size)
self.unfold = nn.Unfold(
kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0]
)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2))
mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
mixed_query_layer = self.query(hidden_states)
query_layer = mixed_query_layer.view(
batch_size, -1, self.num_attention_heads, self.attention_head_size
).transpose(1, 2)
key_layer = mixed_key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(
1, 2
)
value_layer = mixed_value_layer.view(
batch_size, -1, self.num_attention_heads, self.attention_head_size
).transpose(1, 2)
conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
conv_out_layer = nn.functional.unfold(
conv_out_layer,
kernel_size=[self.conv_kernel_size, 1],
dilation=1,
padding=[(self.conv_kernel_size - 1) // 2, 0],
stride=1,
)
conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
batch_size, -1, self.all_head_size, self.conv_kernel_size
)
conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in ConvBertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = torch.cat([context_layer, conv_out], 2)
# conv and context
new_context_layer_shape = context_layer.size()[:-2] + (
self.num_attention_heads * self.attention_head_size * 2,
)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
| ConvBertSelfAttention |
python | walkccc__LeetCode | solutions/2345. Finding the Number of Visible Mountains/2345.py | {
"start": 0,
"end": 451
} | class ____:
def visibleMountains(self, peaks: list[list[int]]) -> int:
ans = 0
maxRightFoot = 0
peaks.sort(key=lambda x: (x[0] - x[1], -x[0]))
for i, peak in enumerate(peaks):
overlapWithNext = i + 1 < len(peaks) and peak == peaks[i + 1]
currRightFoot = peak[0] + peak[1]
if currRightFoot > maxRightFoot:
if not overlapWithNext:
ans += 1
maxRightFoot = currRightFoot
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor.py | {
"start": 5331,
"end": 28615
} | class ____(internal.NativeObject, core_tf_types.Symbol):
"""A `tf.Tensor` represents a multidimensional array of elements.
All elements are of a single known data type.
When writing a TensorFlow program, the main object that is
manipulated and passed around is the `tf.Tensor`.
A `tf.Tensor` has the following properties:
* a single data type (float32, int32, or string, for example)
* a shape
TensorFlow supports eager execution and graph execution. In eager
execution, operations are evaluated immediately. In graph
execution, a computational graph is constructed for later
evaluation.
TensorFlow defaults to eager execution. In the example below, the
matrix multiplication results are calculated immediately.
>>> # Compute some values using a Tensor
>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
>>> e = tf.matmul(c, d)
>>> print(e)
tf.Tensor(
[[1. 3.]
[3. 7.]], shape=(2, 2), dtype=float32)
Note that during eager execution, you may discover your `Tensors` are actually
of type `EagerTensor`. This is an internal detail, but it does give you
access to a useful function, `numpy`:
>>> type(e)
<class '...ops.EagerTensor'>
>>> print(e.numpy())
[[1. 3.]
[3. 7.]]
In TensorFlow, `tf.function`s are a common way to define graph execution.
A Tensor's shape (that is, the rank of the Tensor and the size of
each dimension) may not always be fully known. In `tf.function`
definitions, the shape may only be partially known.
Most operations produce tensors of fully-known shapes if the shapes of their
inputs are also fully known, but in some cases it's only possible to find the
shape of a tensor at execution time.
A number of specialized tensors are available: see `tf.Variable`,
`tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and
`tf.RaggedTensor`.
Caution: when constructing a tensor from a numpy array or pandas dataframe
the underlying buffer may be re-used:
```python
a = np.array([1, 2, 3])
b = tf.constant(a)
a[0] = 4
print(b) # tf.Tensor([4 2 3], shape=(3,), dtype=int64)
```
Note: this is an implementation detail that is subject to change and users
should not rely on this behaviour.
For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __getattr__(self, name):
if name in {"T", "astype", "ravel", "transpose", "reshape", "clip", "size",
"tolist", "data"}:
# TODO(wangpeng): Export the enable_numpy_behavior knob
raise AttributeError(
f"{type(self).__name__} object has no attribute '{name}'. " + """
If you are looking for numpy-related methods, please run the following:
tf.experimental.numpy.experimental_enable_numpy_behavior()
""")
self.__getattribute__(name)
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def name(self):
return self._name
@property
def shape(self) -> tensor_shape.TensorShape:
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
>>> t = tf.constant([1,2,3,4,5])
>>> t.shape
TensorShape([5])
`tf.Tensor.shape` is equivalent to `tf.Tensor.get_shape()`.
In a `tf.function` or when building a model using
`tf.keras.Input`, they return the build-time shape of the
tensor, which may be partially unknown.
A `tf.TensorShape` is not a tensor. Use `tf.shape(t)` to get a tensor
containing the shape, calculated at runtime.
See `tf.Tensor.get_shape()`, and `tf.TensorShape` for details and examples.
"""
if self._shape_val is None:
dims, unknown_shape = self._shape
if unknown_shape:
self._shape_val = tensor_shape.unknown_shape()
else:
self._shape_val = tensor_shape.TensorShape(dims)
return self._shape_val
@property
def ndim(self):
return self.shape.rank
def _disallow(self, task):
raise errors.OperatorNotAllowedInGraphError(
f"{task} is not allowed."
" You can attempt the following resolutions to the problem:"
" If you are running in Graph mode, use Eager execution mode"
" or decorate this function with @tf.function."
" If you are using AutoGraph, you can try decorating this function"
" with @tf.function. If that does not work, then you may be using"
" an unsupported feature or your source code may not be visible"
" to AutoGraph. See"
" https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/limitations.md#access-to-source-code"
" for more information.")
def _disallow_bool_casting(self):
self._disallow("Using a symbolic `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
self._disallow("Iterating over a symbolic `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
first_dim = self._get_first_dim()
return _TensorIterator(self, first_dim)
def _get_first_dim(self):
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
return shape[0]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _record_tape(self, capture):
"""Connect this graph tensor with capture for gradients calculation."""
record.record_operation(
"captured_value",
[self], [capture],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
def get_shape(self) -> tensor_shape.TensorShape:
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
In eager execution the shape is always fully-known.
>>> a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> print(a.shape)
(2, 3)
`tf.Tensor.get_shape()` is equivalent to `tf.Tensor.shape`.
When executing in a `tf.function` or building a model using
`tf.keras.Input`, `Tensor.shape` may return a partial shape (including
`None` for unknown dimensions). See `tf.TensorShape` for more details.
>>> inputs = tf.keras.Input(shape = [10])
>>> # Unknown batch size
>>> print(inputs.shape)
(None, 10)
The shape is computed using shape inference functions that are
registered for each `tf.Operation`.
The returned `tf.TensorShape` is determined at *build* time, without
executing the underlying kernel. It is not a `tf.Tensor`. If you need a
shape *tensor*, either convert the `tf.TensorShape` to a `tf.constant`, or
use the `tf.shape(tensor)` function, which returns the tensor's shape at
*execution* time.
This is useful for debugging and providing early errors. For
example, when tracing a `tf.function`, no ops are being executed, shapes
may be unknown (See the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details).
>>> @tf.function
... def my_matmul(a, b):
... result = a@b
... # the `print` executes during tracing.
... print("Result shape: ", result.shape)
... return result
The shape inference functions propagate shapes to the extent possible:
>>> f = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([3,5]))
Result shape: (None, 5)
Tracing may fail if a shape mismatch can be detected:
>>> cf = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([4,5]))
Traceback (most recent call last):
...
ValueError: Dimensions must be equal, but are 3 and 4 for 'matmul' (op:
'MatMul') with input shapes: [?,3], [4,5].
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `tf.ensure_shape` or `Tensor.set_shape()` can be used to augment
the inferred shape.
>>> @tf.function
... def my_fun(a):
... a = tf.ensure_shape(a, [5, 5])
... # the `print` executes during tracing.
... print("Result shape: ", a.shape)
... return a
>>> cf = my_fun.get_concrete_function(
... tf.TensorSpec([None, None]))
Result shape: (5, 5)
Returns:
A `tf.TensorShape` representing the shape of this tensor.
"""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
Note: It is recommended to use `tf.ensure_shape` instead of
`Tensor.set_shape`, because `tf.ensure_shape` provides better checking for
programming errors and can create guarantees for compiler
optimization.
With eager execution this operates as a shape assertion.
Here the shapes match:
>>> t = tf.constant([[1,2,3]])
>>> t.set_shape([1, 3])
Passing a `None` in the new shape allows any value for that axis:
>>> t.set_shape([1,None])
An error is raised if an incompatible shape is passed.
>>> t.set_shape([1,5])
Traceback (most recent call last):
...
ValueError: Tensor's shape (1, 3) is not compatible with supplied
shape [1, 5]
When executing in a `tf.function`, or building a model using
`tf.keras.Input`, `Tensor.set_shape` will *merge* the given `shape` with
the current shape of this tensor, and set the tensor's shape to the
merged value (see `tf.TensorShape.merge_with` for details):
>>> t = tf.keras.Input(shape=[None, None, 3])
>>> print(t.shape)
(None, None, None, 3)
Dimensions set to `None` are not updated:
>>> t.set_shape([None, 224, 224, None])
>>> print(t.shape)
(None, 224, 224, 3)
The main use case for this is to provide additional shape information
that cannot be inferred from the graph alone.
For example if you know all the images in a dataset have shape [28,28,3] you
can set it with `tf.set_shape`:
>>> @tf.function
... def load_image(filename):
... raw = tf.io.read_file(filename)
... image = tf.image.decode_png(raw, channels=3)
... # the `print` executes during tracing.
... print("Initial shape: ", image.shape)
... image.set_shape([28, 28, 3])
... print("Final shape: ", image.shape)
... return image
Trace the function, see the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details.
>>> cf = load_image.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: (None, None, 3)
Final shape: (28, 28, 3)
Similarly the `tf.io.parse_tensor` function could return a tensor with
any shape, even the `tf.rank` is unknown. If you know that all your
serialized tensors will be 2d, set it with `set_shape`:
>>> @tf.function
... def my_parse(string_tensor):
... result = tf.io.parse_tensor(string_tensor, out_type=tf.float32)
... # the `print` executes during tracing.
... print("Initial shape: ", result.shape)
... result.set_shape([None, None])
... print("Final shape: ", result.shape)
... return result
Trace the function
>>> concrete_parse = my_parse.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: <unknown>
Final shape: (None, None)
Make sure it works:
>>> t = tf.ones([5,3], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> print(serialized.dtype)
<dtype: 'string'>
>>> print(serialized.shape)
()
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 3)
Caution: `set_shape` ensures that the applied shape is compatible with
the existing shape, but it does not check at runtime. Setting
incorrect shapes can result in inconsistencies between the
statically-known graph and the runtime value of tensors. For runtime
validation of the shape, use `tf.ensure_shape` instead. It also modifies
the `shape` of the tensor.
>>> # Serialize a rank-3 tensor
>>> t = tf.ones([5,5,5], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> # The function still runs, even though it `set_shape([None,None])`
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 5, 5)
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
self._set_shape(dim_list, unknown_shape)
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
assert self._op.name
if self.value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self.value_index)
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and (g is None or g.building_function)):
raise TypeError("Tensor is unhashable. "
"Instead, use tensor.ref() as the key.")
else:
return id(self)
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self, dtype=None):
del dtype
raise NotImplementedError(
f"Cannot convert a symbolic tf.Tensor ({self.name}) to a numpy array."
f" This error may indicate that you're trying to pass a Tensor to"
f" a NumPy call, which is not supported.")
def __len__(self):
raise TypeError(f"len is not well defined for a symbolic Tensor "
f"({self.name}). Please call `x.shape` rather than "
f"`len(x)` for shape information.")
# TODO(mdan): This convoluted machinery is hard to maintain. Clean up.
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self): # pylint: disable=invalid-bool-returned
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Note: If you are not using `compat.v1` libraries, you should not need this,
(or `feed_dict` or `Session`). In eager execution (or within `tf.function`)
you do not need to call `eval`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
@deprecation.deprecated(None, "Use ref() instead.")
def experimental_ref(self):
return self.ref()
def ref(self):
# tf.Variable also has the same ref() API. If you update the
# documentation here, please update tf.Variable.ref() as well.
"""Returns a hashable reference object to this Tensor.
The primary use case for this API is to put tensors in a set/dictionary.
We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
available starting Tensorflow 2.0.
The following will raise an exception starting 2.0
>>> x = tf.constant(5)
>>> y = tf.constant(10)
>>> z = tf.constant(10)
>>> tensor_set = {x, y, z}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
>>> tensor_dict = {x: 'five', y: 'ten'}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
Instead, we can use `tensor.ref()`.
>>> tensor_set = {x.ref(), y.ref(), z.ref()}
>>> x.ref() in tensor_set
True
>>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
>>> tensor_dict[y.ref()]
'ten'
Also, the reference object provides `.deref()` function that returns the
original Tensor.
>>> x = tf.constant(5)
>>> x.ref().deref()
<tf.Tensor: shape=(), dtype=int32, numpy=5>
"""
return object_identity.Reference(self)
def __tf_tracing_type__(self, signature_context):
if self.dtype == dtypes.resource or self.dtype == dtypes.variant:
shape_inference_handle_data = handle_data_util.get_handle_data(self)
handle_data = (
dtypes.HandleData(shape_inference_handle_data)
if shape_inference_handle_data
else None
)
dtype = dtypes.DType(self.dtype._type_enum, handle_data)
else:
dtype = self.dtype
spec = TensorSpec(self.shape, dtype)
return spec
def __tf_tensor__(
self, dtype: Optional[dtypes.DType] = None, name: Optional[str] = None
) -> "Tensor":
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
_add_error_prefix(
f"Tensor conversion requested dtype {dtype.name} "
f"for Tensor with dtype {self.dtype.name}: {self!r}",
name=name))
return self
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
Comparing tensors with element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
logging.vlog(1, "Enabling tensor equality")
_tensor_equality_api_usage_gauge.get_cell().set(True)
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
logging.vlog(1, "Disabling tensor equality")
_tensor_equality_api_usage_gauge.get_cell().set(False)
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
# TODO(b/249802365): Sanitize all TensorSpec names.
def sanitize_spec_name(name: str) -> str:
"""Sanitizes Spec names. Matches Graph Node and Python naming conventions.
Without sanitization, names that are not legal Python parameter names can be
set which makes it challenging to represent callables supporting the named
calling capability.
Args:
name: The name to sanitize.
Returns:
A string that meets Python parameter conventions.
"""
if not name:
return "unknown"
# Lower case and replace non-alphanumeric chars with '_'
swapped = "".join([c if c.isalnum() else "_" for c in name.lower()])
if swapped[0].isalpha():
return swapped
else:
return "tensor_" + swapped
def get_op_name(tensor_name):
"""Extract the Op name from a Tensor name.
The Op name is everything before a colon, if present,
not including any ^ prefix denoting a control dependency.
Args:
tensor_name: the full name of a Tensor in the graph.
Returns:
The name of the Op of which the given Tensor is an output.
Raises:
ValueError: if tensor_name is None or empty.
"""
if not tensor_name:
raise ValueError(
f"Tensor name cannot be empty or None. Received: {tensor_name}.")
# Control dependency inputs start with ^.
if tensor_name.startswith("^"):
tensor_name = tensor_name[1:]
if ":" in tensor_name:
op_name, _ = tensor_name.split(":")
return op_name
return tensor_name
| Tensor |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 7197,
"end": 7442
} | class ____:
def __init__(self, checkpoints: AsyncCheckpoints) -> None:
self._checkpoints = checkpoints
self.list = async_to_streamed_response_wrapper(
checkpoints.list,
)
| AsyncCheckpointsWithStreamingResponse |
python | doocs__leetcode | lcof2/剑指 Offer II 071. 按权重生成随机数/Solution.py | {
"start": 0,
"end": 650
} | class ____:
def __init__(self, w: List[int]):
n = len(w)
self.presum = [0] * (n + 1)
for i in range(n):
self.presum[i + 1] = self.presum[i] + w[i]
def pickIndex(self) -> int:
n = len(self.presum)
x = random.randint(1, self.presum[-1])
left, right = 0, n - 2
while left < right:
mid = (left + right) >> 1
if self.presum[mid + 1] >= x:
right = mid
else:
left = mid + 1
return left
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
| Solution |
python | anthropics__anthropic-sdk-python | tests/lib/test_vertex.py | {
"start": 364,
"end": 5771
} | class ____:
client = AnthropicVertex(region="region", project_id="project", access_token="my-access-token")
@pytest.mark.respx()
def test_messages_retries(self, respx_mock: MockRouter) -> None:
request_url = "https://region-aiplatform.googleapis.com/v1/projects/project/locations/region/publishers/anthropic/models/claude-3-sonnet@20240229:rawPredict"
respx_mock.post(request_url).mock(
side_effect=[
httpx.Response(500, json={"error": "server error"}, headers={"retry-after-ms": "10"}),
httpx.Response(200, json={"foo": "bar"}),
]
)
self.client.messages.create(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-3-sonnet@20240229",
)
calls = cast("list[MockRequestCall]", respx_mock.calls)
assert len(calls) == 2
assert calls[0].request.url == request_url
assert calls[1].request.url == request_url
def test_copy(self) -> None:
copied = self.client.copy()
assert id(copied) != id(self.client)
copied = self.client.copy(region="another-region", project_id="another-project")
assert copied.region == "another-region"
assert self.client.region == "region"
assert copied.project_id == "another-project"
assert self.client.project_id == "project"
def test_with_options(self) -> None:
copied = self.client.with_options(region="another-region", project_id="another-project")
assert copied.region == "another-region"
assert self.client.region == "region"
assert copied.project_id == "another-project"
assert self.client.project_id == "project"
def test_copy_default_options(self) -> None:
# options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
assert copied.max_retries == 7
assert self.client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
assert isinstance(self.client.timeout, httpx.Timeout)
copied = self.client.copy(timeout=None)
assert copied.timeout is None
assert isinstance(self.client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
client = AnthropicVertex(
base_url=base_url,
region="region",
project_id="project",
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert copied.default_headers["X-Foo"] == "bar"
# merges already given headers
copied = client.copy(default_headers={"X-Bar": "stainless"})
assert copied.default_headers["X-Foo"] == "bar"
assert copied.default_headers["X-Bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_headers={"X-Foo": "stainless"})
assert copied.default_headers["X-Foo"] == "stainless"
# set_default_headers
# completely overrides already set values
copied = client.copy(set_default_headers={})
assert copied.default_headers.get("X-Foo") is None
copied = client.copy(set_default_headers={"X-Bar": "Robert"})
assert copied.default_headers["X-Bar"] == "Robert"
with pytest.raises(
ValueError,
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
def test_global_region_base_url(self) -> None:
"""Test that global region uses the correct base URL."""
client = AnthropicVertex(region="global", project_id="test-project", access_token="fake-token")
assert str(client.base_url).rstrip("/") == "https://aiplatform.googleapis.com/v1"
@pytest.mark.parametrize("region", ["us-central1", "europe-west1", "asia-southeast1"])
def test_regional_base_url(self, region: str) -> None:
"""Test that regional endpoints use the correct base URL format."""
client = AnthropicVertex(region=region, project_id="test-project", access_token="fake-token")
expected_url = f"https://{region}-aiplatform.googleapis.com/v1"
assert str(client.base_url).rstrip("/") == expected_url
def test_env_var_base_url_override(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""Test that ANTHROPIC_VERTEX_BASE_URL environment variable does not override client arg."""
test_url = "https://custom-endpoint.googleapis.com/v1"
monkeypatch.setenv("ANTHROPIC_VERTEX_BASE_URL", test_url)
client = AnthropicVertex(
region="global", # we expect this to get ignored since the user is providing a base_url
project_id="test-project",
access_token="fake-token",
base_url="https://test.googleapis.com/v1",
)
assert str(client.base_url).rstrip("/") == "https://test.googleapis.com/v1"
| TestAnthropicVertex |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 83434,
"end": 95657
} | class ____:
"""
Helper class that will count all requests made online.
Might not be robust if urllib3 changes its logging format but should be good enough for us.
Usage:
```py
with RequestCounter() as counter:
_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
assert counter["GET"] == 0
assert counter["HEAD"] == 1
assert counter.total_calls == 1
```
"""
def __enter__(self):
self._counter = defaultdict(int)
self._thread_id = threading.get_ident()
self._extra_info = []
def patched_with_thread_info(func):
def wrap(*args, **kwargs):
self._extra_info.append(threading.get_ident())
return func(*args, **kwargs)
return wrap
self.patcher = patch.object(
urllib3.connectionpool.log, "debug", side_effect=patched_with_thread_info(urllib3.connectionpool.log.debug)
)
self.mock = self.patcher.start()
return self
def __exit__(self, *args, **kwargs) -> None:
assert len(self.mock.call_args_list) == len(self._extra_info)
for thread_id, call in zip(self._extra_info, self.mock.call_args_list):
if thread_id != self._thread_id:
continue
# code 307: the URL being requested by the user has moved to a temporary location
if call.args[-2] == 307:
continue
log = call.args[0] % call.args[1:]
for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
if method in log:
self._counter[method] += 1
break
self.patcher.stop()
def __getitem__(self, key: str) -> int:
return self._counter[key]
@property
def total_calls(self) -> int:
return sum(self._counter.values())
def is_flaky(max_attempts: int = 5, wait_before_retry: float | None = None, description: str | None = None):
"""
To decorate flaky tests. They will be retried on failures.
Please note that our push tests use `pytest-rerunfailures`, which prompts the CI to rerun certain types of
failed tests. More specifically, if the test exception contains any substring in `FLAKY_TEST_FAILURE_PATTERNS`
(in `.circleci/create_circleci_config.py`), it will be rerun. If you find a recurrent pattern of failures,
expand `FLAKY_TEST_FAILURE_PATTERNS` in our CI configuration instead of using `is_flaky`.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*):
If provided, will wait that number of seconds before retrying the test.
description (`str`, *optional*):
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
etc.)
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
except Exception as err:
logger.error(f"Test failed with {err} at try {retry_count}/{max_attempts}.")
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return unittest.skipUnless(_run_flaky_tests, "test is flaky")(wrapper)
return decorator
def hub_retry(max_attempts: int = 5, wait_before_retry: float | None = 2):
"""
To decorate tests that download from the Hub. They can fail due to a
variety of network issues such as timeouts, connection resets, etc.
Args:
max_attempts (`int`, *optional*, defaults to 5):
The maximum number of attempts to retry the flaky test.
wait_before_retry (`float`, *optional*, defaults to 2):
If provided, will wait that number of seconds before retrying the test.
"""
def decorator(test_func_ref):
@functools.wraps(test_func_ref)
def wrapper(*args, **kwargs):
retry_count = 1
while retry_count < max_attempts:
try:
return test_func_ref(*args, **kwargs)
# We catch all exceptions related to network issues from httpx
except (
httpx.HTTPError,
httpx.RequestError,
httpx.TimeoutException,
httpx.ReadTimeout,
httpx.ConnectError,
httpx.NetworkError,
) as err:
logger.error(
f"Test failed with {err} at try {retry_count}/{max_attempts} as it couldn't connect to the specified Hub repository."
)
if wait_before_retry is not None:
time.sleep(wait_before_retry)
retry_count += 1
return test_func_ref(*args, **kwargs)
return wrapper
return decorator
def run_first(test_case):
"""
Decorator marking a test with order(1). When pytest-order plugin is installed, tests marked with this decorator
are guaranteed to run first.
This is especially useful in some test settings like on a Gaudi instance where a Gaudi device can only be used by a
single process at a time. So we make sure all tests that run in a subprocess are launched first, to avoid device
allocation conflicts.
"""
import pytest
return pytest.mark.order(1)(test_case)
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
"""
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
Args:
test_case (`unittest.TestCase`):
The test that will run `target_func`.
target_func (`Callable`):
The function implementing the actual testing logic.
inputs (`dict`, *optional*, defaults to `None`):
The inputs that will be passed to `target_func` through an (input) queue.
timeout (`int`, *optional*, defaults to `None`):
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
"""
if timeout is None:
timeout = int(os.environ.get("PYTEST_TIMEOUT", "600"))
start_methohd = "spawn"
ctx = multiprocessing.get_context(start_methohd)
input_queue = ctx.Queue(1)
output_queue = ctx.JoinableQueue(1)
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
input_queue.put(inputs, timeout=timeout)
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
process.start()
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
# the test to exit properly.
try:
results = output_queue.get(timeout=timeout)
output_queue.task_done()
except Exception as e:
process.terminate()
test_case.fail(e)
process.join(timeout=timeout)
if results["error"] is not None:
test_case.fail(f"{results['error']}")
def run_test_using_subprocess(func):
"""
To decorate a test to run in a subprocess using the `subprocess` module. This could avoid potential GPU memory
issues (GPU OOM or a test that causes many subsequential failing with `CUDA error: device-side assert triggered`).
"""
import pytest
@functools.wraps(func)
def wrapper(*args, **kwargs):
if os.getenv("_INSIDE_SUB_PROCESS", None) == "1":
func(*args, **kwargs)
else:
test = " ".join(os.environ.get("PYTEST_CURRENT_TEST").split(" ")[:-1])
try:
env = copy.deepcopy(os.environ)
env["_INSIDE_SUB_PROCESS"] = "1"
# This prevents the entries in `short test summary info` given by the subprocess being truncated. so the
# full information can be passed to the parent pytest process.
# See: https://docs.pytest.org/en/stable/explanation/ci.html
env["CI"] = "true"
# If not subclass of `unitTest.TestCase` and `pytestconfig` is used: try to grab and use the arguments
if "pytestconfig" in kwargs:
command = list(kwargs["pytestconfig"].invocation_params.args)
for idx, x in enumerate(command):
if x in kwargs["pytestconfig"].args:
test = test.split("::")[1:]
command[idx] = "::".join([f"{func.__globals__['__file__']}"] + test)
command = [f"{sys.executable}", "-m", "pytest"] + command
command = [x for x in command if x != "--no-summary"]
# Otherwise, simply run the test with no option at all
else:
command = [f"{sys.executable}", "-m", "pytest", f"{test}"]
subprocess.run(command, env=env, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
exception_message = e.stdout.decode()
lines = exception_message.split("\n")
# Add a first line with more informative information instead of just `= test session starts =`.
# This makes the `short test summary info` section more useful.
if "= test session starts =" in lines[0]:
text = ""
for line in lines[1:]:
if line.startswith("FAILED "):
text = line[len("FAILED ") :]
text = "".join(text.split(" - ")[1:])
elif line.startswith("=") and line.endswith("=") and " failed in " in line:
break
elif len(text) > 0:
text += f"\n{line}"
text = "(subprocess) " + text
lines = [text] + lines
exception_message = "\n".join(lines)
raise pytest.fail(exception_message, pytrace=False)
return wrapper
"""
The following contains utils to run the documentation tests without having to overwrite any files.
The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.
To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>
"""
def preprocess_string(string, skip_cuda_tests):
"""Prepare a docstring or a `.md` file to be run by doctest.
The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
its docstring. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and a
cuda stuff is detective (with a heuristic), this method will return an empty string so no doctest will be run for
`string`.
"""
codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )(.*?```)"
codeblocks = re.split(codeblock_pattern, string, flags=re.DOTALL)
is_cuda_found = False
for i, codeblock in enumerate(codeblocks):
if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
if (
(">>>" in codeblock or "..." in codeblock)
and re.search(r"cuda|to\(0\)|device=0", codeblock)
and skip_cuda_tests
):
is_cuda_found = True
break
modified_string = ""
if not is_cuda_found:
modified_string = "".join(codeblocks)
return modified_string
| RequestCounter |
python | coleifer__peewee | tests/db_tests.py | {
"start": 33406,
"end": 34057
} | class ____(BaseTestCase):
def test_model_property(self):
database = get_in_memory_db()
class M1(database.Model): pass
class M2(database.Model): pass
class CM1(M1): pass
for M in (M1, M2, CM1):
self.assertTrue(M._meta.database is database)
def test_model_property_on_proxy(self):
db = DatabaseProxy()
class M1(db.Model): pass
class M2(db.Model): pass
class CM1(M1): pass
test_db = get_in_memory_db()
db.initialize(test_db)
for M in (M1, M2, CM1):
self.assertEqual(M._meta.database.database, ':memory:')
| TestModelPropertyHelper |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 124516,
"end": 125508
} | class ____:
@mock.patch("google.cloud.aiplatform_v1.types.PipelineJob.to_dict")
@mock.patch(VERTEX_AI_PATH.format("pipeline_job.PipelineJobHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = GetPipelineJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.get_pipeline_job.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
pipeline_job_id=TEST_PIPELINE_JOB_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestVertexAIGetPipelineJobOperator |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py | {
"start": 14532,
"end": 17048
} | class ____(AwsBaseOperator[EmrHook]):
"""
An operator that stops a running EMR notebook execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrStopNotebookExecutionOperator`
:param notebook_execution_id: The unique identifier of the notebook execution.
:param wait_for_completion: If True, the operator will wait for the notebook.
to be in a STOPPED or FINISHED state. Defaults to False.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param waiter_max_attempts: Maximum number of tries before failing.
:param waiter_delay: Number of seconds between polling the state of the notebook.
"""
aws_hook_class = EmrHook
template_fields: Sequence[str] = aws_template_fields(
"notebook_execution_id",
"waiter_delay",
"waiter_max_attempts",
)
def __init__(
self,
notebook_execution_id: str,
wait_for_completion: bool = False,
waiter_max_attempts: int | None = None,
waiter_delay: int | None = None,
**kwargs: Any,
):
super().__init__(**kwargs)
self.notebook_execution_id = notebook_execution_id
self.wait_for_completion = wait_for_completion
self.waiter_max_attempts = waiter_max_attempts or 25
self.waiter_delay = waiter_delay or 60
def execute(self, context: Context) -> None:
self.hook.conn.stop_notebook_execution(NotebookExecutionId=self.notebook_execution_id)
if self.wait_for_completion:
self.hook.get_waiter("notebook_stopped").wait(
NotebookExecutionId=self.notebook_execution_id,
WaiterConfig=prune_dict(
{
"Delay": self.waiter_delay,
"MaxAttempts": self.waiter_max_attempts,
}
),
)
| EmrStopNotebookExecutionOperator |
python | PyCQA__pycodestyle | testing/data/E30not.py | {
"start": 8,
"end": 87
} | class ____:
pass
#: Okay
def foo():
pass
#: Okay
# -*- coding: utf-8 -*-
| X |
python | simplejson__simplejson | simplejson/tests/test_iterable.py | {
"start": 269,
"end": 1390
} | class ____(unittest.TestCase):
def test_iterable(self):
for l in ([], [1], [1, 2], [1, 2, 3]):
for opts in [{}, {'indent': 2}]:
for dumps in (json.dumps, iter_dumps, sio_dump):
expect = dumps(l, **opts)
default_expect = dumps(sum(l), **opts)
# Default is False
self.assertRaises(TypeError, dumps, iter(l), **opts)
self.assertRaises(TypeError, dumps, iter(l), iterable_as_array=False, **opts)
self.assertEqual(expect, dumps(iter(l), iterable_as_array=True, **opts))
# Ensure that the "default" gets called
self.assertEqual(default_expect, dumps(iter(l), default=sum, **opts))
self.assertEqual(default_expect, dumps(iter(l), iterable_as_array=False, default=sum, **opts))
# Ensure that the "default" does not get called
self.assertEqual(
expect,
dumps(iter(l), iterable_as_array=True, default=sum, **opts))
| TestIterable |
python | fluentpython__example-code-2e | 13-protocol-abc/typing/randompickload.py | {
"start": 110,
"end": 218
} | class ____(RandomPicker, Protocol): # <2>
def load(self, Iterable) -> None: ... # <3>
| LoadableRandomPicker |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 938387,
"end": 938791
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("RepositoryInvitation", graphql_name="node")
"""The item at the end of the edge."""
| RepositoryInvitationEdge |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/legacy_storage.py | {
"start": 6142,
"end": 14755
} | class ____(RunStorage, ConfigurableClass):
def __init__(self, storage: DagsterStorage, inst_data: Optional[ConfigurableClassData] = None):
self._storage = check.inst_param(storage, "storage", DagsterStorage)
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
super().__init__()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@property
def _instance(self) -> Optional["DagsterInstance"]: # pyright: ignore[reportIncompatibleMethodOverride]
return self._storage._instance # noqa: SLF001
def register_instance(self, instance: "DagsterInstance") -> None:
if not self._storage.has_instance:
self._storage.register_instance(instance)
@classmethod
def config_type(cls) -> UserConfigSchema:
return {
"module_name": str,
"class_name": str,
"config_yaml": str,
}
@classmethod
def from_config_value(
cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, str]
) -> "LegacyRunStorage":
storage = ConfigurableClassData(
module_name=config_value["module_name"],
class_name=config_value["class_name"],
config_yaml=config_value["config_yaml"],
).rehydrate(as_type=DagsterStorage)
return LegacyRunStorage(storage, inst_data=inst_data)
def add_run(self, dagster_run: "DagsterRun") -> "DagsterRun":
return self._storage.run_storage.add_run(dagster_run)
def add_historical_run(
self, dagster_run: "DagsterRun", run_creation_time: datetime
) -> "DagsterRun":
return self._storage.run_storage.add_historical_run(dagster_run, run_creation_time)
def handle_run_event(
self, run_id: str, event: "DagsterEvent", update_timestamp: Optional[datetime] = None
) -> None:
return self._storage.run_storage.handle_run_event(run_id, event, update_timestamp)
def get_runs( # pyright: ignore[reportIncompatibleMethodOverride]
self,
filters: Optional["RunsFilter"] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
bucket_by: Optional[Union["JobBucket", "TagBucket"]] = None,
ascending: bool = False,
) -> Iterable["DagsterRun"]:
return self._storage.run_storage.get_runs(filters, cursor, limit, bucket_by, ascending)
def get_run_ids( # pyright: ignore[reportIncompatibleMethodOverride]
self,
filters: Optional["RunsFilter"] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
) -> Iterable[str]:
return self._storage.run_storage.get_run_ids(filters, cursor=cursor, limit=limit)
def get_runs_count(self, filters: Optional["RunsFilter"] = None) -> int:
return self._storage.run_storage.get_runs_count(filters)
def get_run_group(self, run_id: str) -> Optional[tuple[str, Iterable["DagsterRun"]]]: # pyright: ignore[reportIncompatibleMethodOverride]
return self._storage.run_storage.get_run_group(run_id)
def get_run_records(
self,
filters: Optional["RunsFilter"] = None,
limit: Optional[int] = None,
order_by: Optional[str] = None,
ascending: bool = False,
cursor: Optional[str] = None,
bucket_by: Optional[Union["JobBucket", "TagBucket"]] = None,
) -> Sequence["RunRecord"]:
return self._storage.run_storage.get_run_records(
filters, limit, order_by, ascending, cursor, bucket_by
)
def get_run_tags(
self,
tag_keys: Sequence[str],
value_prefix: Optional[str] = None,
limit: Optional[int] = None,
) -> Sequence[tuple[str, set[str]]]:
return self._storage.run_storage.get_run_tags(tag_keys, value_prefix, limit)
def get_run_tag_keys(self) -> Sequence[str]:
return self._storage.run_storage.get_run_tag_keys()
def add_run_tags(self, run_id: str, new_tags: Mapping[str, str]):
return self._storage.run_storage.add_run_tags(run_id, new_tags)
def has_run(self, run_id: str) -> bool:
return self._storage.run_storage.has_run(run_id)
def add_snapshot(
self,
snapshot: Union["JobSnap", "ExecutionPlanSnapshot"],
) -> None:
return self._storage.run_storage.add_snapshot(snapshot)
def has_snapshot(self, snapshot_id: str) -> bool:
return self._storage.run_storage.has_snapshot(snapshot_id)
def has_job_snapshot(self, job_snapshot_id: str) -> bool:
return self._storage.run_storage.has_job_snapshot(job_snapshot_id)
def add_job_snapshot(self, job_snapshot: "JobSnap") -> str:
return self._storage.run_storage.add_job_snapshot(job_snapshot)
def get_job_snapshot(self, job_snapshot_id: str) -> "JobSnap":
return self._storage.run_storage.get_job_snapshot(job_snapshot_id)
def has_execution_plan_snapshot(self, execution_plan_snapshot_id: str) -> bool:
return self._storage.run_storage.has_execution_plan_snapshot(execution_plan_snapshot_id)
def add_execution_plan_snapshot(self, execution_plan_snapshot: "ExecutionPlanSnapshot") -> str:
return self._storage.run_storage.add_execution_plan_snapshot(execution_plan_snapshot)
def get_execution_plan_snapshot(
self, execution_plan_snapshot_id: str
) -> "ExecutionPlanSnapshot":
return self._storage.run_storage.get_execution_plan_snapshot(execution_plan_snapshot_id)
def wipe(self) -> None:
return self._storage.run_storage.wipe()
def delete_run(self, run_id: str) -> None:
return self._storage.run_storage.delete_run(run_id)
def migrate(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:
return self._storage.run_storage.migrate(print_fn, force_rebuild_all)
def optimize(self, print_fn: Optional[PrintFn] = None, force_rebuild_all: bool = False) -> None:
return self._storage.run_storage.optimize(print_fn, force_rebuild_all)
def dispose(self) -> None:
return self._storage.run_storage.dispose()
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
return self._storage.run_storage.optimize_for_webserver(
statement_timeout, pool_recycle, max_overflow
)
def add_daemon_heartbeat(self, daemon_heartbeat: "DaemonHeartbeat") -> None:
return self._storage.run_storage.add_daemon_heartbeat(daemon_heartbeat)
def get_daemon_heartbeats(self) -> Mapping[str, "DaemonHeartbeat"]:
return self._storage.run_storage.get_daemon_heartbeats()
def wipe_daemon_heartbeats(self) -> None:
return self._storage.run_storage.wipe_daemon_heartbeats()
def get_backfills(
self,
filters: Optional["BulkActionsFilter"] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
status: Optional["BulkActionStatus"] = None,
) -> Sequence["PartitionBackfill"]:
return self._storage.run_storage.get_backfills(
cursor=cursor, limit=limit, filters=filters, status=status
)
def get_backfills_count(self, filters: Optional["BulkActionsFilter"] = None) -> int:
return self._storage.run_storage.get_backfills_count(filters=filters)
def get_backfill(self, backfill_id: str) -> Optional["PartitionBackfill"]:
return self._storage.run_storage.get_backfill(backfill_id)
def add_backfill(self, partition_backfill: "PartitionBackfill") -> None:
return self._storage.run_storage.add_backfill(partition_backfill)
def update_backfill(self, partition_backfill: "PartitionBackfill") -> None:
return self._storage.run_storage.update_backfill(partition_backfill)
def get_run_partition_data(self, runs_filter: "RunsFilter") -> Sequence["RunPartitionData"]:
return self._storage.run_storage.get_run_partition_data(runs_filter)
def get_cursor_values(self, keys: set[str]) -> Mapping[str, str]:
return self._storage.run_storage.get_cursor_values(keys)
def set_cursor_values(self, pairs: Mapping[str, str]) -> None:
return self._storage.run_storage.set_cursor_values(pairs)
def replace_job_origin(self, run: "DagsterRun", job_origin: "RemoteJobOrigin") -> None:
return self._storage.run_storage.replace_job_origin(run, job_origin)
def alembic_version(self) -> Optional[AlembicVersion]:
return self._storage.run_storage.alembic_version()
| LegacyRunStorage |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_project_processing_errors_details.py | {
"start": 299,
"end": 1762
} | class ____(MonitorTestCase, APITestCase):
endpoint = "sentry-api-0-project-processing-errors-details"
method = "delete"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
def test_empty(self) -> None:
self.get_error_response(self.organization.slug, self.project.slug, "hi")
def test(self) -> None:
monitor_error = build_checkin_processing_error(
[{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
message_overrides={"project_id": self.project.id},
)
store_error(monitor_error, None)
assert len(get_errors_for_projects([self.project])) == 1
self.get_success_response(self.organization.slug, self.project.slug, monitor_error.id)
assert len(get_errors_for_projects([self.project])) == 0
def test_invalid_project(self) -> None:
monitor_error = build_checkin_processing_error(
[{"type": ProcessingErrorType.CHECKIN_INVALID_GUID}],
message_overrides={"project_id": self.project.id},
)
unrelated_project = self.create_project()
store_error(monitor_error, None)
assert len(get_errors_for_projects([self.project])) == 1
self.get_error_response(
self.organization.slug, unrelated_project.slug, monitor_error.id, status_code=400
)
assert len(get_errors_for_projects([self.project])) == 1
| ProjectProcessingErrorsDetailsEndpointTest |
python | django__django | tests/model_fields/test_imagefield.py | {
"start": 3565,
"end": 7477
} | class ____(ImageFieldTestMixin, TestCase):
"""
Tests for ImageField that don't need to be run with each of the
different test model classes.
"""
def test_equal_notequal_hash(self):
"""
Bug #9786: Ensure '==' and '!=' work correctly.
Bug #9508: make sure hash() works as expected (equal items must
hash to the same value).
"""
# Create two Persons with different mugshots.
p1 = self.PersonModel(name="Joe")
p1.mugshot.save("mug", self.file1)
p2 = self.PersonModel(name="Bob")
p2.mugshot.save("mug", self.file2)
self.assertIs(p1.mugshot == p2.mugshot, False)
self.assertIs(p1.mugshot != p2.mugshot, True)
# Test again with an instance fetched from the db.
p1_db = self.PersonModel.objects.get(name="Joe")
self.assertIs(p1_db.mugshot == p2.mugshot, False)
self.assertIs(p1_db.mugshot != p2.mugshot, True)
# Instance from db should match the local instance.
self.assertIs(p1_db.mugshot == p1.mugshot, True)
self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
self.assertIs(p1_db.mugshot != p1.mugshot, False)
def test_instantiate_missing(self):
"""
If the underlying file is unavailable, still create instantiate the
object without error.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
p = self.PersonModel.objects.get(name="Joan")
path = p.mugshot.path
shutil.move(path, path + ".moved")
self.PersonModel.objects.get(name="Joan")
def test_delete_when_missing(self):
"""
Bug #8175: correctly delete an object where the file no longer
exists on the file system.
"""
p = self.PersonModel(name="Fred")
p.mugshot.save("shot", self.file1)
os.remove(p.mugshot.path)
p.delete()
def test_size_method(self):
"""
Bug #8534: FileField.size should not leave the file open.
"""
p = self.PersonModel(name="Joan")
p.mugshot.save("shot", self.file1)
# Get a "clean" model instance
p = self.PersonModel.objects.get(name="Joan")
# It won't have an opened file.
self.assertIs(p.mugshot.closed, True)
# After asking for the size, the file should still be closed.
p.mugshot.size
self.assertIs(p.mugshot.closed, True)
def test_pickle(self):
"""
ImageField can be pickled, unpickled, and that the image of
the unpickled version is the same as the original.
"""
import pickle
p = Person(name="Joe")
p.mugshot.save("mug", self.file1)
dump = pickle.dumps(p)
loaded_p = pickle.loads(dump)
self.assertEqual(p.mugshot, loaded_p.mugshot)
self.assertEqual(p.mugshot.url, loaded_p.mugshot.url)
self.assertEqual(p.mugshot.storage, loaded_p.mugshot.storage)
self.assertEqual(p.mugshot.instance, loaded_p.mugshot.instance)
self.assertEqual(p.mugshot.field, loaded_p.mugshot.field)
mugshot_dump = pickle.dumps(p.mugshot)
loaded_mugshot = pickle.loads(mugshot_dump)
self.assertEqual(p.mugshot, loaded_mugshot)
self.assertEqual(p.mugshot.url, loaded_mugshot.url)
self.assertEqual(p.mugshot.storage, loaded_mugshot.storage)
self.assertEqual(p.mugshot.instance, loaded_mugshot.instance)
self.assertEqual(p.mugshot.field, loaded_mugshot.field)
def test_defer(self):
self.PersonModel.objects.create(name="Joe", mugshot=self.file1)
with self.assertNumQueries(1):
qs = list(self.PersonModel.objects.defer("mugshot"))
with self.assertNumQueries(0):
self.assertEqual(qs[0].name, "Joe")
@skipIf(Image is None, "Pillow is required to test ImageField")
| ImageFieldTests |
python | wandb__wandb | wandb/sdk/data_types/table.py | {
"start": 689,
"end": 774
} | class ____:
def set_table(self, table):
self._table = table
| _TableLinkMixin |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 83273,
"end": 83505
} | class ____(ExecutionControlCommandBase, PythonStepperMixin):
"Step through Python code."
stepinto = True
@dont_suppress_errors
def invoke(self, args, from_tty):
self.python_step(stepinto=self.stepinto)
| PyStep |
python | allegroai__clearml | clearml/utilities/py3_interop.py | {
"start": 184,
"end": 1386
} | class ____(object):
"""An abstract base class for context managers. Supported in contextlib from python 3.6 and up"""
def __enter__(self) -> "AbstractContextManager":
"""Return `self` upon entering the runtime context."""
return self
@abc.abstractmethod
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
"""Raise any exception triggered within the runtime context."""
return None
@classmethod
def __subclasshook__(cls, C: type) -> Optional[bool]:
if cls is AbstractContextManager:
if any("__enter__" in B.__dict__ for B in C.__mro__) and any("__exit__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
try:
from abc import abstractclassmethod
except ImportError:
class abstractclassmethod(classmethod):
__isabstractmethod__ = True
def __init__(self, callable: callable) -> None:
callable.__isabstractmethod__ = True
super(abstractclassmethod, self).__init__(callable)
| AbstractContextManager |
python | doocs__leetcode | solution/3500-3599/3506.Find Time Required to Eliminate Bacterial Strains/Solution.py | {
"start": 0,
"end": 265
} | class ____:
def minEliminationTime(self, timeReq: List[int], splitTime: int) -> int:
heapify(timeReq)
while len(timeReq) > 1:
heappop(timeReq)
heappush(timeReq, heappop(timeReq) + splitTime)
return timeReq[0]
| Solution |
python | networkx__networkx | networkx/utils/heaps.py | {
"start": 161,
"end": 3216
} | class ____:
"""Base class for min-heaps.
A MinHeap stores a collection of key-value pairs ordered by their values.
It supports querying the minimum pair, inserting a new pair, decreasing the
value in an existing pair and deleting the minimum pair.
"""
class _Item:
"""Used by subclassess to represent a key-value pair."""
__slots__ = ("key", "value")
def __init__(self, key, value):
self.key = key
self.value = value
def __repr__(self):
return repr((self.key, self.value))
def __init__(self):
"""Initialize a new min-heap."""
self._dict = {}
def min(self):
"""Query the minimum key-value pair.
Returns
-------
key, value : tuple
The key-value pair with the minimum value in the heap.
Raises
------
NetworkXError
If the heap is empty.
"""
raise NotImplementedError
def pop(self):
"""Delete the minimum pair in the heap.
Returns
-------
key, value : tuple
The key-value pair with the minimum value in the heap.
Raises
------
NetworkXError
If the heap is empty.
"""
raise NotImplementedError
def get(self, key, default=None):
"""Returns the value associated with a key.
Parameters
----------
key : hashable object
The key to be looked up.
default : object
Default value to return if the key is not present in the heap.
Default value: None.
Returns
-------
value : object.
The value associated with the key.
"""
raise NotImplementedError
def insert(self, key, value, allow_increase=False):
"""Insert a new key-value pair or modify the value in an existing
pair.
Parameters
----------
key : hashable object
The key.
value : object comparable with existing values.
The value.
allow_increase : bool
Whether the value is allowed to increase. If False, attempts to
increase an existing value have no effect. Default value: False.
Returns
-------
decreased : bool
True if a pair is inserted or the existing value is decreased.
"""
raise NotImplementedError
def __nonzero__(self):
"""Returns whether the heap if empty."""
return bool(self._dict)
def __bool__(self):
"""Returns whether the heap if empty."""
return bool(self._dict)
def __len__(self):
"""Returns the number of key-value pairs in the heap."""
return len(self._dict)
def __contains__(self, key):
"""Returns whether a key exists in the heap.
Parameters
----------
key : any hashable object.
The key to be looked up.
"""
return key in self._dict
| MinHeap |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 23544,
"end": 23862
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
skews = helper_functions.get_value("Skewnesses")
maximum = np.nanmax(skews) if len(skews) > 0 else 0
return maximum if np.isfinite(maximum) else 0
@metafeatures.define("SkewnessMean", dependency="Skewnesses")
| SkewnessMax |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/apple/tests.py | {
"start": 4276,
"end": 9827
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = AppleProvider.id
def get_apple_id_token_payload(self):
now = int(time.time())
return {
"iss": "https://appleid.apple.com",
"aud": "app123id", # Matches `setup_app`
"exp": now + 60 * 60,
"iat": now,
"sub": "000313.c9720f41e9434e18987a.1218",
"at_hash": "CkaUPjk4MJinaAq6Z0tGUA",
"email": "test@privaterelay.appleid.com",
"email_verified": "true",
"is_private_email": "true",
"auth_time": 1234345345, # not converted automatically by pyjwt
}
def test_verify_token(self):
id_token = sign_id_token(self.get_apple_id_token_payload())
with mocked_response(self.get_mocked_response()):
sociallogin = self.provider.verify_token(None, {"id_token": id_token})
assert sociallogin.user.email == "test@privaterelay.appleid.com"
def get_login_response_json(self, with_refresh_token=True):
"""
`with_refresh_token` is not optional for apple, so it's ignored.
"""
id_token = sign_id_token(self.get_apple_id_token_payload())
return json.dumps(
{
"access_token": "testac", # Matches OAuth2TestsMixin value
"expires_in": 3600,
"id_token": id_token,
"refresh_token": "testrt", # Matches OAuth2TestsMixin value
"token_type": "Bearer",
}
)
def get_mocked_response(self):
"""
Apple is unusual in that the `id_token` contains all the user info
so no profile info request is made. However, it does need the
public key verification, so this mocked response is the public
key request in order to verify the authenticity of the id_token.
"""
return MockedResponse(
HTTPStatus.OK, KEY_SERVER_RESP_JSON, {"content-type": "application/json"}
)
def get_expected_to_str(self):
return "A B"
def get_complete_parameters(self, auth_request_params):
"""
Add apple specific response parameters which they include in the
form_post response.
https://developer.apple.com/documentation/sign_in_with_apple/sign_in_with_apple_js/incorporating_sign_in_with_apple_into_other_platforms
"""
params = super().get_complete_parameters(auth_request_params)
params.update(
{
"id_token": sign_id_token(self.get_apple_id_token_payload()),
"user": json.dumps(
{
"email": "private@appleid.apple.com",
"name": {
"firstName": "A",
"lastName": "B",
},
}
),
}
)
return params
def login(self, resp_mock, process="login", with_refresh_token=True):
resp = self.client.post(
reverse(self.provider.id + "_login")
+ "?"
+ urlencode(dict(process=process))
)
p = urlparse(resp["location"])
q = parse_qs(p.query)
complete_url = reverse(self.provider.id + "_callback")
self.assertGreater(q["redirect_uri"][0].find(complete_url), 0)
response_json = self.get_login_response_json(
with_refresh_token=with_refresh_token
)
with mocked_response(
MockedResponse(
HTTPStatus.OK, response_json, {"content-type": "application/json"}
),
resp_mock,
):
resp = self.client.post(
complete_url,
data=self.get_complete_parameters(q),
)
assert reverse("apple_finish_callback") in resp.url
# Follow the redirect
resp = self.client.get(resp.url)
return resp
def test_authentication_error(self):
"""Override base test because apple posts errors"""
resp = self.client.post(
reverse(self.provider.id + "_callback"),
data={"error": "misc", "state": "testingstate123"},
)
assert reverse("apple_finish_callback") in resp.url
# Follow the redirect
resp = self.client.get(resp.url)
self.assertTemplateUsed(
resp,
"socialaccount/authentication_error.%s"
% getattr(settings, "ACCOUNT_TEMPLATE_EXTENSION", "html"),
)
def test_apple_finish(self):
resp = self.login(self.get_mocked_response())
# Check request generating the response
finish_url = reverse("apple_finish_callback")
self.assertEqual(resp.request["PATH_INFO"], finish_url)
self.assertTrue("state" in resp.request["QUERY_STRING"])
self.assertTrue("code" in resp.request["QUERY_STRING"])
# Check have cookie containing apple session
self.assertTrue(APPLE_SESSION_COOKIE_NAME in self.client.cookies)
# Session should have been cleared
apple_session_cookie = self.client.cookies.get(APPLE_SESSION_COOKIE_NAME)
engine = import_module(settings.SESSION_ENGINE)
SessionStore = engine.SessionStore
apple_login_session = SessionStore(apple_session_cookie.value)
self.assertEqual(len(apple_login_session.keys()), 0)
# Check cookie path was correctly set
self.assertEqual(apple_session_cookie.get("path"), finish_url)
| AppleTests |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_south_dakota_zip.py | {
"start": 1782,
"end": 4155
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid South Dakota zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_south_dakota_zip": ["57015", "57379", "57799", "57420"],
"invalid_south_dakota_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_south_dakota_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_south_dakota_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_south_dakota_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidSouthDakotaZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidSouthDakotaZip |
python | python__mypy | mypy/semanal_shared.py | {
"start": 10908,
"end": 15795
} | class ____(BoolTypeQuery):
def __init__(self) -> None:
super().__init__(ANY_STRATEGY)
def visit_placeholder_type(self, t: PlaceholderType) -> bool:
return True
def has_placeholder(typ: Type) -> bool:
"""Check if a type contains any placeholder types (recursively)."""
return typ.accept(HasPlaceholders())
def find_dataclass_transform_spec(node: Node | None) -> DataclassTransformSpec | None:
"""
Find the dataclass transform spec for the given node, if any exists.
Per PEP 681 (https://peps.python.org/pep-0681/#the-dataclass-transform-decorator), dataclass
transforms can be specified in multiple ways, including decorator functions and
metaclasses/base classes. This function resolves the spec from any of these variants.
"""
# The spec only lives on the function/class definition itself, so we need to unwrap down to that
# point
if isinstance(node, CallExpr):
# Like dataclasses.dataclass, transform-based decorators can be applied either with or
# without parameters; ie, both of these forms are accepted:
#
# @typing.dataclass_transform
# class Foo: ...
# @typing.dataclass_transform(eq=True, order=True, ...)
# class Bar: ...
#
# We need to unwrap the call for the second variant.
node = node.callee
if isinstance(node, RefExpr):
node = node.node
if isinstance(node, Decorator):
# typing.dataclass_transform usage must always result in a Decorator; it always uses the
# `@dataclass_transform(...)` syntax and never `@dataclass_transform`
node = node.func
if isinstance(node, OverloadedFuncDef):
# The dataclass_transform decorator may be attached to any single overload, so we must
# search them all.
# Note that using more than one decorator is undefined behavior, so we can just take the
# first that we find.
for candidate in node.items:
spec = find_dataclass_transform_spec(candidate)
if spec is not None:
return spec
return find_dataclass_transform_spec(node.impl)
# For functions, we can directly consult the AST field for the spec
if isinstance(node, FuncDef):
return node.dataclass_transform_spec
if isinstance(node, ClassDef):
node = node.info
if isinstance(node, TypeInfo):
# Search all parent classes to see if any are decorated with `typing.dataclass_transform`
for base in node.mro[1:]:
if base.dataclass_transform_spec is not None:
return base.dataclass_transform_spec
# Check if there is a metaclass that is decorated with `typing.dataclass_transform`
#
# Note that PEP 681 only discusses using a metaclass that is directly decorated with
# `typing.dataclass_transform`; subclasses thereof should be treated with dataclass
# semantics rather than as transforms:
#
# > If dataclass_transform is applied to a class, dataclass-like semantics will be assumed
# > for any class that directly or indirectly derives from the decorated class or uses the
# > decorated class as a metaclass.
#
# The wording doesn't make this entirely explicit, but Pyright (the reference
# implementation for this PEP) only handles directly-decorated metaclasses.
metaclass_type = node.metaclass_type
if metaclass_type is not None and metaclass_type.type.dataclass_transform_spec is not None:
return metaclass_type.type.dataclass_transform_spec
return None
# Never returns `None` if a default is given
@overload
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: Literal[True, False],
) -> bool: ...
@overload
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: None = None,
) -> bool | None: ...
def require_bool_literal_argument(
api: SemanticAnalyzerInterface | SemanticAnalyzerPluginInterface,
expression: Expression,
name: str,
default: bool | None = None,
) -> bool | None:
"""Attempt to interpret an expression as a boolean literal, and fail analysis if we can't."""
value = parse_bool(expression)
if value is None:
api.fail(
f'"{name}" argument must be a True or False literal', expression, code=LITERAL_REQ
)
return default
return value
def parse_bool(expr: Expression) -> bool | None:
if isinstance(expr, NameExpr):
if expr.fullname == "builtins.True":
return True
if expr.fullname == "builtins.False":
return False
return None
| HasPlaceholders |
python | django__django | tests/multiple_database/tests.py | {
"start": 88857,
"end": 97533
} | class ____(TestCase):
databases = {"default", "other"}
class WriteCheckRouter:
def db_for_write(self, model, **hints):
raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
def override_router(self):
return override_settings(
DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()]
)
def test_fk_delete(self):
owner = Person.objects.create(name="Someone")
pet = Pet.objects.create(name="fido", owner=owner)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
pet.owner.delete()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {"instance": owner})
def test_reverse_fk_delete(self):
owner = Person.objects.create(name="Someone")
to_del_qs = owner.pet_set.all()
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
to_del_qs.delete()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {"instance": owner})
def test_reverse_fk_get_or_create(self):
owner = Person.objects.create(name="Someone")
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
owner.pet_set.get_or_create(name="fido")
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {"instance": owner})
def test_reverse_fk_update(self):
owner = Person.objects.create(name="Someone")
Pet.objects.create(name="fido", owner=owner)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
owner.pet_set.update(name="max")
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {"instance": owner})
def test_m2m_add(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.add(auth)
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": book})
def test_m2m_clear(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.clear()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": book})
def test_m2m_delete(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.all().delete()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {"instance": book})
def test_m2m_get_or_create(self):
Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.get_or_create(name="Someone else")
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {"instance": book})
def test_m2m_remove(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.remove(auth)
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": book})
def test_m2m_update(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
book.authors.update(name="Different")
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {"instance": book})
def test_reverse_m2m_add(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.add(book)
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": auth})
def test_reverse_m2m_clear(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.clear()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": auth})
def test_reverse_m2m_delete(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.all().delete()
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {"instance": auth})
def test_reverse_m2m_get_or_create(self):
auth = Person.objects.create(name="Someone")
Book.objects.create(title="Pro Django", published=datetime.date(2008, 12, 16))
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.get_or_create(
title="New Book", published=datetime.datetime.now()
)
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {"instance": auth})
def test_reverse_m2m_remove(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.remove(book)
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {"instance": auth})
def test_reverse_m2m_update(self):
auth = Person.objects.create(name="Someone")
book = Book.objects.create(
title="Pro Django", published=datetime.date(2008, 12, 16)
)
book.authors.add(auth)
with self.assertRaises(RouterUsed) as cm:
with self.override_router():
auth.book_set.update(title="Different")
e = cm.exception
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {"instance": auth})
| RouteForWriteTestCase |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 12964,
"end": 13338
} | class ____(GroupType):
type_id = 1008
slug = "performance_file_io_main_thread"
description = "File IO on Main Thread"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.MOBILE.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| PerformanceFileIOMainThreadGroupType |
python | pytorch__pytorch | test/quantization/jit/test_ondevice_quantization.py | {
"start": 649,
"end": 935
} | class ____(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.fc1 = torch.nn.Linear(5, 5).float()
self.fc1.weight = weight
self.fc2 = torch.nn.Linear(5, 5).float()
def forward(self, x):
return self.fc2(self.fc1(x))
| myMod |
python | dask__distributed | distributed/tests/test_profile.py | {
"start": 9408,
"end": 9862
} | class ____:
def __init__(self, f_back=None, f_code=None):
self.f_back = self
l = []
self.f_code = l.append
self.f_lineno = 1
def test_builtin():
# https://github.com/dask/distributed/issues/8163
assert identifier(MockFrame()) == "list.append:1"
assert info_frame(MockFrame()) == {
"filename": "<built-in>",
"name": "list.append",
"line_number": 1,
"line": "",
}
| MockFrame |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 302,
"end": 405
} | class ____(BaseModel):
x: float
y: str
model_config = ConfigDict(from_attributes=True)
| Model |
python | ray-project__ray | release/nightly_tests/dataset/image_loader_microbenchmark.py | {
"start": 9458,
"end": 9823
} | class ____(LocalDataset):
def __init__(self, local: str, transforms: Callable) -> None:
super().__init__(local=local)
self.transforms = transforms
def __getitem__(self, idx: int) -> Any:
obj = super().__getitem__(idx)
image = obj["image"]
label = obj["label"]
return self.transforms(image), label
| MosaicDataset |
python | realpython__materials | python-contact-book/source_code_final/rpcontacts/views.py | {
"start": 2921,
"end": 4822
} | class ____(QDialog):
"""Add Contact dialog."""
def __init__(self, parent=None):
"""Initializer."""
super().__init__(parent=parent)
self.setWindowTitle("Add Contact")
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.data = None
self.setupUI()
def setupUI(self):
"""Setup the Add Contact dialog's GUI."""
# Create line edits for data fields
self.nameField = QLineEdit()
self.nameField.setObjectName("Name")
self.jobField = QLineEdit()
self.jobField.setObjectName("Job")
self.emailField = QLineEdit()
self.emailField.setObjectName("Email")
# Lay out the data fields
layout = QFormLayout()
layout.addRow("Name:", self.nameField)
layout.addRow("Job:", self.jobField)
layout.addRow("Email:", self.emailField)
self.layout.addLayout(layout)
# Add standard buttons to the dialog and connect them
self.buttonsBox = QDialogButtonBox(self)
self.buttonsBox.setOrientation(Qt.Horizontal)
self.buttonsBox.setStandardButtons(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel
)
self.buttonsBox.accepted.connect(self.accept)
self.buttonsBox.rejected.connect(self.reject)
self.layout.addWidget(self.buttonsBox)
def accept(self):
"""Accept the data provided through the dialog."""
self.data = []
for field in (self.nameField, self.jobField, self.emailField):
if not field.text():
QMessageBox.critical(
self,
"Error!",
f"You must provide a contact's {field.objectName()}",
)
self.data = None # Reset .data
return
self.data.append(field.text())
super().accept()
| AddDialog |
python | huggingface__transformers | tests/models/yolos/test_modeling_yolos.py | {
"start": 6320,
"end": 13156
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as YOLOS does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
if is_torch_available()
else {}
)
test_resize_embeddings = False
test_torch_exportable = True
# special case for head model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = YolosModelTester(self)
self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="YOLOS does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in YOLOS, the seq_len is different
seq_len = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
    def test_hidden_states_output(self):
        """Check count and trailing shape of ``outputs.hidden_states``, both via
        the forward kwarg and via the config flag."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # Defaults to num_hidden_layers + 1 unless the tester overrides it.
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        """Smoke-test the object-detection head via the shared model tester."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """Load a pretrained YOLOS checkpoint from the Hub (slow: downloads weights)."""
        model_name = "hustvl/yolos-small"
        model = YolosModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_torch
@require_vision
| YolosModelTest |
python | kamyu104__LeetCode-Solutions | Python/find-the-minimum-amount-of-time-to-brew-potions.py | {
"start": 54,
"end": 512
} | class ____(object):
def minTime(self, skill, mana):
"""
:type skill: List[int]
:type mana: List[int]
:rtype: int
"""
result = 0
for i in xrange(1, len(mana)):
prefix = mx = 0
for x in skill:
prefix += x
mx = max(mx, mana[i-1]*prefix-mana[i]*(prefix-x))
result += mx
result += mana[-1]*sum(skill)
return result
| Solution |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_curve.py | {
"start": 35041,
"end": 53270
} | class ____:
    @pytest.fixture(autouse=True)
    def _fiber_force_length_active_arguments_fixture(self):
        # Shared symbols for every test below: the curve argument l_M_tilde
        # and the twelve curve constants c_0..c_11 (also bundled as a tuple).
        self.l_M_tilde = Symbol('l_M_tilde')
        self.c0 = Symbol('c_0')
        self.c1 = Symbol('c_1')
        self.c2 = Symbol('c_2')
        self.c3 = Symbol('c_3')
        self.c4 = Symbol('c_4')
        self.c5 = Symbol('c_5')
        self.c6 = Symbol('c_6')
        self.c7 = Symbol('c_7')
        self.c8 = Symbol('c_8')
        self.c9 = Symbol('c_9')
        self.c10 = Symbol('c_10')
        self.c11 = Symbol('c_11')
        self.constants = (
            self.c0, self.c1, self.c2, self.c3, self.c4, self.c5,
            self.c6, self.c7, self.c8, self.c9, self.c10, self.c11,
        )
    @staticmethod
    def test_class():
        """The curve is a sympy Function, a CharacteristicCurveFunction, and
        keeps its expected public name."""
        assert issubclass(FiberForceLengthActiveDeGroote2016, Function)
        assert issubclass(FiberForceLengthActiveDeGroote2016, CharacteristicCurveFunction)
        assert FiberForceLengthActiveDeGroote2016.__name__ == 'FiberForceLengthActiveDeGroote2016'
def test_instance(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
assert isinstance(fl_M_act, FiberForceLengthActiveDeGroote2016)
assert str(fl_M_act) == (
'FiberForceLengthActiveDeGroote2016(l_M_tilde, c_0, c_1, c_2, c_3, '
'c_4, c_5, c_6, c_7, c_8, c_9, c_10, c_11)'
)
def test_doit(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants).doit()
assert fl_M_act == (
self.c0*exp(-(((self.l_M_tilde - self.c1)/(self.c2 + self.c3*self.l_M_tilde))**2)/2)
+ self.c4*exp(-(((self.l_M_tilde - self.c5)/(self.c6 + self.c7*self.l_M_tilde))**2)/2)
+ self.c8*exp(-(((self.l_M_tilde - self.c9)/(self.c10 + self.c11*self.l_M_tilde))**2)/2)
)
def test_doit_evaluate_false(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants).doit(evaluate=False)
assert fl_M_act == (
self.c0*exp(-((UnevaluatedExpr(self.l_M_tilde - self.c1)/(self.c2 + self.c3*self.l_M_tilde))**2)/2)
+ self.c4*exp(-((UnevaluatedExpr(self.l_M_tilde - self.c5)/(self.c6 + self.c7*self.l_M_tilde))**2)/2)
+ self.c8*exp(-((UnevaluatedExpr(self.l_M_tilde - self.c9)/(self.c10 + self.c11*self.l_M_tilde))**2)/2)
)
def test_with_defaults(self):
constants = (
Float('0.814'),
Float('1.06'),
Float('0.162'),
Float('0.0633'),
Float('0.433'),
Float('0.717'),
Float('-0.0299'),
Float('0.2'),
Float('0.1'),
Float('1.0'),
Float('0.354'),
Float('0.0'),
)
fl_M_act_manual = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *constants)
fl_M_act_constants = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
assert fl_M_act_manual == fl_M_act_constants
def test_differentiate_wrt_l_M_tilde(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c0*(
self.c3*(self.l_M_tilde - self.c1)**2/(self.c2 + self.c3*self.l_M_tilde)**3
+ (self.c1 - self.l_M_tilde)/((self.c2 + self.c3*self.l_M_tilde)**2)
)*exp(-(self.l_M_tilde - self.c1)**2/(2*(self.c2 + self.c3*self.l_M_tilde)**2))
+ self.c4*(
self.c7*(self.l_M_tilde - self.c5)**2/(self.c6 + self.c7*self.l_M_tilde)**3
+ (self.c5 - self.l_M_tilde)/((self.c6 + self.c7*self.l_M_tilde)**2)
)*exp(-(self.l_M_tilde - self.c5)**2/(2*(self.c6 + self.c7*self.l_M_tilde)**2))
+ self.c8*(
self.c11*(self.l_M_tilde - self.c9)**2/(self.c10 + self.c11*self.l_M_tilde)**3
+ (self.c9 - self.l_M_tilde)/((self.c10 + self.c11*self.l_M_tilde)**2)
)*exp(-(self.l_M_tilde - self.c9)**2/(2*(self.c10 + self.c11*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.l_M_tilde) == expected
def test_differentiate_wrt_c0(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = exp(-(self.l_M_tilde - self.c1)**2/(2*(self.c2 + self.c3*self.l_M_tilde)**2))
assert fl_M_act.doit().diff(self.c0) == expected
def test_differentiate_wrt_c1(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c0*(self.l_M_tilde - self.c1)/(self.c2 + self.c3*self.l_M_tilde)**2
*exp(-(self.l_M_tilde - self.c1)**2/(2*(self.c2 + self.c3*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c1) == expected
def test_differentiate_wrt_c2(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c0*(self.l_M_tilde - self.c1)**2/(self.c2 + self.c3*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c1)**2/(2*(self.c2 + self.c3*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c2) == expected
def test_differentiate_wrt_c3(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c0*self.l_M_tilde*(self.l_M_tilde - self.c1)**2/(self.c2 + self.c3*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c1)**2/(2*(self.c2 + self.c3*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c3) == expected
def test_differentiate_wrt_c4(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = exp(-(self.l_M_tilde - self.c5)**2/(2*(self.c6 + self.c7*self.l_M_tilde)**2))
assert fl_M_act.diff(self.c4) == expected
def test_differentiate_wrt_c5(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c4*(self.l_M_tilde - self.c5)/(self.c6 + self.c7*self.l_M_tilde)**2
*exp(-(self.l_M_tilde - self.c5)**2/(2*(self.c6 + self.c7*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c5) == expected
def test_differentiate_wrt_c6(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c4*(self.l_M_tilde - self.c5)**2/(self.c6 + self.c7*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c5)**2/(2*(self.c6 + self.c7*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c6) == expected
def test_differentiate_wrt_c7(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c4*self.l_M_tilde*(self.l_M_tilde - self.c5)**2/(self.c6 + self.c7*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c5)**2/(2*(self.c6 + self.c7*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c7) == expected
def test_differentiate_wrt_c8(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = exp(-(self.l_M_tilde - self.c9)**2/(2*(self.c10 + self.c11*self.l_M_tilde)**2))
assert fl_M_act.diff(self.c8) == expected
def test_differentiate_wrt_c9(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c8*(self.l_M_tilde - self.c9)/(self.c10 + self.c11*self.l_M_tilde)**2
*exp(-(self.l_M_tilde - self.c9)**2/(2*(self.c10 + self.c11*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c9) == expected
def test_differentiate_wrt_c10(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c8*(self.l_M_tilde - self.c9)**2/(self.c10 + self.c11*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c9)**2/(2*(self.c10 + self.c11*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c10) == expected
def test_differentiate_wrt_c11(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
self.c8*self.l_M_tilde*(self.l_M_tilde - self.c9)**2/(self.c10 + self.c11*self.l_M_tilde)**3
*exp(-(self.l_M_tilde - self.c9)**2/(2*(self.c10 + self.c11*self.l_M_tilde)**2))
)
assert fl_M_act.diff(self.c11) == expected
def test_function_print_latex(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = r'\operatorname{fl}^M_{act} \left( l_{M tilde} \right)'
assert LatexPrinter().doprint(fl_M_act) == expected
def test_expression_print_latex(self):
fl_M_act = FiberForceLengthActiveDeGroote2016(self.l_M_tilde, *self.constants)
expected = (
r'c_{0} e^{- \frac{\left(- c_{1} + l_{M tilde}\right)^{2}}{2 \left(c_{2} + c_{3} l_{M tilde}\right)^{2}}} '
r'+ c_{4} e^{- \frac{\left(- c_{5} + l_{M tilde}\right)^{2}}{2 \left(c_{6} + c_{7} l_{M tilde}\right)^{2}}} '
r'+ c_{8} e^{- \frac{\left(- c_{9} + l_{M tilde}\right)^{2}}{2 \left(c_{10} + c_{11} l_{M tilde}\right)^{2}}}'
)
assert LatexPrinter().doprint(fl_M_act.doit()) == expected
@pytest.mark.parametrize(
'code_printer, expected',
[
(
C89CodePrinter,
(
'(0.81399999999999995*exp(-1.0/2.0*pow(l_M_tilde - 1.0600000000000001, 2)/pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*exp(-1.0/2.0*pow(l_M_tilde - 0.71699999999999997, 2)/pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*exp(-3.9899134986753491*pow(l_M_tilde - 1.0, 2)))'
),
),
(
C99CodePrinter,
(
'(0.81399999999999995*exp(-1.0/2.0*pow(l_M_tilde - 1.0600000000000001, 2)/pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*exp(-1.0/2.0*pow(l_M_tilde - 0.71699999999999997, 2)/pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*exp(-3.9899134986753491*pow(l_M_tilde - 1.0, 2)))'
),
),
(
C11CodePrinter,
(
'(0.81399999999999995*exp(-1.0/2.0*pow(l_M_tilde - 1.0600000000000001, 2)/pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*exp(-1.0/2.0*pow(l_M_tilde - 0.71699999999999997, 2)/pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*exp(-3.9899134986753491*pow(l_M_tilde - 1.0, 2)))'
),
),
(
CXX98CodePrinter,
(
'(0.81399999999999995*exp(-1.0/2.0*std::pow(l_M_tilde - 1.0600000000000001, 2)/std::pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*exp(-1.0/2.0*std::pow(l_M_tilde - 0.71699999999999997, 2)/std::pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*exp(-3.9899134986753491*std::pow(l_M_tilde - 1.0, 2)))'
),
),
(
CXX11CodePrinter,
(
'(0.81399999999999995*std::exp(-1.0/2.0*std::pow(l_M_tilde - 1.0600000000000001, 2)/std::pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*std::exp(-1.0/2.0*std::pow(l_M_tilde - 0.71699999999999997, 2)/std::pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*std::exp(-3.9899134986753491*std::pow(l_M_tilde - 1.0, 2)))'
),
),
(
CXX17CodePrinter,
(
'(0.81399999999999995*std::exp(-1.0/2.0*std::pow(l_M_tilde - 1.0600000000000001, 2)/std::pow(0.063299999999999995*l_M_tilde + 0.16200000000000001, 2)) + 0.433*std::exp(-1.0/2.0*std::pow(l_M_tilde - 0.71699999999999997, 2)/std::pow(0.20000000000000001*l_M_tilde - 0.029899999999999999, 2)) + 0.10000000000000001*std::exp(-3.9899134986753491*std::pow(l_M_tilde - 1.0, 2)))'
),
),
(
FCodePrinter,
(
' (0.814d0*exp(-0.5d0*(l_M_tilde - 1.06d0)**2/(\n'
' @ 0.063299999999999995d0*l_M_tilde + 0.16200000000000001d0)**2) +\n'
' @ 0.433d0*exp(-0.5d0*(l_M_tilde - 0.717d0)**2/(\n'
' @ 0.20000000000000001d0*l_M_tilde - 0.029899999999999999d0)**2) +\n'
' @ 0.1d0*exp(-3.9899134986753491d0*(l_M_tilde - 1.0d0)**2))'
),
),
(
OctaveCodePrinter,
(
'(0.814*exp(-(l_M_tilde - 1.06).^2./(2*(0.0633*l_M_tilde + 0.162).^2)) + 0.433*exp(-(l_M_tilde - 0.717).^2./(2*(0.2*l_M_tilde - 0.0299).^2)) + 0.1*exp(-3.98991349867535*(l_M_tilde - 1.0).^2))'
),
),
(
PythonCodePrinter,
(
'(0.814*math.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*math.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*math.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
(
NumPyPrinter,
(
'(0.814*numpy.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*numpy.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*numpy.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
(
SciPyPrinter,
(
'(0.814*numpy.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*numpy.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*numpy.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
(
CuPyPrinter,
(
'(0.814*cupy.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*cupy.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*cupy.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
(
JaxPrinter,
(
'(0.814*jax.numpy.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*jax.numpy.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*jax.numpy.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
(
MpmathPrinter,
(
'(mpmath.mpf((0, 7331860193359167, -53, 53))*mpmath.exp(-mpmath.mpf(1)/mpmath.mpf(2)*(l_M_tilde + mpmath.mpf((1, 2386907802506363, -51, 52)))**2/(mpmath.mpf((0, 2280622851300419, -55, 52))*l_M_tilde + mpmath.mpf((0, 5836665117072163, -55, 53)))**2) + mpmath.mpf((0, 7800234554605699, -54, 53))*mpmath.exp(-mpmath.mpf(1)/mpmath.mpf(2)*(l_M_tilde + mpmath.mpf((1, 6458161865649291, -53, 53)))**2/(mpmath.mpf((0, 3602879701896397, -54, 52))*l_M_tilde + mpmath.mpf((1, 8618088246936181, -58, 53)))**2) + mpmath.mpf((0, 3602879701896397, -55, 52))*mpmath.exp(-mpmath.mpf((0, 8984486472937407, -51, 53))*(l_M_tilde + mpmath.mpf((1, 1, 0, 1)))**2))'
),
),
(
LambdaPrinter,
(
'(0.814*math.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2) + 0.433*math.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + 0.1*math.exp(-3.98991349867535*(l_M_tilde - 1.0)**2))'
),
),
]
)
def test_print_code(self, code_printer, expected):
fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
assert code_printer().doprint(fl_M_act) == expected
def test_derivative_print_code(self):
fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
fl_M_act_dl_M_tilde = fl_M_act.diff(self.l_M_tilde)
expected = (
'(0.79798269973507 - 0.79798269973507*l_M_tilde)*math.exp(-3.98991349867535*(l_M_tilde - 1.0)**2) + (0.433*(0.717 - l_M_tilde)/(0.2*l_M_tilde - 0.0299)**2 + 0.0866*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**3)*math.exp(-1/2*(l_M_tilde - 0.717)**2/(0.2*l_M_tilde - 0.0299)**2) + (0.814*(1.06 - l_M_tilde)/(0.0633*l_M_tilde + 0.162)**2 + 0.0515262*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**3)*math.exp(-1/2*(l_M_tilde - 1.06)**2/(0.0633*l_M_tilde + 0.162)**2)'
)
assert PythonCodePrinter().doprint(fl_M_act_dl_M_tilde) == expected
    def test_lambdify(self):
        """Curve built from the default constants lambdifies and evaluates to
        approximately 0.994 at l_M_tilde = 1.0."""
        fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
        fl_M_act_callable = lambdify(self.l_M_tilde, fl_M_act)
        assert fl_M_act_callable(1.0) == pytest.approx(0.9941398866)
@pytest.mark.skipif(numpy is None, reason='NumPy not installed')
def test_lambdify_numpy(self):
fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
fl_M_act_callable = lambdify(self.l_M_tilde, fl_M_act, 'numpy')
l_M_tilde = numpy.array([0.0, 0.5, 1.0, 1.5, 2.0])
expected = numpy.array([
0.0018501319,
0.0529122812,
0.9941398866,
0.2312431531,
0.0069595432,
])
numpy.testing.assert_allclose(fl_M_act_callable(l_M_tilde), expected)
@pytest.mark.skipif(jax is None, reason='JAX not installed')
def test_lambdify_jax(self):
fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self.l_M_tilde)
fl_M_act_callable = jax.jit(lambdify(self.l_M_tilde, fl_M_act, 'jax'))
l_M_tilde = jax.numpy.array([0.0, 0.5, 1.0, 1.5, 2.0])
expected = jax.numpy.array([
0.0018501319,
0.0529122812,
0.9941398866,
0.2312431531,
0.0069595432,
])
numpy.testing.assert_allclose(fl_M_act_callable(l_M_tilde), expected)
| TestFiberForceLengthActiveDeGroote2016 |
python | bokeh__bokeh | src/bokeh/core/types.py | {
"start": 2406,
"end": 2471
} | class ____(SpanGeometry):
x: float
y: float
| SpanGeometryData |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 3574,
"end": 3865
} | class ____(SentryAPIException):
    # 401 so clients treat the missing verification as an authentication problem.
    status_code = status.HTTP_401_UNAUTHORIZED
    code = "primary-email-verification-required"
    message = "Primary email verification required."
    def __init__(self, user):
        # Include the username so the error payload identifies which account
        # still needs its primary email verified.
        super().__init__(username=user.username)
| PrimaryEmailVerificationRequired |
python | walkccc__LeetCode | solutions/404. Sum of Left Leaves/404.py | {
"start": 0,
"end": 340
} | class ____:
def sumOfLeftLeaves(self, root: TreeNode | None) -> int:
if not root:
return 0
ans = 0
if root.left:
if not root.left.left and not root.left.right:
ans += root.left.val
else:
ans += self.sumOfLeftLeaves(root.left)
ans += self.sumOfLeftLeaves(root.right)
return ans
| Solution |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_memory_tool_20250818_str_replace_command.py | {
"start": 216,
"end": 524
} | class ____(BaseModel):
command: Literal["str_replace"]
"""Command type identifier"""
new_str: str
"""Text to replace with"""
old_str: str
"""Text to search for and replace"""
path: str
"""Path to the file where text should be replaced"""
| BetaMemoryTool20250818StrReplaceCommand |
python | langchain-ai__langchain | libs/partners/groq/tests/unit_tests/fake/callbacks.py | {
"start": 6177,
"end": 6590
} | class ____(FakeCallbackHandler):
    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Record a chat-model start after sanity-checking the payload.

        Asserts every element of the nested ``messages`` batches is a
        ``BaseMessage``, then delegates to the shared counting helper.
        """
        assert all(isinstance(m, BaseMessage) for m in chain(*messages))
        self.on_chat_model_start_common()
| FakeCallbackHandlerWithChatStart |
python | walkccc__LeetCode | solutions/3316. Find Maximum Removals From Source String/3316-2.py | {
"start": 0,
"end": 715
} | class ____:
def maxRemovals(
self,
source: str,
pattern: str,
targetIndices: list[int]
) -> int:
m = len(source)
n = len(pattern)
target = set(targetIndices)
# dp[i][j] := the maximum number of operations that can be performed for
# source[i..m) and pattern[j..n)
dp = [[-math.inf] * (n + 1) for _ in range(m + 1)]
dp[m][n] = 0
for i in reversed(range(m)):
dp[i][n] = int(i in target) + dp[i + 1][n]
for j in reversed(range(n)):
pick = dp[i + 1][j + 1] if source[i] == pattern[j] else -math.inf
skip = int(i in target) + dp[i + 1][j]
dp[i][j] = max(pick, skip)
return 0 if dp[0][0] == -math.inf else dp[0][0]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_ops_test.py | {
"start": 4692,
"end": 5789
} | class ____:
  def __init__(self, num_processes):
    # Build a cluster spec with `num_processes` workers and keep a pool
    # runner so test cases can reuse the already-initialized processes.
    cluster_spec_dict = multi_worker_test_base.create_cluster_spec(
        num_workers=num_processes
    )
    self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)
# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# usually fn usually captures the test class instance, and test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)
def get_global_mpr(num_processes):
  """Return the shared MultiProcessPoolRunner for 1 or 2 worker processes."""
  pools = {1: global_mpr_1p, 2: global_mpr_2p}
  if num_processes not in pools:
    raise ValueError(
        "get_global_mpr: num_processes must be 1 or 2, got %d" % num_processes
    )
  return pools[num_processes].runner
| MultiProcessPoolRunner |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/fast_ica.py | {
"start": 540,
"end": 3377
} | class ____(AutoSklearnPreprocessingAlgorithm):
    def __init__(self, algorithm, whiten, fun, n_components=None, random_state=None):
        """Store the (possibly string-encoded) hyperparameters; conversion to
        native types is deferred to ``fit``."""
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.n_components = n_components
        self.random_state = random_state
    def fit(self, X, Y=None):
        """Fit a scikit-learn FastICA transformer on ``X``.

        ``whiten`` and ``n_components`` arrive as strings from the config
        space and are converted here.  ``Y`` is accepted for API
        compatibility and ignored.
        """
        import sklearn.decomposition
        self.whiten = check_for_bool(self.whiten)
        if check_none(self.n_components):
            self.n_components = None
        else:
            self.n_components = int(self.n_components)
        self.preprocessor = sklearn.decomposition.FastICA(
            n_components=self.n_components,
            algorithm=self.algorithm,
            fun=self.fun,
            whiten=self.whiten,
            random_state=self.random_state,
        )
        # Make the RuntimeWarning an Exception!
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "error", message="array must not contain infs or NaNs"
            )
            try:
                self.preprocessor.fit(X)
            except ValueError as e:
                if "array must not contain infs or NaNs" in e.args[0]:
                    # Surface the known scikit-learn overflow as a clear error.
                    raise ValueError(
                        "Bug in scikit-learn: "
                        "https://github.com/scikit-learn/scikit-learn/pull/2738"
                    )
        return self
    def transform(self, X):
        """Apply the fitted FastICA transform; raises if ``fit`` was not called."""
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)
    @staticmethod
    def get_properties(dataset_properties=None):
        """Capability metadata describing this preprocessing component."""
        return {
            "shortname": "FastICA",
            "name": "Fast Independent Component Analysis",
            "handles_regression": True,
            "handles_classification": True,
            "handles_multiclass": True,
            "handles_multilabel": True,
            "handles_multioutput": True,
            "is_deterministic": False,
            "input": (DENSE, UNSIGNED_DATA),
            "output": (INPUT, UNSIGNED_DATA),
        }
    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
    ):
        """Build the ConfigSpace describing FastICA's tunable hyperparameters."""
        cs = ConfigurationSpace()
        n_components = UniformIntegerHyperparameter(
            "n_components", 10, 2000, default_value=100
        )
        algorithm = CategoricalHyperparameter(
            "algorithm", ["parallel", "deflation"], "parallel"
        )
        whiten = CategoricalHyperparameter("whiten", ["False", "True"], "False")
        fun = CategoricalHyperparameter("fun", ["logcosh", "exp", "cube"], "logcosh")
        cs.add_hyperparameters([n_components, algorithm, whiten, fun])
        # n_components is only sampled when whiten == "True".
        cs.add_condition(EqualsCondition(n_components, whiten, "True"))
        return cs
| FastICA |
python | astropy__astropy | astropy/modeling/polynomial.py | {
"start": 43269,
"end": 47825
} | class ____(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
.. math:: P_{n_m}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x ) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
Model formula:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_{i}`` is the corresponding Legendre polynomial.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
_separable = False
def __init__(
self,
x_degree,
y_degree,
x_domain=None,
x_window=None,
y_domain=None,
y_window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
x_degree,
y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
    def _fcache(self, x, y):
        """
        Calculate the individual Legendre functions once and store them in a
        dictionary to be reused.
        """
        # Keys 0..x_degree cache L_n(x); keys x_terms..x_terms+y_degree cache
        # L_m(y).  Both sequences use the Bonnet recurrence
        # n*L_n(t) = (2n-1)*t*L_{n-1}(t) - (n-1)*L_{n-2}(t).
        x_terms = self.x_degree + 1
        y_terms = self.y_degree + 1
        kfunc = {}
        kfunc[0] = np.ones(x.shape)
        kfunc[1] = x.copy()
        # NOTE(review): if x_degree == 0, the next line overwrites kfunc[1]
        # assigned just above -- confirm degree-0 callers never read key 1.
        kfunc[x_terms] = np.ones(y.shape)
        kfunc[x_terms + 1] = y.copy()
        for n in range(2, x_terms):
            kfunc[n] = (
                (2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2]
            ) / n
        for n in range(2, y_terms):
            kfunc[n + x_terms] = (
                (2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1]
                - (n - 1) * kfunc[n + x_terms - 2]
            ) / (n)
        return kfunc
def fit_deriv(self, x, y, *params):
"""Derivatives with respect to the coefficients.
This is an array with Legendre polynomials:
Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.ravel()
y = y.ravel()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
    def _legendderiv1d(self, x, deg):
        """Derivative of 1D Legendre polynomial."""
        # Builds the pseudo-Vandermonde matrix [L_0(x), ..., L_deg(x)] via the
        # Bonnet recurrence and rolls the degree axis to the end, giving a
        # result of shape x.shape + (deg + 1,).
        x = np.array(x, dtype=float, copy=COPY_IF_NEEDED, ndmin=1)
        d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
        d[0] = x * 0 + 1
        if deg > 0:
            d[1] = x
            for i in range(2, deg + 1):
                d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
        return np.rollaxis(d, 0, d.ndim)
| Legendre2D |
python | dask__distributed | distributed/worker.py | {
"start": 4947,
"end": 7434
} | class ____(TypedDict):
status: Literal["OK"]
data: dict[Key, object]
def fail_hard(method: Callable[P, T]) -> Callable[P, T]:
    """
    Decorator to close the worker if this method encounters an exception.

    Works for both sync and async methods: the failure is logged (unless the
    worker is already closing/closed), the worker is force-closed, and the
    original exception is re-raised to the caller.
    """
    reason = f"worker-{method.__name__}-fail-hard"
    if iscoroutinefunction(method):
        @wraps(method)
        async def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> Any:
            try:
                return await method(self, *args, **kwargs)  # type: ignore
            except Exception as e:
                if self.status not in (Status.closed, Status.closing):
                    self.log_event("worker-fail-hard", error_message(e))
                    logger.exception(e)
                # Async path: the forced shutdown can be awaited directly.
                await _force_close(self, reason)
                raise
    else:
        @wraps(method)
        def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> T:
            try:
                return method(self, *args, **kwargs)
            except Exception as e:
                if self.status not in (Status.closed, Status.closing):
                    self.log_event("worker-fail-hard", error_message(e))
                    logger.exception(e)
                # Sync path: schedule the forced shutdown on the event loop.
                self.loop.add_callback(_force_close, self, reason)
                raise
    return wrapper  # type: ignore
async def _force_close(self, reason: str):
    """
    Used with the fail_hard decorator defined above

    1. Wait for a worker to close
    2. If it doesn't, log and kill the process
    """
    try:
        # Give close() 30 seconds before falling through to the hard-kill path.
        await wait_for(
            self.close(nanny=False, executor_wait=False, reason=reason),
            30,
        )
    except (KeyboardInterrupt, SystemExit):  # pragma: nocover
        raise
    except BaseException:  # pragma: nocover
        # Worker is in a very broken state if closing fails. We need to shut down
        # immediately, to ensure things don't get even worse and this worker potentially
        # deadlocks the cluster.
        from distributed import Scheduler
        if Scheduler._instances:
            # We're likely in a unit test. Don't kill the whole test suite!
            raise
        logger.critical(
            "Error trying close worker in response to broken internal state. "
            "Forcibly exiting worker NOW",
            exc_info=True,
        )
        # use `os._exit` instead of `sys.exit` because of uncertainty
        # around propagating `SystemExit` from asyncio callbacks
        os._exit(1)
| GetDataSuccess |
python | doocs__leetcode | solution/2500-2599/2566.Maximum Difference by Remapping a Digit/Solution.py | {
"start": 0,
"end": 245
} | class ____:
def minMaxDifference(self, num: int) -> int:
s = str(num)
mi = int(s.replace(s[0], '0'))
for c in s:
if c != '9':
return int(s.replace(c, '9')) - mi
return num - mi
| Solution |
python | SmileyChris__easy-thumbnails | easy_thumbnails/templatetags/thumbnail.py | {
"start": 1016,
"end": 10636
} | class ____(Node):
    def __init__(self, source_var, opts, context_name=None):
        # `source_var` and the values in `opts` may be unresolved template
        # expressions; they are resolved against the context in render().
        self.source_var = source_var
        self.opts = opts
        # When set, render() stores the ThumbnailFile under this context name
        # instead of emitting the URL.
        self.context_name = context_name
    def render(self, context):
        """Resolve the source and options against the context, build the
        thumbnail, and return its escaped URL (or store the ThumbnailFile
        under ``context_name`` and return '').  Any failure is silent unless
        THUMBNAIL_DEBUG is enabled.
        """
        # Note that this isn't a global constant because we need to change the
        # value for tests.
        raise_errors = settings.THUMBNAIL_DEBUG
        # Get the source file.
        try:
            source = self.source_var.resolve(context)
        except VariableDoesNotExist:
            if raise_errors:
                raise VariableDoesNotExist(
                    "Variable '%s' does not exist." % self.source_var)
            return self.bail_out(context)
        if not source:
            if raise_errors:
                raise TemplateSyntaxError(
                    "Variable '%s' is an invalid source." % self.source_var)
            return self.bail_out(context)
        # Resolve the thumbnail option values.
        try:
            opts = {}
            for key, value in self.opts.items():
                if hasattr(value, 'resolve'):
                    value = value.resolve(context)
                opts[str(key)] = value
        except Exception:
            if raise_errors:
                raise
            return self.bail_out(context)
        # Size variable can be either a tuple/list of two integers or a
        # valid string.
        size = opts['size']
        if isinstance(size, str):
            m = RE_SIZE.match(size)
            if m:
                opts['size'] = (int(m.group(1)), int(m.group(2)))
            else:
                # Size variable may alternatively be referencing an alias.
                alias = aliases.get(size, target=source)
                if alias:
                    # Alias options are the base; explicit options win.
                    del opts['size']
                    opts = dict(alias, **opts)
                else:
                    if raise_errors:
                        raise TemplateSyntaxError(
                            "%r is not a valid size." % size)
                    return self.bail_out(context)
        # Ensure the quality is an integer.
        if 'quality' in opts:
            try:
                opts['quality'] = int(opts['quality'])
            except (TypeError, ValueError):
                if raise_errors:
                    raise TemplateSyntaxError(
                        "%r is an invalid quality." % opts['quality'])
                return self.bail_out(context)
        # Ensure the subsampling level is an integer.
        if 'subsampling' in opts:
            try:
                opts['subsampling'] = int(opts['subsampling'])
            except (TypeError, ValueError):
                if raise_errors:
                    raise TemplateSyntaxError(
                        "%r is an invalid subsampling level." %
                        opts['subsampling'])
                return self.bail_out(context)
        try:
            thumbnail = get_thumbnailer(source).get_thumbnail(opts)
        except Exception:
            if raise_errors:
                raise
            return self.bail_out(context)
        # Return the thumbnail file url, or put the file on the context.
        if self.context_name is None:
            return escape(thumbnail.url)
        else:
            context[self.context_name] = thumbnail
            return ''
def bail_out(self, context):
if self.context_name:
context[self.context_name] = ''
return ''
@register.tag
def thumbnail(parser, token):
    """
    Creates a thumbnail of an ImageField.

    Basic tag Syntax::

        {% thumbnail [source] [size] [options] %}

    *source* must be a ``File`` object, usually an Image/FileField of a model
    instance.

    *size* can either be:

    * the name of an alias

    * the size in the format ``[width]x[height]`` (for example,
      ``{% thumbnail person.photo 100x50 %}``) or

    * a variable containing a valid size (i.e. either a string in the
      ``[width]x[height]`` format or a tuple containing two integers):
      ``{% thumbnail person.photo size_var %}``.

    *options* are a space separated list of options which are used when
    processing the image to a thumbnail such as ``sharpen``, ``crop`` and
    ``quality=90``.

    If *size* is specified as an alias name, *options* are used to override
    and/or supplement the options defined in that alias.

    The thumbnail tag can also place a
    :class:`~easy_thumbnails.files.ThumbnailFile` object in the context,
    providing access to the properties of the thumbnail such as the height and
    width::

        {% thumbnail [source] [size] [options] as [variable] %}

    When ``as [variable]`` is used, the tag doesn't output anything. Instead,
    use the variable like a standard ``ImageFieldFile`` object::

        {% thumbnail obj.picture 200x200 upscale as thumb %}
        <img src="{{ thumb.url }}"
             width="{{ thumb.width }}"
             height="{{ thumb.height }}" />

    **Debugging**

    By default, errors while creating the thumbnail or resolving the image
    variable make the tag render an empty string (and set the context
    variable, if any, to an empty string as well). To surface those errors
    instead of failing silently, set ``THUMBNAIL_DEBUG = True`` in your
    Django project's settings module.
    """
    bits = token.split_contents()
    tag = bits[0]
    # A trailing "as <variable>" clause captures the thumbnail into the
    # context instead of rendering its URL.
    context_name = None
    if len(bits) > 4 and bits[-2] == 'as':
        context_name = bits[-1]
        bits = bits[:-2]
    if len(bits) < 3:
        raise TemplateSyntaxError(
            "Invalid syntax. Expected "
            "'{%% %s source size [option1 option2 ...] %%}' or "
            "'{%% %s source size [option1 option2 ...] as variable %%}'" %
            (tag, tag))
    # First argument: the source file.
    source_var = parser.compile_filter(bits[1])
    # Second argument: the requested size. A literal "10x10" style size is
    # quoted so the filter compiler treats it as a string constant.
    size = bits[2]
    if RE_SIZE.match(size):
        size = '"%s"' % size
    opts = {'size': parser.compile_filter(size)}
    # Remaining arguments: processing options, validated against the
    # registered option names.
    for arg, value in split_args(bits[3:]).items():
        if arg not in VALID_OPTIONS:
            raise TemplateSyntaxError("'%s' tag received a bad argument: "
                                      "'%s'" % (tag, arg))
        if value and value is not True:
            value = parser.compile_filter(value)
        opts[arg] = value
    return ThumbnailNode(source_var, opts=opts, context_name=context_name)
@register.filter
def thumbnailer(obj, relative_name=None):
    """
    Creates a thumbnailer from an object (usually a ``FileField``).

    Example usage::

        {% with photo=person.photo|thumbnailer %}
        {% if photo %}
            <a href="{{ photo.large.url }}">
                {{ photo.square.tag }}
            </a>
        {% else %}
            <img src="{% static 'template/fallback.png' %}" alt="" />
        {% endif %}
        {% endwith %}

    If you know what you're doing, you can also pass the relative name::

        {% with photo=storage|thumbnailer:'some/file.jpg' %}...
    """
    # Thin template-filter wrapper around the project-level factory.
    return get_thumbnailer(obj, relative_name=relative_name)
@register.filter
def thumbnailer_passive(obj):
    """
    Creates a thumbnailer from an object (usually a ``FileField``) that won't
    generate new thumbnails.

    This is useful if you are using another process to generate the thumbnails
    rather than having them generated on the fly if they are missing.

    Example usage::

        {% with avatar=person.avatar|thumbnailer_passive %}
            {% with avatar_thumb=avatar.small %}
                {% if avatar_thumb %}
                    <img src="{{ avatar_thumb.url }}" alt="" />
                {% else %}
                    <img src="{% static 'img/default-avatar-small.png' %}"
                        alt="" />
                {% endif %}
            {% endwith %}
        {% endwith %}
    """
    passive = get_thumbnailer(obj)
    # Disable on-the-fly generation: missing thumbnails stay missing.
    passive.generate = False
    return passive
@register.filter
def thumbnail_url(source, alias):
    """
    Return the thumbnail url for a source file using an aliased set of
    thumbnail options.

    If no matching alias is found (or the thumbnail cannot be generated),
    returns an empty string — unless ``THUMBNAIL_DEBUG`` is enabled, in which
    case the original exception propagates.

    Example usage::

        <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
    """
    try:
        thumb = get_thumbnailer(source)[alias]
    except Exception:
        if settings.THUMBNAIL_DEBUG:
            # Bare ``raise`` re-raises the active exception with its original
            # traceback intact (``raise e`` would add a redundant frame).
            raise
        return ''
    return thumb.url
@register.filter
def data_uri(thumbnail):
    """
    This filter will return the base64 encoded data URI for a given thumbnail
    object.

    Example usage::

        {% thumbnail sample_image 25x25 crop as thumb %}
        <img src="{{ thumb|data_uri }}">

    will for instance be rendered as:

        <img src="data:image/png;base64,iVBORw0KGgo...">
    """
    # Open *outside* the try block: if open() itself fails we must not call
    # close() on a file that was never opened (the original code did, which
    # masked the real error and left ``data`` unbound).
    thumbnail.open('rb')
    try:
        data = thumbnail.read()
    finally:
        thumbnail.close()
    # Fall back to the generic binary type when the name gives no MIME hint.
    mime_type = mimetypes.guess_type(str(thumbnail.file))[0] or 'application/octet-stream'
    data = b64encode(data).decode('utf-8')
    return 'data:{0};base64,{1}'.format(mime_type, data)
| ThumbnailNode |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 54922,
"end": 55524
} | class ____(Module):
r"""Applies the element-wise Softsign function.
.. math::
\text{SoftSign}(x) = \frac{x}{ 1 + |x|}
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softsign.png
Examples::
>>> m = nn.Softsign()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return F.softsign(input)
| Softsign |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/required2.py | {
"start": 278,
"end": 1328
} | class ____(TypedDict, total=False):
a: Annotated["te.Required[int]", ""]
b: Annotated[te.NotRequired[str], ""]
c: "te.Required[int | str]"
d: te.Required[str | None]
e: Required[Literal[1, 2, 3]]
f: Required[None]
g: Required[type[int]]
td1_1: TD1 = {"a": 3, "c": "hi", "d": None, "e": 3, "f": None, "g": int}
# This should generate an error because a is missing.
td1_2: TD1 = {"c": "hi", "d": None, "e": 3, "f": None, "g": int}
# This should generate an error because c is missing.
td1_3: TD1 = {"a": 3, "d": None, "e": 3, "f": None, "g": int}
# This should generate an error because d is missing.
td1_4: TD1 = {"a": 3, "c": "hi", "e": 3, "f": None, "g": int}
# This should generate an error because e is missing.
td1_5: TD1 = {"a": 3, "c": "hi", "d": None, "f": None, "g": int}
# This should generate an error because f is missing.
td1_6: TD1 = {"a": 3, "c": "hi", "d": None, "e": 3, "g": int}
# This should generate an error because g is missing.
td1_7: TD1 = {"a": 3, "c": "hi", "d": None, "e": 3, "f": None}
| TD1 |
python | facelessuser__soupsieve | tests/test_versions.py | {
"start": 93,
"end": 4052
} | class ____(unittest.TestCase):
"""Test versions."""
def test_version_output(self):
"""Test that versions generate proper strings."""
assert Version(1, 0, 0, "final")._get_canonical() == "1.0"
assert Version(1, 2, 0, "final")._get_canonical() == "1.2"
assert Version(1, 2, 3, "final")._get_canonical() == "1.2.3"
assert Version(1, 2, 0, "alpha", pre=4)._get_canonical() == "1.2a4"
assert Version(1, 2, 0, "beta", pre=4)._get_canonical() == "1.2b4"
assert Version(1, 2, 0, "candidate", pre=4)._get_canonical() == "1.2rc4"
assert Version(1, 2, 0, "final", post=1)._get_canonical() == "1.2.post1"
assert Version(1, 2, 3, ".dev-alpha", pre=1)._get_canonical() == "1.2.3a1.dev0"
assert Version(1, 2, 3, ".dev")._get_canonical() == "1.2.3.dev0"
assert Version(1, 2, 3, ".dev", dev=1)._get_canonical() == "1.2.3.dev1"
def test_version_comparison(self):
"""Test that versions compare proper."""
assert Version(1, 0, 0, "final") < Version(1, 2, 0, "final")
assert Version(1, 2, 0, "alpha", pre=4) < Version(1, 2, 0, "final")
assert Version(1, 2, 0, "final") < Version(1, 2, 0, "final", post=1)
assert Version(1, 2, 3, ".dev-beta", pre=2) < Version(1, 2, 3, "beta", pre=2)
assert Version(1, 2, 3, ".dev") < Version(1, 2, 3, ".dev-beta", pre=2)
assert Version(1, 2, 3, ".dev") < Version(1, 2, 3, ".dev", dev=1)
def test_version_parsing(self):
"""Test version parsing."""
assert parse_version(
Version(1, 0, 0, "final")._get_canonical()
) == Version(1, 0, 0, "final")
assert parse_version(
Version(1, 2, 0, "final")._get_canonical()
) == Version(1, 2, 0, "final")
assert parse_version(
Version(1, 2, 3, "final")._get_canonical()
) == Version(1, 2, 3, "final")
assert parse_version(
Version(1, 2, 0, "alpha", pre=4)._get_canonical()
) == Version(1, 2, 0, "alpha", pre=4)
assert parse_version(
Version(1, 2, 0, "beta", pre=4)._get_canonical()
) == Version(1, 2, 0, "beta", pre=4)
assert parse_version(
Version(1, 2, 0, "candidate", pre=4)._get_canonical()
) == Version(1, 2, 0, "candidate", pre=4)
assert parse_version(
Version(1, 2, 0, "final", post=1)._get_canonical()
) == Version(1, 2, 0, "final", post=1)
assert parse_version(
Version(1, 2, 3, ".dev-alpha", pre=1)._get_canonical()
) == Version(1, 2, 3, ".dev-alpha", pre=1)
assert parse_version(
Version(1, 2, 3, ".dev")._get_canonical()
) == Version(1, 2, 3, ".dev")
assert parse_version(
Version(1, 2, 3, ".dev", dev=1)._get_canonical()
) == Version(1, 2, 3, ".dev", dev=1)
def test_asserts(self):
"""Test asserts."""
with self.assertRaises(ValueError):
Version("1", "2", "3")
with self.assertRaises(ValueError):
Version(1, 2, 3, 1)
with self.assertRaises(ValueError):
Version("1", "2", "3")
with self.assertRaises(ValueError):
Version(1, 2, 3, "bad")
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha")
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha", pre=1, dev=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, "alpha", pre=1, post=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, ".dev-alpha")
with self.assertRaises(ValueError):
Version(1, 2, 3, ".dev-alpha", pre=1, post=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, pre=1)
with self.assertRaises(ValueError):
Version(1, 2, 3, dev=1)
with self.assertRaises(ValueError):
parse_version('bad&version')
| TestVersion |
python | kamyu104__LeetCode-Solutions | Python/minimum-limit-of-balls-in-a-bag.py | {
"start": 55,
"end": 604
} | class ____(object):
def minimumSize(self, nums, maxOperations):
"""
:type nums: List[int]
:type maxOperations: int
:rtype: int
"""
def check(nums, maxOperations, x):
return sum((num+x-1)//x-1 for num in nums) <= maxOperations
left, right = 1, max(nums)
while left <= right:
mid = left + (right-left)//2
if check(nums, maxOperations, mid):
right = mid-1
else:
left = mid+1
return left
| Solution |
python | Textualize__textual | src/textual/renderables/tint.py | {
"start": 304,
"end": 2605
} | class ____:
"""Applies a color on top of an existing renderable."""
def __init__(
self,
renderable: RenderableType,
color: Color,
) -> None:
"""Wrap a renderable to apply a tint color.
Args:
renderable: A renderable.
color: A color (presumably with alpha).
"""
self.renderable = renderable
self.color = color
@classmethod
def process_segments(
cls,
segments: Iterable[Segment],
color: Color,
ansi_theme: TerminalTheme,
background: Color = TRANSPARENT,
) -> Iterable[Segment]:
"""Apply tint to segments.
Args:
segments: Incoming segments.
color: Color of tint.
ansi_theme: The TerminalTheme defining how to map ansi colors to hex.
background: Background color.
Returns:
Segments with applied tint.
"""
from_rich_color = Color.from_rich_color
style_from_color = Style.from_color
_Segment = Segment
truecolor_style = ANSIToTruecolor(ansi_theme).truecolor_style
background_rich_color = background.rich_color
NULL_STYLE = Style()
for segment in segments:
text, style, control = segment
if control:
yield segment
else:
style = (
truecolor_style(style, background_rich_color)
if style is not None
else NULL_STYLE
)
yield _Segment(
text,
(
style
+ style_from_color(
(
(from_rich_color(style.color) + color).rich_color
if style.color is not None
else None
),
(
(from_rich_color(style.bgcolor) + color).rich_color
if style.bgcolor is not None
else None
),
)
),
control,
)
| Tint |
python | pytorch__pytorch | test/test_set_default_mobile_cpu_allocator.py | {
"start": 115,
"end": 976
} | class ____(TestCase):
def test_no_exception(self):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
def test_exception(self):
with self.assertRaises(Exception):
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._set_default_mobile_cpu_allocator()
# Must reset to good state
# For next test.
torch._C._unset_default_mobile_cpu_allocator()
with self.assertRaises(Exception):
torch._C._set_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
torch._C._unset_default_mobile_cpu_allocator()
if __name__ == '__main__':
run_tests()
| TestSetDefaultMobileCPUAllocator |
python | walkccc__LeetCode | solutions/2163. Minimum Difference in Sums After Removal of Elements/2163.py | {
"start": 0,
"end": 863
} | class ____:
def minimumDifference(self, nums: list[int]) -> int:
n = len(nums) // 3
ans = math.inf
leftSum = 0
rightSum = 0
maxHeap = [] # Left part, as small as possible
minHeap = [] # Right part, as big as possible
# minLeftSum[i] := the minimum of the sum of n nums in nums[0..i)
minLeftSum = [0] * len(nums)
for i in range(2 * n):
heapq.heappush(maxHeap, -nums[i])
leftSum += nums[i]
if len(maxHeap) == n + 1:
leftSum += heapq.heappop(maxHeap)
if len(maxHeap) == n:
minLeftSum[i] = leftSum
for i in range(len(nums) - 1, n - 1, -1):
heapq.heappush(minHeap, nums[i])
rightSum += nums[i]
if len(minHeap) == n + 1:
rightSum -= heapq.heappop(minHeap)
if len(minHeap) == n:
ans = min(ans, minLeftSum[i - 1] - rightSum)
return ans
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_service_spec.py | {
"start": 383,
"end": 44516
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allocate_load_balancer_node_ports': 'bool',
'cluster_ip': 'str',
'cluster_i_ps': 'list[str]',
'external_i_ps': 'list[str]',
'external_name': 'str',
'external_traffic_policy': 'str',
'health_check_node_port': 'int',
'internal_traffic_policy': 'str',
'ip_families': 'list[str]',
'ip_family_policy': 'str',
'load_balancer_class': 'str',
'load_balancer_ip': 'str',
'load_balancer_source_ranges': 'list[str]',
'ports': 'list[V1ServicePort]',
'publish_not_ready_addresses': 'bool',
'selector': 'dict(str, str)',
'session_affinity': 'str',
'session_affinity_config': 'V1SessionAffinityConfig',
'traffic_distribution': 'str',
'type': 'str'
}
attribute_map = {
'allocate_load_balancer_node_ports': 'allocateLoadBalancerNodePorts',
'cluster_ip': 'clusterIP',
'cluster_i_ps': 'clusterIPs',
'external_i_ps': 'externalIPs',
'external_name': 'externalName',
'external_traffic_policy': 'externalTrafficPolicy',
'health_check_node_port': 'healthCheckNodePort',
'internal_traffic_policy': 'internalTrafficPolicy',
'ip_families': 'ipFamilies',
'ip_family_policy': 'ipFamilyPolicy',
'load_balancer_class': 'loadBalancerClass',
'load_balancer_ip': 'loadBalancerIP',
'load_balancer_source_ranges': 'loadBalancerSourceRanges',
'ports': 'ports',
'publish_not_ready_addresses': 'publishNotReadyAddresses',
'selector': 'selector',
'session_affinity': 'sessionAffinity',
'session_affinity_config': 'sessionAffinityConfig',
'traffic_distribution': 'trafficDistribution',
'type': 'type'
}
def __init__(self, allocate_load_balancer_node_ports=None, cluster_ip=None, cluster_i_ps=None, external_i_ps=None, external_name=None, external_traffic_policy=None, health_check_node_port=None, internal_traffic_policy=None, ip_families=None, ip_family_policy=None, load_balancer_class=None, load_balancer_ip=None, load_balancer_source_ranges=None, ports=None, publish_not_ready_addresses=None, selector=None, session_affinity=None, session_affinity_config=None, traffic_distribution=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allocate_load_balancer_node_ports = None
self._cluster_ip = None
self._cluster_i_ps = None
self._external_i_ps = None
self._external_name = None
self._external_traffic_policy = None
self._health_check_node_port = None
self._internal_traffic_policy = None
self._ip_families = None
self._ip_family_policy = None
self._load_balancer_class = None
self._load_balancer_ip = None
self._load_balancer_source_ranges = None
self._ports = None
self._publish_not_ready_addresses = None
self._selector = None
self._session_affinity = None
self._session_affinity_config = None
self._traffic_distribution = None
self._type = None
self.discriminator = None
if allocate_load_balancer_node_ports is not None:
self.allocate_load_balancer_node_ports = allocate_load_balancer_node_ports
if cluster_ip is not None:
self.cluster_ip = cluster_ip
if cluster_i_ps is not None:
self.cluster_i_ps = cluster_i_ps
if external_i_ps is not None:
self.external_i_ps = external_i_ps
if external_name is not None:
self.external_name = external_name
if external_traffic_policy is not None:
self.external_traffic_policy = external_traffic_policy
if health_check_node_port is not None:
self.health_check_node_port = health_check_node_port
if internal_traffic_policy is not None:
self.internal_traffic_policy = internal_traffic_policy
if ip_families is not None:
self.ip_families = ip_families
if ip_family_policy is not None:
self.ip_family_policy = ip_family_policy
if load_balancer_class is not None:
self.load_balancer_class = load_balancer_class
if load_balancer_ip is not None:
self.load_balancer_ip = load_balancer_ip
if load_balancer_source_ranges is not None:
self.load_balancer_source_ranges = load_balancer_source_ranges
if ports is not None:
self.ports = ports
if publish_not_ready_addresses is not None:
self.publish_not_ready_addresses = publish_not_ready_addresses
if selector is not None:
self.selector = selector
if session_affinity is not None:
self.session_affinity = session_affinity
if session_affinity_config is not None:
self.session_affinity_config = session_affinity_config
if traffic_distribution is not None:
self.traffic_distribution = traffic_distribution
if type is not None:
self.type = type
@property
def allocate_load_balancer_node_ports(self):
"""Gets the allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. # noqa: E501
:return: The allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
:rtype: bool
"""
return self._allocate_load_balancer_node_ports
@allocate_load_balancer_node_ports.setter
def allocate_load_balancer_node_ports(self, allocate_load_balancer_node_ports):
"""Sets the allocate_load_balancer_node_ports of this V1ServiceSpec.
allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. # noqa: E501
:param allocate_load_balancer_node_ports: The allocate_load_balancer_node_ports of this V1ServiceSpec. # noqa: E501
:type: bool
"""
self._allocate_load_balancer_node_ports = allocate_load_balancer_node_ports
@property
def cluster_ip(self):
"""Gets the cluster_ip of this V1ServiceSpec. # noqa: E501
clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:return: The cluster_ip of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._cluster_ip
@cluster_ip.setter
def cluster_ip(self, cluster_ip):
"""Sets the cluster_ip of this V1ServiceSpec.
clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:param cluster_ip: The cluster_ip of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._cluster_ip = cluster_ip
@property
def cluster_i_ps(self):
"""Gets the cluster_i_ps of this V1ServiceSpec. # noqa: E501
ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value. This field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:return: The cluster_i_ps of this V1ServiceSpec. # noqa: E501
:rtype: list[str]
"""
return self._cluster_i_ps
@cluster_i_ps.setter
def cluster_i_ps(self, cluster_i_ps):
"""Sets the cluster_i_ps of this V1ServiceSpec.
ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value. This field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:param cluster_i_ps: The cluster_i_ps of this V1ServiceSpec. # noqa: E501
:type: list[str]
"""
self._cluster_i_ps = cluster_i_ps
@property
def external_i_ps(self):
"""Gets the external_i_ps of this V1ServiceSpec. # noqa: E501
externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. # noqa: E501
:return: The external_i_ps of this V1ServiceSpec. # noqa: E501
:rtype: list[str]
"""
return self._external_i_ps
@external_i_ps.setter
def external_i_ps(self, external_i_ps):
"""Sets the external_i_ps of this V1ServiceSpec.
externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. # noqa: E501
:param external_i_ps: The external_i_ps of this V1ServiceSpec. # noqa: E501
:type: list[str]
"""
self._external_i_ps = external_i_ps
@property
def external_name(self):
"""Gets the external_name of this V1ServiceSpec. # noqa: E501
externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\". # noqa: E501
:return: The external_name of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._external_name
@external_name.setter
def external_name(self, external_name):
"""Sets the external_name of this V1ServiceSpec.
externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\". # noqa: E501
:param external_name: The external_name of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._external_name = external_name
@property
def external_traffic_policy(self):
"""Gets the external_traffic_policy of this V1ServiceSpec. # noqa: E501
externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node. # noqa: E501
:return: The external_traffic_policy of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._external_traffic_policy
@external_traffic_policy.setter
def external_traffic_policy(self, external_traffic_policy):
"""Sets the external_traffic_policy of this V1ServiceSpec.
externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node. # noqa: E501
:param external_traffic_policy: The external_traffic_policy of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._external_traffic_policy = external_traffic_policy
@property
def health_check_node_port(self):
"""Gets the health_check_node_port of this V1ServiceSpec. # noqa: E501
healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set. # noqa: E501
:return: The health_check_node_port of this V1ServiceSpec. # noqa: E501
:rtype: int
"""
return self._health_check_node_port
@health_check_node_port.setter
def health_check_node_port(self, health_check_node_port):
"""Sets the health_check_node_port of this V1ServiceSpec.
healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set. # noqa: E501
:param health_check_node_port: The health_check_node_port of this V1ServiceSpec. # noqa: E501
:type: int
"""
self._health_check_node_port = health_check_node_port
@property
def internal_traffic_policy(self):
"""Gets the internal_traffic_policy of this V1ServiceSpec. # noqa: E501
InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). # noqa: E501
:return: The internal_traffic_policy of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._internal_traffic_policy
@internal_traffic_policy.setter
def internal_traffic_policy(self, internal_traffic_policy):
"""Sets the internal_traffic_policy of this V1ServiceSpec.
InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). # noqa: E501
:param internal_traffic_policy: The internal_traffic_policy of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._internal_traffic_policy = internal_traffic_policy
@property
def ip_families(self):
"""Gets the ip_families of this V1ServiceSpec. # noqa: E501
IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName. This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. # noqa: E501
:return: The ip_families of this V1ServiceSpec. # noqa: E501
:rtype: list[str]
"""
return self._ip_families
@ip_families.setter
def ip_families(self, ip_families):
"""Sets the ip_families of this V1ServiceSpec.
IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName. This field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. # noqa: E501
:param ip_families: The ip_families of this V1ServiceSpec. # noqa: E501
:type: list[str]
"""
self._ip_families = ip_families
@property
def ip_family_policy(self):
"""Gets the ip_family_policy of this V1ServiceSpec. # noqa: E501
IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName. # noqa: E501
:return: The ip_family_policy of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._ip_family_policy
@ip_family_policy.setter
def ip_family_policy(self, ip_family_policy):
"""Sets the ip_family_policy of this V1ServiceSpec.
IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName. # noqa: E501
:param ip_family_policy: The ip_family_policy of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._ip_family_policy = ip_family_policy
@property
def load_balancer_class(self):
"""Gets the load_balancer_class of this V1ServiceSpec. # noqa: E501
loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. # noqa: E501
:return: The load_balancer_class of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._load_balancer_class
@load_balancer_class.setter
def load_balancer_class(self, load_balancer_class):
"""Sets the load_balancer_class of this V1ServiceSpec.
loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. # noqa: E501
:param load_balancer_class: The load_balancer_class of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._load_balancer_class = load_balancer_class
@property
def load_balancer_ip(self):
"""Gets the load_balancer_ip of this V1ServiceSpec. # noqa: E501
Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available. # noqa: E501
:return: The load_balancer_ip of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._load_balancer_ip
@load_balancer_ip.setter
def load_balancer_ip(self, load_balancer_ip):
"""Sets the load_balancer_ip of this V1ServiceSpec.
Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available. # noqa: E501
:param load_balancer_ip: The load_balancer_ip of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._load_balancer_ip = load_balancer_ip
@property
def load_balancer_source_ranges(self):
"""Gets the load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ # noqa: E501
:return: The load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
:rtype: list[str]
"""
return self._load_balancer_source_ranges
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, load_balancer_source_ranges):
"""Sets the load_balancer_source_ranges of this V1ServiceSpec.
If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ # noqa: E501
:param load_balancer_source_ranges: The load_balancer_source_ranges of this V1ServiceSpec. # noqa: E501
:type: list[str]
"""
self._load_balancer_source_ranges = load_balancer_source_ranges
@property
def ports(self):
"""Gets the ports of this V1ServiceSpec. # noqa: E501
The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:return: The ports of this V1ServiceSpec. # noqa: E501
:rtype: list[V1ServicePort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1ServiceSpec.
The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:param ports: The ports of this V1ServiceSpec. # noqa: E501
:type: list[V1ServicePort]
"""
self._ports = ports
@property
def publish_not_ready_addresses(self):
"""Gets the publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior. # noqa: E501
:return: The publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
:rtype: bool
"""
return self._publish_not_ready_addresses
@publish_not_ready_addresses.setter
def publish_not_ready_addresses(self, publish_not_ready_addresses):
"""Sets the publish_not_ready_addresses of this V1ServiceSpec.
publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior. # noqa: E501
:param publish_not_ready_addresses: The publish_not_ready_addresses of this V1ServiceSpec. # noqa: E501
:type: bool
"""
self._publish_not_ready_addresses = publish_not_ready_addresses
@property
def selector(self):
"""Gets the selector of this V1ServiceSpec. # noqa: E501
Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501
:return: The selector of this V1ServiceSpec. # noqa: E501
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1ServiceSpec.
Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/ # noqa: E501
:param selector: The selector of this V1ServiceSpec. # noqa: E501
:type: dict(str, str)
"""
self._selector = selector
@property
def session_affinity(self):
"""Gets the session_affinity of this V1ServiceSpec. # noqa: E501
Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:return: The session_affinity of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._session_affinity
@session_affinity.setter
def session_affinity(self, session_affinity):
"""Sets the session_affinity of this V1ServiceSpec.
Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # noqa: E501
:param session_affinity: The session_affinity of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._session_affinity = session_affinity
@property
def session_affinity_config(self):
"""Gets the session_affinity_config of this V1ServiceSpec. # noqa: E501
:return: The session_affinity_config of this V1ServiceSpec. # noqa: E501
:rtype: V1SessionAffinityConfig
"""
return self._session_affinity_config
@session_affinity_config.setter
def session_affinity_config(self, session_affinity_config):
"""Sets the session_affinity_config of this V1ServiceSpec.
:param session_affinity_config: The session_affinity_config of this V1ServiceSpec. # noqa: E501
:type: V1SessionAffinityConfig
"""
self._session_affinity_config = session_affinity_config
@property
def traffic_distribution(self):
"""Gets the traffic_distribution of this V1ServiceSpec. # noqa: E501
TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone. # noqa: E501
:return: The traffic_distribution of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._traffic_distribution
@traffic_distribution.setter
def traffic_distribution(self, traffic_distribution):
"""Sets the traffic_distribution of this V1ServiceSpec.
TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are in the same zone. # noqa: E501
:param traffic_distribution: The traffic_distribution of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._traffic_distribution = traffic_distribution
@property
def type(self):
"""Gets the type of this V1ServiceSpec. # noqa: E501
type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types # noqa: E501
:return: The type of this V1ServiceSpec. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1ServiceSpec.
type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types # noqa: E501
:param type: The type of this V1ServiceSpec. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Return the pretty-printed model dict (used by `print` and `pprint`)"""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceSpec):
return True
return self.to_dict() != other.to_dict()
| V1ServiceSpec |
python | kamyu104__LeetCode-Solutions | Python/minimum-incompatibility.py | {
"start": 175,
"end": 1314
} | class ____(object):
def minimumIncompatibility(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
inf = (len(nums)-1)*(len(nums)//k)+1
def backtracking(nums, d, lookup):
if not nums:
return 0
if nums not in lookup:
ret = inf
for new_nums in itertools.combinations(nums, d):
new_nums_set = set(new_nums)
if len(new_nums_set) < d:
continue
left = []
for num in nums:
if num in new_nums_set:
new_nums_set.remove(num)
continue
left.append(num)
ret = min(ret, max(new_nums)-min(new_nums) + backtracking(tuple(left), d, lookup))
lookup[nums] = ret
return lookup[nums]
result = backtracking(tuple(nums), len(nums)//k, {})
return result if result != inf else -1
# Time: O(max(n * 2^n, 3^n))
# Space: O(2^n)
| Solution |
python | numba__numba | numba/tests/test_ir_inlining.py | {
"start": 36368,
"end": 38100
} | class ____(MemoryLeakMixin, InliningBase):
def test_with_inlined_and_noninlined_variants(self):
# This test is contrived and was to demonstrate fixing a bug in the
# template walking logic where inlinable and non-inlinable definitions
# would not mix.
@overload(len, inline='always')
def overload_len(A):
if False:
return lambda A: 10
def impl():
return len([2, 3, 4])
# len(list) won't be inlined because the overload above doesn't apply
self.check(impl, inline_expect={'len': False})
def test_with_kwargs(self):
def foo(a, b=3, c=5):
return a + b + c
@overload(foo, inline='always')
def overload_foo(a, b=3, c=5):
def impl(a, b=3, c=5):
return a + b + c
return impl
def impl():
return foo(3, c=10)
self.check(impl, inline_expect={'foo': True})
def test_with_kwargs2(self):
@njit(inline='always')
def bar(a, b=12, c=9):
return a + b
def impl(a, b=7, c=5):
return bar(a + b, c=19)
self.check(impl, 3, 4, inline_expect={'bar': True})
def test_inlining_optional_constant(self):
# This testcase causes `b` to be a Optional(bool) constant once it is
# inlined into foo().
@njit(inline='always')
def bar(a=None, b=None):
if b is None:
b = 123 # this changes the type of `b` due to lack of SSA
return (a, b)
def impl():
return bar(), bar(123), bar(b=321)
self.check(impl, block_count='SKIP', inline_expect={'bar': True})
| TestGeneralInlining |
python | keras-team__keras | keras/src/distribution/distribution_lib_test.py | {
"start": 5561,
"end": 9623
} | class ____(testing.TestCase):
def setUp(self):
super().setUp()
self.devices = [f"cpu:{i}" for i in range(8)]
shape = (8,)
axis_names = ["data"]
self.device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, self.devices
)
def test_create_with_device_mesh(self):
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
device_mesh = distribution.device_mesh
self.assertEqual(len(device_mesh.devices), 8)
self.assertEqual(device_mesh.axis_names, ["data"])
self.assertEqual(distribution.batch_dim_name, "data")
self.assertFalse(distribution._is_multi_process)
self.assertEqual(distribution._process_id, 0)
self.assertEqual(distribution._num_process, 1)
def test_create_with_devices(self):
distribution = distribution_lib.DataParallel(devices=self.devices)
device_mesh = distribution.device_mesh
self.assertEqual(len(device_mesh.devices), 8)
self.assertEqual(device_mesh.axis_names, ["batch"])
self.assertEqual(distribution.batch_dim_name, "batch")
@mock.patch.object(
distribution_lib,
"list_devices",
return_value=[f"cpu:{i}" for i in range(8)],
)
def test_create_with_list_devices(self, mock_list_devices):
distribution = distribution_lib.DataParallel()
mock_list_devices.assert_called_once()
device_mesh = distribution.device_mesh
self.assertEqual(len(device_mesh.devices), 8)
self.assertEqual(device_mesh.axis_names, ["batch"])
self.assertEqual(distribution.batch_dim_name, "batch")
def test_get_data_layout(self):
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
data = np.arange(16).reshape((4, 2, 2))
data_layout = distribution.get_data_layout(data.shape)
self.assertIs(data_layout.device_mesh, self.device_mesh)
self.assertEqual(data_layout.axes, ("data", None, None))
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
def test_get_variable_layout(self):
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
variable = backend.Variable(initializer=[1, 2, 3])
variable_layout = distribution.get_variable_layout(variable)
self.assertIs(variable_layout.device_mesh, self.device_mesh)
self.assertEqual(variable_layout.axes, (None,))
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
def test_get_variable_layout_with_explicit_layout(self):
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
explicit_mesh = distribution_lib.DeviceMesh((8,), ["x"], self.devices)
explicit_layout = distribution_lib.TensorLayout(["x"], explicit_mesh)
variable = backend.Variable(initializer=[1, 2, 3])
variable._layout = explicit_layout
variable_layout = distribution.get_variable_layout(variable)
self.assertIs(variable_layout.device_mesh, explicit_mesh)
self.assertEqual(variable_layout.axes, explicit_layout.axes)
def test_get_tensor_layout(self):
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
path = "path/to/tensor"
tensor_layout = distribution.get_tensor_layout(path)
self.assertIsNone(tensor_layout)
def test_distribute_dataset(self):
# We can only verify the single worker/process case in OSS for now.
dataset = tf.data.Dataset.range(8)
distribution = distribution_lib.DataParallel(
device_mesh=self.device_mesh
)
distributed_dataset = distribution.distribute_dataset(dataset)
self.assertIs(dataset, distributed_dataset)
@pytest.mark.skipif(
backend.backend() != "jax",
reason="Only JAX has the proper backend distribution lib",
)
| DataParallelDistributionTest |
python | tqdm__tqdm | tests/tests_contrib_logging.py | {
"start": 589,
"end": 754
} | class ____(tqdm):
messages = []
@classmethod
def write(cls, s, **__): # pylint: disable=arguments-differ
CustomTqdm.messages.append(s)
| CustomTqdm |
python | doocs__leetcode | solution/1200-1299/1282.Group the People Given the Group Size They Belong To/Solution.py | {
"start": 0,
"end": 269
} | class ____:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
g = defaultdict(list)
for i, v in enumerate(groupSizes):
g[v].append(i)
return [v[j : j + i] for i, v in g.items() for j in range(0, len(v), i)]
| Solution |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 163240,
"end": 163355
} | class ____(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
| RecvmsgUDPTest |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/slicing.py | {
"start": 451,
"end": 1203
} | class ____(Expr):
__slots__ = ("length", "offset")
_non_child = ("dtype", "offset", "length")
def __init__(
self,
dtype: DataType,
offset: int,
length: int | None,
column: Expr,
) -> None:
self.dtype = dtype
self.offset = offset
self.length = length
self.children = (column,)
self.is_pointwise = False
def do_evaluate(
self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
) -> Column:
"""Evaluate this expression given a dataframe for context."""
(child,) = self.children
column = child.evaluate(df, context=context)
return column.slice((self.offset, self.length), stream=df.stream)
| Slice |
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 16402,
"end": 16610
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'user']
valid_subsets = ['user']
fact_namespace = 'ansible_user'
collector_class = UserFactCollector
| TestUserFactCollector |
python | walkccc__LeetCode | solutions/446. Arithmetic Slices II - Subsequence/446.py | {
"start": 0,
"end": 597
} | class ____:
def numberOfArithmeticSlices(self, nums: list[int]) -> int:
n = len(nums)
ans = 0
# dp[i][j] := the number of subsequences end in nums[j] nums[i]
dp = [[0] * n for _ in range(n)]
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums):
numToIndices[num].append(i)
for i in range(n):
for j in range(i):
target = nums[j] * 2 - nums[i]
if target in numToIndices:
for k in numToIndices[target]:
if k < j:
dp[i][j] += dp[j][k] + 1
ans += dp[i][j]
return ans
| Solution |
python | wandb__wandb | wandb/apis/public/reports.py | {
"start": 8932,
"end": 16613
} | class ____:
"""Converts Python-style query expressions to MongoDB-style queries for W&B reports.
<!-- lazydoc-ignore-class: internal -->
"""
SPACER = "----------"
DECIMAL_SPACER = ";;;"
FRONTEND_NAME_MAPPING = {
"ID": "name",
"Name": "displayName",
"Tags": "tags",
"State": "state",
"CreatedTimestamp": "createdAt",
"Runtime": "duration",
"User": "username",
"Sweep": "sweep",
"Group": "group",
"JobType": "jobType",
"Hostname": "host",
"UsingArtifact": "inputArtifacts",
"OutputtingArtifact": "outputArtifacts",
"Step": "_step",
"Relative Time (Wall)": "_absolute_runtime",
"Relative Time (Process)": "_runtime",
"Wall Time": "_timestamp",
# "GroupedRuns": "__wb_group_by_all"
}
FRONTEND_NAME_MAPPING_REVERSED = {v: k for k, v in FRONTEND_NAME_MAPPING.items()}
AST_OPERATORS = {
ast.Lt: "$lt",
ast.LtE: "$lte",
ast.Gt: "$gt",
ast.GtE: "$gte",
ast.Eq: "=",
ast.Is: "=",
ast.NotEq: "$ne",
ast.IsNot: "$ne",
ast.In: "$in",
ast.NotIn: "$nin",
ast.And: "$and",
ast.Or: "$or",
ast.Not: "$not",
}
AST_FIELDS = {
ast.Constant: "value",
ast.Name: "id",
ast.List: "elts",
ast.Tuple: "elts",
}
def __init__(self, run_set):
self.run_set = run_set
self.panel_metrics_helper = PanelMetricsHelper()
def _handle_compare(self, node):
# only left side can be a col
left = self.front_to_back(self._handle_fields(node.left))
op = self._handle_ops(node.ops[0])
right = self._handle_fields(node.comparators[0])
# Eq has no op for some reason
if op == "=":
return {left: right}
else:
return {left: {op: right}}
def _handle_fields(self, node):
result = getattr(node, self.AST_FIELDS.get(type(node)))
if isinstance(result, list):
return [self._handle_fields(node) for node in result]
elif isinstance(result, str):
return self._unconvert(result)
return result
def _handle_ops(self, node):
return self.AST_OPERATORS.get(type(node))
def _replace_numeric_dots(self, s):
numeric_dots = []
for i, (left, mid, right) in enumerate(zip(s, s[1:], s[2:]), 1):
if mid == ".":
if (
left.isdigit()
and right.isdigit() # 1.2
or left.isdigit()
and right == " " # 1.
or left == " "
and right.isdigit() # .2
):
numeric_dots.append(i)
# Edge: Catch number ending in dot at end of string
if s[-2].isdigit() and s[-1] == ".":
numeric_dots.append(len(s) - 1)
numeric_dots = [-1] + numeric_dots + [len(s)]
substrs = []
for start, stop in zip(numeric_dots, numeric_dots[1:]):
substrs.append(s[start + 1 : stop])
substrs.append(self.DECIMAL_SPACER)
substrs = substrs[:-1]
return "".join(substrs)
def _convert(self, filterstr):
_conversion = (
self._replace_numeric_dots(filterstr) # temporarily sub numeric dots
.replace(".", self.SPACER) # Allow dotted fields
.replace(self.DECIMAL_SPACER, ".") # add them back
)
return "(" + _conversion + ")"
def _unconvert(self, field_name):
return field_name.replace(self.SPACER, ".") # Allow dotted fields
def python_to_mongo(self, filterstr):
    """Convert a Python comparison expression to a MongoDB filter.
    <!-- lazydoc-ignore: internal -->
    """
    try:
        tree = ast.parse(self._convert(filterstr), mode="eval")
    except SyntaxError as e:
        raise ValueError(
            "Invalid python comparison expression; form something like `my_col == 123`"
        ) from e
    body = tree.body
    # A BoolOp (`and`/`or`) carries an `.op` attribute; a bare comparison
    # does not, so `hasattr` distinguishes multi- from single-clause filters.
    if hasattr(body, "op"):
        mongo_op = self.AST_OPERATORS.get(type(body.op))
        clauses = [self._handle_compare(child) for child in body.values]
    else:
        mongo_op = "$and"
        clauses = [self._handle_compare(body)]
    return {"$or": [{mongo_op: clauses}]}
def front_to_back(self, name):
    """Convert frontend metric names to backend field names.
    <!-- lazydoc-ignore: internal -->
    """
    base, *tail = name.split(".")
    suffix = "." + ".".join(tail) if tail else ""
    if base in self.FRONTEND_NAME_MAPPING:
        return self.FRONTEND_NAME_MAPPING[base]
    if base in self.FRONTEND_NAME_MAPPING_REVERSED:
        # Already a backend name; pass through untouched.
        return base
    if base in self.run_set._runs_config:
        return f"config.{base}.value{suffix}"
    # Fall back to treating unknown names as summary metrics.
    return f"summary_metrics.{base}{suffix}"
def back_to_front(self, name):
    """Convert backend field names to frontend metric names.
    <!-- lazydoc-ignore: internal -->
    """
    if name in self.FRONTEND_NAME_MAPPING_REVERSED:
        return self.FRONTEND_NAME_MAPPING_REVERSED[name]
    if name in self.FRONTEND_NAME_MAPPING:
        # Already a frontend name.
        return name
    # may be brittle: originally "endswith", but that doesn't work with nested keys...
    if name.startswith("config.") and ".value" in name:
        # strip is weird sometimes (??)
        return name.replace("config.", "").replace(".value", "")
    if name.startswith("summary_metrics."):
        return name.replace("summary_metrics.", "")
    wandb.termerror(f"Unknown token: {name}")
    return name
# These are only used for ParallelCoordinatesPlot because it has weird backend names...
def pc_front_to_back(self, name):
    """Convert ParallelCoordinatesPlot names to backend field names.
    <!-- lazydoc-ignore: internal -->
    """
    # Bug fix: the None check must precede name.split(".") — the original
    # split first, so a None name raised AttributeError and the
    # `name is None` branch was dead code. (Sibling pc_back_to_front
    # checks None first, confirming the intended contract.)
    if name is None:
        return None
    base, *tail = name.split(".")
    suffix = "." + ".".join(tail) if tail else ""
    if base in self.panel_metrics_helper.FRONTEND_NAME_MAPPING:
        return "summary:" + self.panel_metrics_helper.FRONTEND_NAME_MAPPING[base]
    elif base in self.FRONTEND_NAME_MAPPING:
        return self.FRONTEND_NAME_MAPPING[base]
    elif base in self.FRONTEND_NAME_MAPPING_REVERSED:
        # Already a backend name; pass through.
        return base
    elif base in self.run_set._runs_config:
        return f"config:{base}.value{suffix}"
    else:  # assume summary metrics
        return f"summary:{base}{suffix}"
def pc_back_to_front(self, name):
    """Convert backend field names back to ParallelCoordinatesPlot names.
    <!-- lazydoc-ignore: internal -->
    """
    if name is None:
        return None
    if "summary:" in name:
        stripped = name.replace("summary:", "")
        reverse_map = self.panel_metrics_helper.FRONTEND_NAME_MAPPING_REVERSED
        # Unknown summary names fall through unchanged.
        return reverse_map.get(stripped, stripped)
    if name in self.FRONTEND_NAME_MAPPING_REVERSED:
        return self.FRONTEND_NAME_MAPPING_REVERSED[name]
    if name in self.FRONTEND_NAME_MAPPING:
        # Already a frontend name.
        return name
    if name.startswith("config:") and ".value" in name:
        return name.replace("config:", "").replace(".value", "")
    if name.startswith("summary_metrics."):
        return name.replace("summary_metrics.", "")
    return name
| PythonMongoishQueryGenerator |
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 19481,
"end": 20289
class ____(PreTrainedModel):
    # NOTE(review): the class name is a masked placeholder in this extract;
    # the surrounding record labels it "BartPreTrainedModel".
    # Configuration class associated with this model family.
    config: BartConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Legacy checkpoint keys that may appear in saved weights; ignored on load.
    _keys_to_ignore_on_load_unexpected = ["encoder.version", "decoder.version"]
    # Module classes that must not be split across devices.
    _no_split_modules = [r"BartEncoderLayer", r"BartDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    # Attention/compile capability flags.
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True

    @property
    def dummy_inputs(self):
        """Small fixed batch of input ids plus matching attention mask."""
        pad_token = self.config.pad_token_id
        # Second row ends in padding so the mask exercises both 0 and 1 cases.
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
        }
        return dummy_inputs
| BartPreTrainedModel |
python | PyCQA__pylint | tests/functional/i/iterable_context.py | {
"start": 1977,
"end": 2211
class ____(Iterable):
    # Intentionally empty subclass of Iterable; presumably a lint-test
    # fixture for iterable-context checks (name masked in this extract).
    pass
# Iterating an instance of a user-defined Iterable subclass should be accepted.
m = MyClass()
for i in m:
    print(i)

# skip uninferable instances
ambiguous = range(i) or range(i)
for j in ambiguous:
    print(j)

# skip checks if statement is inside mixin/base/abstract class
| MyClass |
python | kamyu104__LeetCode-Solutions | Python/erect-the-fence-ii.py | {
"start": 123,
"end": 2025
class ____(object):
    def outerTrees(self, trees):
        """
        :type trees: List[List[int]]
        :rtype: List[float]
        """
        # Minimum enclosing circle via Welzl's randomized algorithm.
        def dist(a, b):
            # Euclidean distance between points a and b.
            return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5

        def inside(c, p):
            # True if p lies within circle c = [center, radius],
            # with EPS slack for floating-point tolerance.
            return dist(c[0], p) < c[1]+EPS

        def circle_center(bx, by, cx, cy):
            # Circumcenter of triangle (0,0), (bx,by), (cx,cy),
            # relative to the origin.
            B = bx*bx + by*by
            C = cx*cx + cy*cy
            D = bx*cy - by*cx
            return [float(cy*B - by*C)/(2*D),
                    float(bx*C - cx*B)/(2*D)]

        def circle_from_2_points(A, B):
            # Smallest circle through two points: diameter endpoints A, B.
            C = [(A[0]+B[0])/2.0, (A[1]+B[1])/2.0]
            return [C, dist(A, B)/2.0]

        def circle_from_3_points(A, B, C):
            # Circumscribed circle of triangle ABC (translate so A is origin).
            I = circle_center(B[0]-A[0], B[1]-A[1],
                              C[0]-A[0], C[1]-A[1])
            I[0] += A[0]
            I[1] += A[1]
            return [I, dist(I, A)]

        def trivial(boundaries):  # circumscribed circle
            # Base case: circle determined by 0-3 boundary points.
            if not boundaries:
                return None
            if len(boundaries) == 1:
                return [boundaries[0], 0.0]
            if len(boundaries) == 2:
                return circle_from_2_points(boundaries[0], boundaries[1])
            return circle_from_3_points(boundaries[0], boundaries[1], boundaries[2])

        def Welzl(points, boundaries, curr):
            # Recursively grow the circle; a point outside the current
            # circle must lie on the boundary of the minimal one.
            if curr == len(points) or len(boundaries) == 3:
                return trivial(boundaries)
            result = Welzl(points, boundaries, curr+1)
            if result is not None and inside(result, points[curr]):
                return result
            boundaries.append(points[curr])
            result = Welzl(points, boundaries, curr+1)
            boundaries.pop()
            return result

        EPS = 1e-5
        # Fixed seed keeps the shuffle (and recursion depth) deterministic;
        # shuffling gives Welzl its expected-linear running time.
        random.seed(0)
        random.shuffle(trees)
        result = Welzl(trees, [], 0)
        return result[0][0], result[0][1], result[1]
| Solution |
python | pandas-dev__pandas | pandas/tests/reshape/merge/test_merge_asof.py | {
"start": 296,
"end": 121035
} | class ____:
def prep_data(self, df, dedupe=False):
    """Normalize a fixture frame: optionally drop duplicate
    (time, ticker) rows keeping the last, then parse `time` to datetime."""
    if dedupe:
        deduped = df.drop_duplicates(["time", "ticker"], keep="last")
        df = deduped.reset_index(drop=True)
    df.time = to_datetime(df.time)
    return df
@pytest.fixture
def trades(self):
df = pd.DataFrame(
[
["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"],
["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"],
["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
],
columns="time,ticker,price,quantity,marketCenter".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
return self.prep_data(df)
@pytest.fixture
def quotes(self):
df = pd.DataFrame(
[
["20160525 13:30:00.023", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.023", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.041", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.072", "GOOG", "720.50", "720.88"],
["20160525 13:30:00.075", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.92", "51.95"],
],
columns="time,ticker,bid,ask".split(","),
)
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df, dedupe=True)
@pytest.fixture
def asof(self):
df = pd.DataFrame(
[
[
"20160525 13:30:00.023",
"MSFT",
"51.95",
"75",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.038",
"MSFT",
"51.95",
"155",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.77",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.92",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"200",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"300",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"600",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"44",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.074",
"AAPL",
"98.67",
"478343",
"NASDAQ",
np.nan,
np.nan,
],
[
"20160525 13:30:00.075",
"AAPL",
"98.67",
"478343",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.66",
"6",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"30",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"75",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"20",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"35",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"10",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"1000",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"300",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"400",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"600",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"783",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
],
columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df)
@pytest.fixture
def tolerance(self):
df = pd.DataFrame(
[
[
"20160525 13:30:00.023",
"MSFT",
"51.95",
"75",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.038",
"MSFT",
"51.95",
"155",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.77",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.92",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"200",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"300",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"600",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"44",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.074",
"AAPL",
"98.67",
"478343",
"NASDAQ",
np.nan,
np.nan,
],
[
"20160525 13:30:00.075",
"AAPL",
"98.67",
"478343",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.66",
"6",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"30",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"75",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"20",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"35",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"10",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"1000",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"300",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"400",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"600",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"783",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
],
columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df)
def test_examples1(self):
"""doc-string examples"""
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self, unit):
"""doc-string examples"""
if unit == "s":
pytest.skip(
"This test is invalid for unit='s' because that would "
"round the trades['time']]"
)
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
).astype(f"M8[{unit}]"),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
merge_asof(trades, quotes, on="time", by="ticker")
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms"))
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
).astype(f"M8[{unit}]"),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self, trades, asof, quotes):
    """Merge the trades/quotes fixtures by ticker; the `asof` fixture
    holds the precomputed expected result."""
    expected = asof
    result = merge_asof(trades, quotes, on="time", by="ticker")
    tm.assert_frame_equal(result, expected)
def test_basic_categorical(self, trades, asof, quotes):
    """Same as test_basic but with the `by` key as a categorical dtype."""
    expected = asof
    trades.ticker = trades.ticker.astype("category")
    quotes.ticker = quotes.ticker.astype("category")
    expected.ticker = expected.ticker.astype("category")
    result = merge_asof(trades, quotes, on="time", by="ticker")
    tm.assert_frame_equal(result, expected)
def test_basic_left_index(self, trades, asof, quotes):
    """Merge with the left `on` key supplied via the index (left_index=True)."""
    # GH14253
    expected = asof
    trades = trades.set_index("time")
    result = merge_asof(
        trades, quotes, left_index=True, right_on="time", by="ticker"
    )
    # left-only index uses right"s index, oddly
    expected.index = result.index
    # time column appears after left"s columns
    expected = expected[result.columns]
    tm.assert_frame_equal(result, expected)
def test_basic_right_index(self, trades, asof, quotes):
    """Merge with the right `on` key supplied via the index (right_index=True)."""
    expected = asof
    quotes = quotes.set_index("time")
    result = merge_asof(
        trades, quotes, left_on="time", right_index=True, by="ticker"
    )
    tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self, trades, asof, quotes):
    """Merge with both sides keyed on their indexes."""
    expected = asof.set_index("time")
    trades = trades.set_index("time")
    quotes = quotes.set_index("time")
    result = merge_asof(
        trades, quotes, left_index=True, right_index=True, by="ticker"
    )
    tm.assert_frame_equal(result, expected)
def test_multi_index_left(self, trades, quotes):
    """A MultiIndex on the left frame must raise MergeError."""
    # MultiIndex is prohibited
    trades = trades.set_index(["time", "price"])
    quotes = quotes.set_index("time")
    with pytest.raises(MergeError, match="left can only have one index"):
        merge_asof(trades, quotes, left_index=True, right_index=True)
def test_multi_index_right(self, trades, quotes):
    """A MultiIndex on the right frame must raise MergeError."""
    # MultiIndex is prohibited
    trades = trades.set_index("time")
    quotes = quotes.set_index(["time", "bid"])
    with pytest.raises(MergeError, match="right can only have one index"):
        merge_asof(trades, quotes, left_index=True, right_index=True)
def test_on_and_index_left_on(self, trades, quotes):
    """Passing both left_on and left_index must raise MergeError."""
    # "on" parameter and index together is prohibited
    trades = trades.set_index("time")
    quotes = quotes.set_index("time")
    msg = 'Can only pass argument "left_on" OR "left_index" not both.'
    with pytest.raises(MergeError, match=msg):
        merge_asof(
            trades, quotes, left_on="price", left_index=True, right_index=True
        )
def test_on_and_index_right_on(self, trades, quotes):
    """Passing both right_on and right_index must raise MergeError."""
    trades = trades.set_index("time")
    quotes = quotes.set_index("time")
    msg = 'Can only pass argument "right_on" OR "right_index" not both.'
    with pytest.raises(MergeError, match=msg):
        merge_asof(
            trades, quotes, right_on="bid", left_index=True, right_index=True
        )
def test_basic_left_by_right_by(self, trades, asof, quotes):
    """Spelling `by` as separate left_by/right_by gives the same result."""
    # GH14253
    expected = asof
    result = merge_asof(
        trades, quotes, on="time", left_by="ticker", right_by="ticker"
    )
    tm.assert_frame_equal(result, expected)
def test_missing_right_by(self, trades, asof, quotes):
    """A `by` group absent from the right side yields NaN merge columns."""
    expected = asof
    q = quotes[quotes.ticker != "MSFT"]
    result = merge_asof(trades, q, on="time", by="ticker")
    expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
    tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["object", "string"])
def test_multiby_heterogeneous_types(self, dtype):
# GH13936
trades = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
trades = trades.astype({"ticker": dtype, "exch": dtype})
quotes = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
quotes = quotes.astype({"ticker": dtype, "exch": dtype})
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
expected = expected.astype({"ticker": dtype, "exch": dtype})
result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_mismatched_index_dtype(self):
# similar to test_multiby_indexed, but we change the dtype on left.index
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
# different dtype for the index
left.index = left.index - pd.Timestamp(0)
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
msg = "incompatible merge keys"
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, left_index=True, right_index=True, by=["k1", "k2"])
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a", 1.0],
[to_datetime("20160602"), 2, "a", 2.0],
[to_datetime("20160603"), 1, "b", 3.0],
[to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(
MergeError, match="left_by and right_by must be the same length"
):
merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = pd.DataFrame(
[
[
"20160525 13:30:00.023",
"MSFT",
"51.95",
"75",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.038",
"MSFT",
"51.95",
"155",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.77",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.92",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"200",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"300",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"600",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"44",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.074",
"AAPL",
"98.67",
"478343",
"NASDAQ",
np.nan,
np.nan,
],
[
"20160525 13:30:00.075",
"AAPL",
"98.67",
"478343",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.66",
"6",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"30",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"75",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"20",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"35",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"10",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"1000",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"300",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"400",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"600",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"783",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.084",
"AAPL",
"98.64",
"40",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.084",
"AAPL",
"98.55",
"149",
"EDGX",
"98.55",
"98.56",
],
[
"20160525 13:30:00.086",
"AAPL",
"98.56",
"500",
"ARCA",
"98.55",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"647",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"300",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"50",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"50",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"70",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"70",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"1",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"62",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"10",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.104",
"AAPL",
"98.63",
"100",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.105",
"AAPL",
"98.63",
"100",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.105",
"AAPL",
"98.63",
"700",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.106",
"AAPL",
"98.63",
"61",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.107",
"AAPL",
"98.63",
"100",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.107",
"AAPL",
"98.63",
"53",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.108",
"AAPL",
"98.63",
"100",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.108",
"AAPL",
"98.63",
"839",
"ARCA",
"98.62",
"98.63",
],
[
"20160525 13:30:00.115",
"AAPL",
"98.63",
"5",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.118",
"AAPL",
"98.63",
"295",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.118",
"AAPL",
"98.63",
"5",
"EDGX",
"98.62",
"98.63",
],
[
"20160525 13:30:00.128",
"AAPL",
"98.63",
"100",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.128",
"AAPL",
"98.63",
"100",
"NASDAQ",
"98.62",
"98.63",
],
[
"20160525 13:30:00.128",
"MSFT",
"51.92",
"100",
"ARCA",
"51.92",
"51.95",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"100",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"10",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"59",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"31",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"69",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"12",
"NASDAQ",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"12",
"EDGX",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"100",
"ARCA",
"98.61",
"98.63",
],
[
"20160525 13:30:00.129",
"AAPL",
"98.62",
"100",
"ARCA",
"98.61",
"98.63",
],
[
"20160525 13:30:00.130",
"MSFT",
"51.95",
"317",
"ARCA",
"51.93",
"51.95",
],
[
"20160525 13:30:00.130",
"MSFT",
"51.95",
"283",
"ARCA",
"51.93",
"51.95",
],
[
"20160525 13:30:00.135",
"MSFT",
"51.93",
"100",
"EDGX",
"51.92",
"51.95",
],
[
"20160525 13:30:00.135",
"AAPL",
"98.62",
"100",
"ARCA",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"12",
"NASDAQ",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"88",
"NASDAQ",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"162",
"NASDAQ",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.61",
"100",
"BATS",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"61",
"ARCA",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"25",
"ARCA",
"98.61",
"98.62",
],
[
"20160525 13:30:00.144",
"AAPL",
"98.62",
"14",
"ARCA",
"98.61",
"98.62",
],
[
"20160525 13:30:00.145",
"AAPL",
"98.62",
"12",
"ARCA",
"98.6",
"98.63",
],
[
"20160525 13:30:00.145",
"AAPL",
"98.62",
"100",
"ARCA",
"98.6",
"98.63",
],
[
"20160525 13:30:00.145",
"AAPL",
"98.63",
"100",
"NASDAQ",
"98.6",
"98.63",
],
[
"20160525 13:30:00.145",
"AAPL",
"98.63",
"100",
"NASDAQ",
"98.6",
"98.63",
],
],
columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
)
expected["price"] = expected["price"].astype("float64")
expected["quantity"] = expected["quantity"].astype("int64")
expected["bid"] = expected["bid"].astype("float64")
expected["ask"] = expected["ask"].astype("float64")
expected = self.prep_data(expected)
trades = pd.DataFrame(
[
["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"],
["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"],
["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
["20160525 13:30:00.084", "AAPL", "98.6400", "40", "NASDAQ"],
["20160525 13:30:00.084", "AAPL", "98.5500", "149", "EDGX"],
["20160525 13:30:00.086", "AAPL", "98.5600", "500", "ARCA"],
["20160525 13:30:00.104", "AAPL", "98.6300", "647", "EDGX"],
["20160525 13:30:00.104", "AAPL", "98.6300", "300", "EDGX"],
["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "1", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "62", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "10", "NASDAQ"],
["20160525 13:30:00.104", "AAPL", "98.6300", "100", "ARCA"],
["20160525 13:30:00.105", "AAPL", "98.6300", "100", "ARCA"],
["20160525 13:30:00.105", "AAPL", "98.6300", "700", "ARCA"],
["20160525 13:30:00.106", "AAPL", "98.6300", "61", "EDGX"],
["20160525 13:30:00.107", "AAPL", "98.6300", "100", "ARCA"],
["20160525 13:30:00.107", "AAPL", "98.6300", "53", "ARCA"],
["20160525 13:30:00.108", "AAPL", "98.6300", "100", "ARCA"],
["20160525 13:30:00.108", "AAPL", "98.6300", "839", "ARCA"],
["20160525 13:30:00.115", "AAPL", "98.6300", "5", "EDGX"],
["20160525 13:30:00.118", "AAPL", "98.6300", "295", "EDGX"],
["20160525 13:30:00.118", "AAPL", "98.6300", "5", "EDGX"],
["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"],
["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"],
["20160525 13:30:00.128", "MSFT", "51.9200", "100", "ARCA"],
["20160525 13:30:00.129", "AAPL", "98.6200", "100", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "10", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "59", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "31", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "69", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "12", "NASDAQ"],
["20160525 13:30:00.129", "AAPL", "98.6200", "12", "EDGX"],
["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"],
["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"],
["20160525 13:30:00.130", "MSFT", "51.9500", "317", "ARCA"],
["20160525 13:30:00.130", "MSFT", "51.9500", "283", "ARCA"],
["20160525 13:30:00.135", "MSFT", "51.9300", "100", "EDGX"],
["20160525 13:30:00.135", "AAPL", "98.6200", "100", "ARCA"],
["20160525 13:30:00.144", "AAPL", "98.6200", "12", "NASDAQ"],
["20160525 13:30:00.144", "AAPL", "98.6200", "88", "NASDAQ"],
["20160525 13:30:00.144", "AAPL", "98.6200", "162", "NASDAQ"],
["20160525 13:30:00.144", "AAPL", "98.6100", "100", "BATS"],
["20160525 13:30:00.144", "AAPL", "98.6200", "61", "ARCA"],
["20160525 13:30:00.144", "AAPL", "98.6200", "25", "ARCA"],
["20160525 13:30:00.144", "AAPL", "98.6200", "14", "ARCA"],
["20160525 13:30:00.145", "AAPL", "98.6200", "12", "ARCA"],
["20160525 13:30:00.145", "AAPL", "98.6200", "100", "ARCA"],
["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"],
["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"],
],
columns="time,ticker,price,quantity,marketCenter".split(","),
)
trades["price"] = trades["price"].astype("float64")
trades["quantity"] = trades["quantity"].astype("int64")
trades = self.prep_data(trades)
quotes = pd.DataFrame(
[
["20160525 13:30:00.023", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.023", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.041", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.072", "GOOG", "720.50", "720.88"],
["20160525 13:30:00.075", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.92", "51.95"],
["20160525 13:30:00.079", "MSFT", "51.92", "51.95"],
["20160525 13:30:00.080", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.084", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.086", "AAPL", "98.55", "98.63"],
["20160525 13:30:00.088", "AAPL", "98.65", "98.63"],
["20160525 13:30:00.089", "AAPL", "98.63", "98.63"],
["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
["20160525 13:30:00.104", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.105", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.107", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.115", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.115", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.118", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.128", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.128", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.129", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
["20160525 13:30:00.129", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.130", "MSFT", "51.93", "51.95"],
["20160525 13:30:00.130", "MSFT", "51.93", "51.95"],
["20160525 13:30:00.130", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.131", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.131", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.135", "MSFT", "51.92", "51.95"],
["20160525 13:30:00.135", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.136", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.136", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.144", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.144", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.145", "AAPL", "98.61", "98.62"],
["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.145", "AAPL", "98.60", "98.63"],
["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
["20160525 13:30:00.145", "AAPL", "98.60", "98.63"],
],
columns="time,ticker,bid,ask".split(","),
)
quotes["bid"] = quotes["bid"].astype("float64")
quotes["ask"] = quotes["ask"].astype("float64")
quotes = self.prep_data(quotes, dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self, trades, asof, quotes):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(asof)
trades = f(trades)
quotes = f(quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
    def test_valid_join_keys(self, trades, quotes):
        """Invalid key specifications must raise MergeError with clear messages."""
        # left/right "on" keys of different dtypes are rejected
        msg = r"incompatible merge keys \[1\] .* must be the same type"
        with pytest.raises(MergeError, match=msg):
            merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
        # merge_asof supports only a single "on" key
        with pytest.raises(MergeError, match="can only asof on a key for left"):
            merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
        # the "on" key is mandatory
        with pytest.raises(MergeError, match="can only asof on a key for left"):
            merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath, trades, quotes, asof):
q = (
pd.concat([quotes, quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(trades, q, on="time", by="ticker")
expected = self.prep_data(asof)
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
    def test_valid_allow_exact_matches(self, trades, quotes):
        """A non-boolean ``allow_exact_matches`` must raise MergeError."""
        msg = "allow_exact_matches must be boolean, passed foo"
        with pytest.raises(MergeError, match=msg):
            merge_asof(
                trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
            )
    def test_valid_tolerance(self, trades, quotes):
        """Tolerance must be type-compatible with the "on" key and positive."""
        # dti: Timedelta tolerance against a datetime "on" key is accepted
        merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
        # integer: int tolerance against an integer index key is accepted
        merge_asof(
            trades.reset_index(),
            quotes.reset_index(),
            on="index",
            by="ticker",
            tolerance=1,
        )
        msg = r"incompatible tolerance .*, must be compat with type .*"
        # incompat: int tolerance against a datetime key
        with pytest.raises(MergeError, match=msg):
            merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
        # invalid: float tolerance against an integer key
        with pytest.raises(MergeError, match=msg):
            merge_asof(
                trades.reset_index(),
                quotes.reset_index(),
                on="index",
                by="ticker",
                tolerance=1.0,
            )
        msg = "tolerance must be positive"
        # invalid negative Timedelta
        with pytest.raises(MergeError, match=msg):
            merge_asof(
                trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
            )
        # invalid negative integer
        with pytest.raises(MergeError, match=msg):
            merge_asof(
                trades.reset_index(),
                quotes.reset_index(),
                on="index",
                by="ticker",
                tolerance=-1,
            )
    def test_non_sorted(self, trades, quotes):
        """Unsorted "on" keys must raise, naming whichever side is unsorted."""
        trades = trades.sort_values("time", ascending=False)
        quotes = quotes.sort_values("time", ascending=False)
        # we require that we are already sorted on time & quotes
        assert not trades.time.is_monotonic_increasing
        assert not quotes.time.is_monotonic_increasing
        with pytest.raises(ValueError, match="left keys must be sorted"):
            merge_asof(trades, quotes, on="time", by="ticker")
        # fix the left side only; the right side is still rejected
        trades = trades.sort_values("time")
        assert trades.time.is_monotonic_increasing
        assert not quotes.time.is_monotonic_increasing
        with pytest.raises(ValueError, match="right keys must be sorted"):
            merge_asof(trades, quotes, on="time", by="ticker")
        quotes = quotes.sort_values("time")
        assert trades.time.is_monotonic_increasing
        assert quotes.time.is_monotonic_increasing
        # ok, though has dupes
        merge_asof(trades, quotes, on="time", by="ticker")
    @pytest.mark.parametrize(
        "tolerance_ts",
        [Timedelta("1day"), datetime.timedelta(days=1)],
        ids=["Timedelta", "datetime.timedelta"],
    )
    def test_tolerance(self, tolerance_ts, trades, quotes, tolerance):
        """pd.Timedelta and datetime.timedelta tolerances behave identically."""
        result = merge_asof(
            trades, quotes, on="time", by="ticker", tolerance=tolerance_ts
        )
        # the ``tolerance`` fixture holds the pre-computed expected frame
        expected = tolerance
        tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self, unit):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=datetime.UTC,
unit=unit,
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=datetime.UTC,
unit=unit,
),
"value2": list("ABCDE"),
}
)
result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=datetime.UTC,
unit=unit,
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
    def test_index_tolerance(self, trades, quotes, tolerance):
        # GH 15135
        """Tolerance also applies when joining on the index rather than a column."""
        expected = tolerance.set_index("time")
        trades = trades.set_index("time")
        quotes = quotes.set_index("time")
        result = merge_asof(
            trades,
            quotes,
            left_index=True,
            right_index=True,
            by="ticker",
            tolerance=Timedelta("1day"),
        )
        tm.assert_frame_equal(result, expected)
    def test_allow_exact_matches(self, trades, quotes):
        """allow_exact_matches=False joins each trade only with quotes strictly
        earlier than the trade's timestamp (NaN bid/ask when none exists)."""
        result = merge_asof(
            trades, quotes, on="time", by="ticker", allow_exact_matches=False
        )
        # Expected rows: trade fields plus the bid/ask of the latest
        # strictly-earlier same-ticker quote.
        df = pd.DataFrame(
            [
                [
                    "20160525 13:30:00.023",
                    "MSFT",
                    "51.95",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.038",
                    "MSFT",
                    "51.95",
                    "155",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.77",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.92",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "200",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "300",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "600",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "44",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.074",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.66",
                    "6",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "30",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "20",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "35",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "10",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "1000",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "300",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "400",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "600",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "783",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
            ],
            columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
        )
        # strings above are cast to the dtypes the merge actually produces
        df["price"] = df["price"].astype("float64")
        df["quantity"] = df["quantity"].astype("int64")
        df["bid"] = df["bid"].astype("float64")
        df["ask"] = df["ask"].astype("float64")
        expected = self.prep_data(df)
        tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
    def test_allow_exact_matches_and_tolerance(self, trades, quotes):
        """Combining allow_exact_matches=False with a 100ms tolerance: each
        trade matches the latest strictly-earlier same-ticker quote that is
        also within 100ms (NaN bid/ask otherwise)."""
        result = merge_asof(
            trades,
            quotes,
            on="time",
            by="ticker",
            tolerance=Timedelta("100ms"),
            allow_exact_matches=False,
        )
        # Expected rows: trade fields plus the matched quote's bid/ask.
        df = pd.DataFrame(
            [
                [
                    "20160525 13:30:00.023",
                    "MSFT",
                    "51.95",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.038",
                    "MSFT",
                    "51.95",
                    "155",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.77",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.92",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "200",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "300",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "600",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "44",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.074",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.66",
                    "6",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "30",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "20",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "35",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "10",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "1000",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "300",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "400",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "600",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "783",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
            ],
            columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
        )
        # strings above are cast to the dtypes the merge actually produces
        df["price"] = df["price"].astype("float64")
        df["quantity"] = df["quantity"].astype("int64")
        df["bid"] = df["bid"].astype("float64")
        df["ask"] = df["ask"].astype("float64")
        expected = self.prep_data(df)
        tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
    def test_on_specialized_type(self, any_real_numpy_dtype):
        # see gh-13936
        """The type-specialised "on" path works for every real numpy dtype."""
        dtype = np.dtype(any_real_numpy_dtype).type
        df1 = pd.DataFrame(
            {"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
            columns=["symbol", "value"],
        )
        # attribute assignment replaces the existing "value" column in-place
        df1.value = dtype(df1.value)
        df2 = pd.DataFrame(
            {"value": [0, 80, 120, 125], "result": list("xyzw")},
            columns=["value", "result"],
        )
        df2.value = dtype(df2.value)
        df1 = df1.sort_values("value").reset_index(drop=True)
        result = merge_asof(df1, df2, on="value")
        expected = pd.DataFrame(
            {
                "symbol": list("BACEGDF"),
                "value": [2, 5, 25, 78, 79, 100, 120],
                "result": list("xxxxxyz"),
            },
            columns=["symbol", "value", "result"],
        )
        expected.value = dtype(expected.value)
        tm.assert_frame_equal(result, expected)
    def test_on_specialized_type_by_int(self, any_real_numpy_dtype):
        # see gh-13936
        """Type-specialised "on" keys combined with an integer ``by`` key."""
        dtype = np.dtype(any_real_numpy_dtype).type
        df1 = pd.DataFrame(
            {
                "value": [5, 2, 25, 100, 78, 120, 79],
                "key": [1, 2, 3, 2, 3, 1, 2],
                "symbol": list("ABCDEFG"),
            },
            columns=["symbol", "key", "value"],
        )
        # attribute assignment replaces the existing "value" column in-place
        df1.value = dtype(df1.value)
        df2 = pd.DataFrame(
            {"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
            columns=["value", "key", "result"],
        )
        df2.value = dtype(df2.value)
        df1 = df1.sort_values("value").reset_index(drop=True)
        result = merge_asof(df1, df2, on="value", by="key")
        expected = pd.DataFrame(
            {
                "symbol": list("BACEGDF"),
                "key": [2, 1, 3, 3, 2, 2, 1],
                "value": [2, 5, 25, 78, 79, 100, 120],
                # NaN where no same-key right row has value <= the left value
                "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
            },
            columns=["symbol", "key", "value", "result"],
        )
        expected.value = dtype(expected.value)
        tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
    def test_merge_datatype_error_raises(self):
        """Mixing a string and a numeric "on" key must raise MergeError."""
        msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
        left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
        right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
        with pytest.raises(MergeError, match=msg):
            merge_asof(left, right, on="a")
    def test_merge_datatype_categorical_error_raises(self):
        """Categorical "on" keys with different category sets must raise."""
        msg = (
            r"incompatible merge keys \[0\] .* both sides category, "
            "but not equal ones"
        )
        left = pd.DataFrame(
            {"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
        )
        # right categories {a, b, c, X} differ from left's {a, b, c}
        right = pd.DataFrame(
            {
                "right_val": [1, 2, 3, 6, 7],
                "a": pd.Categorical(["a", "X", "c", "X", "b"]),
            }
        )
        with pytest.raises(MergeError, match=msg):
            merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "func", [lambda x: x, to_datetime], ids=["numeric", "datetime"]
    )
    @pytest.mark.parametrize("side", ["left", "right"])
    def test_merge_on_nans(self, func, side):
        # GH 23189
        """Null values in the "on" key raise, naming the offending side."""
        msg = f"Merge keys contain null values on {side} side"
        nulls = func([1.0, 5.0, np.nan])
        non_nulls = func([1.0, 5.0, 10.0])
        df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
        df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
        with pytest.raises(ValueError, match=msg):
            # place the null-bearing frame on the parametrised side
            if side == "left":
                merge_asof(df_null, df, on="a")
            else:
                merge_asof(df, df_null, on="a")
    def test_by_nullable(self, any_numeric_ea_dtype, using_infer_string):
        """Nullable-EA ``by`` keys containing pd.NA produce no matches here."""
        # Note: this test passes if instead of using pd.array we use
        # np.array([np.nan, 1]). Other than that, I (@jbrockmendel)
        # have NO IDEA what the expected behavior is.
        # TODO(GH#32306): may be relevant to the expected behavior here.
        arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype)
        if arr.dtype.kind in ["i", "u"]:
            max_val = np.iinfo(arr.dtype.numpy_dtype).max
        else:
            max_val = np.finfo(arr.dtype.numpy_dtype).max
        # set value s.t. (at least for integer dtypes) arr._values_for_argsort
        # is not an injection
        arr[2] = max_val
        left = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["HELLO", "To", "You"],
                "on_col": [2, 4, 6],
                "value": ["a", "c", "e"],
            }
        )
        right = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["WORLD", "Wide", "Web"],
                "on_col": [1, 2, 6],
                "value": ["b", "d", "f"],
            }
        )
        result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
        # by_col2 never agrees between sides, so every right value is missing
        expected = pd.DataFrame(
            {
                "by_col1": arr,
                "by_col2": ["HELLO", "To", "You"],
                "on_col": [2, 4, 6],
                "value_x": ["a", "c", "e"],
            }
        )
        expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object)
        if using_infer_string:
            expected["value_y"] = expected["value_y"].astype("str")
        tm.assert_frame_equal(result, expected)
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self, using_infer_string):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
if using_infer_string:
expected["value_y"] = expected["value_y"].astype("str")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int16", "m8[ns]", "M8[us]"])
def test_by_dtype(self, dtype):
# GH 55453, GH 22794
left = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [1],
"value": ["b"],
}
)
result = merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [2],
"value_x": ["a"],
"value_y": ["b"],
}
)
tm.assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self, unit):
# GH 27642
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would "
"round left['time']"
)
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5], strict=True)),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5], strict=True)),
columns=["time", "right"],
)
right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]")
expected = pd.DataFrame(
list(
zip(
[0, 5, 10, 15, 20, 25],
[0, 1, 2, 3, 4, 5],
[0, np.nan, 2, 4, np.nan, np.nan],
strict=True,
)
),
columns=["time", "left", "right"],
)
expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]")
result = merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
)
tm.assert_frame_equal(result, expected)
def test_int_type_tolerance(self, any_int_dtype):
# GH #28870
left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
left["a"] = left["a"].astype(any_int_dtype)
right["a"] = right["a"].astype(any_int_dtype)
expected = pd.DataFrame(
{"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
)
expected["a"] = expected["a"].astype(any_int_dtype)
result = merge_asof(left, right, on="a", tolerance=10)
tm.assert_frame_equal(result, expected)
def test_merge_index_column_tz(self):
# GH 29864
index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC")
left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:])
right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]})
result = merge_asof(
left=left, right=right, left_index=True, right_on=["from_date"]
)
expected = pd.DataFrame(
{
"xyz": [0.9, 0.8, 0.7, 0.6],
"from_date": index[1:],
"abc": [2.46] * 3 + [2.19],
},
index=pd.date_range(
"2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC"
),
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
left=right, right=left, right_index=True, left_on=["from_date"]
)
expected = pd.DataFrame(
{
"from_date": index,
"abc": [2.46] * 4 + [2.19],
"xyz": [np.nan, 0.9, 0.8, 0.7, 0.6],
},
index=Index([0, 1, 2, 3, 4]),
)
tm.assert_frame_equal(result, expected)
def test_left_index_right_index_tolerance(self, unit):
# https://github.com/pandas-dev/pandas/issues/35558
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would round dr1"
)
dr1 = pd.date_range(
start="1/1/2020", end="1/20/2020", freq="2D", unit=unit
) + Timedelta(seconds=0.4).as_unit(unit)
dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit)
df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))
expected = pd.DataFrame(
{"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1)
)
result = merge_asof(
df1,
df2,
left_index=True,
right_index=True,
tolerance=Timedelta(seconds=0.5),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
@pytest.mark.parametrize(
"kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}]
)
@pytest.mark.parametrize(
"data",
[["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]],
)
def test_merge_asof_non_numerical_dtype(kwargs, data, infer_string):
# GH#29130
with option_context("future.infer_string", infer_string):
left = pd.DataFrame({"x": data}, index=data)
right = pd.DataFrame({"x": data}, index=data)
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, **kwargs)
def test_merge_asof_non_numerical_dtype_object():
# GH#29130
left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]})
right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]})
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(
left,
right,
left_on="left_val1",
right_on="a",
left_by="a",
right_by="left_val",
)
@pytest.mark.parametrize(
"kwargs",
[
{"right_index": True, "left_index": True},
{"left_on": "left_time", "right_index": True},
{"left_index": True, "right_on": "right"},
],
)
def test_merge_asof_index_behavior(kwargs):
# GH 33463
index = Index([1, 5, 10], name="test")
left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index)
right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
result = merge_asof(left, right, **kwargs)
expected = pd.DataFrame(
{"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_index():
# GH#34488
left = pd.DataFrame({"b": [10, 11, 12]}, index=Index([1, 2, 3], name="a"))
right = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a"))
result = merge_asof(left, right, left_on="a", right_on="a")
expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_multiindex():
# GH#34488
left = pd.DataFrame(
{"b": [10, 11, 12]},
index=pd.MultiIndex.from_arrays([[1, 2, 3], ["a", "b", "c"]], names=["a", "z"]),
)
right = pd.DataFrame(
{"c": [20, 21, 22]},
index=pd.MultiIndex.from_arrays([[1, 2, 3], ["x", "y", "z"]], names=["a", "y"]),
)
result = merge_asof(left, right, left_on="a", right_on="a")
expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeri_column_in_index_object_dtype():
# GH#34488
left = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a"))
right = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a"))
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
left = left.reset_index().set_index(["a", "b"])
right = right.reset_index().set_index(["a", "c"])
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
def test_merge_asof_array_as_on(unit):
# GH#42844
dti = pd.DatetimeIndex(
["2021/01/01 00:37", "2021/01/01 01:40"], dtype=f"M8[{unit}]"
)
right = pd.DataFrame(
{
"a": [2, 6],
"ts": dti,
}
)
ts_merge = pd.date_range(
start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h", unit=unit
)
left = pd.DataFrame({"b": [4, 8, 7]})
result = merge_asof(
left,
right,
left_on=ts_merge,
right_on="ts",
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge})
tm.assert_frame_equal(result, expected)
result = merge_asof(
right,
left,
left_on="ts",
right_on=ts_merge,
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame(
{
"a": [2, 6],
"ts": dti,
"b": [4, 8],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_raise_for_duplicate_columns():
# GH#50102
left = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"])
right = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"])
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, on="a")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="a", right_on="right_val")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="left_val", right_on="a")
@pytest.mark.parametrize(
"dtype",
[
"Int64",
pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),
pytest.param("timestamp[s][pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
def test_merge_asof_extension_dtype(dtype):
# GH 52904
left = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
}
)
right = pd.DataFrame(
{
"join_col": [2, 3, 4],
"right_val": [1, 2, 3],
}
)
left = left.astype({"join_col": dtype})
right = right.astype({"join_col": dtype})
result = merge_asof(left, right, on="join_col")
expected = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
"right_val": [np.nan, 2.0, 3.0],
}
)
expected = expected.astype({"join_col": dtype})
tm.assert_frame_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_merge_asof_pyarrow_td_tolerance():
# GH 56486
ser = pd.Series(
[datetime.datetime(2023, 1, 1)], dtype="timestamp[us, UTC][pyarrow]"
)
df = pd.DataFrame(
{
"timestamp": ser,
"value": [1],
}
)
result = merge_asof(df, df, on="timestamp", tolerance=Timedelta("1s"))
expected = pd.DataFrame(
{
"timestamp": ser,
"value_x": [1],
"value_y": [1],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_read_only_ndarray():
# GH 53513
left = pd.Series([2], index=[2], name="left")
right = pd.Series([1], index=[1], name="right")
# set to read-only
left.index.values.flags.writeable = False
right.index.values.flags.writeable = False
result = merge_asof(left, right, left_index=True, right_index=True)
expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2])
tm.assert_frame_equal(result, expected)
def test_merge_asof_multiby_with_categorical():
# GH 43541
left = pd.DataFrame(
{
"c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]),
"c2": ["x"] * 4,
"t": [1] * 4,
"v": range(4),
}
)
right = pd.DataFrame(
{
"c1": pd.Categorical(["b", "b"], categories=["b", "a"]),
"c2": ["x"] * 2,
"t": [1, 2],
"v": range(2),
}
)
result = merge_asof(
left,
right,
by=["c1", "c2"],
on="t",
direction="forward",
suffixes=["_left", "_right"],
)
expected = pd.DataFrame(
{
"c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]),
"c2": ["x"] * 4,
"t": [1] * 4,
"v_left": range(4),
"v_right": [np.nan, np.nan, 0.0, 0.0],
}
)
tm.assert_frame_equal(result, expected)
| TestAsOfMerge |
python | walkccc__LeetCode | solutions/3447. Assign Elements to Groups with Constraints/3447.py | {
"start": 0,
"end": 780
} | class ____:
def assignElements(self, groups: list[int], elements: list[int]) -> list[int]:
ans = []
elementToMinIndex = {}
for i, element in enumerate(elements):
if element not in elementToMinIndex:
elementToMinIndex[element] = i
for num in groups:
ans.append(self._getMinIndex(num, elementToMinIndex))
return ans
def _getMinIndex(self, num: int, elementToMinIndex: dict[int, int]) -> int:
res = math.inf
i = 1
while i * i <= num:
if num % i != 0:
continue
if i in elementToMinIndex:
res = min(res, elementToMinIndex[i])
if num // i != i and (num // i) in elementToMinIndex:
res = min(res, elementToMinIndex[num // i])
i += 1
return -1 if res == math.inf else res
| Solution |
python | kamyu104__LeetCode-Solutions | Python/sum-of-elements-with-frequency-divisible-by-k.py | {
"start": 46,
"end": 417
} | class ____(object):
def sumDivisibleByK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
mx = max(nums)
cnt = [0]*(mx+1)
for x in nums:
cnt[x] += 1
return sum(x for x in nums if cnt[x]%k == 0)
# Time: O(n)
# Space: O(n)
import collections
# freq table
| Solution |
python | encode__django-rest-framework | tests/test_relations.py | {
"start": 13829,
"end": 19111
} | class ____(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset([
MockObject(
pk=1, name='foo', nested=MockObject(
pk=2, name='bar', nested=MockObject(
pk=7, name="foobar"
)
)
),
MockObject(
pk=3, name='hello', nested=MockObject(
pk=4, name='world', nested=MockObject(
pk=8, name="helloworld"
)
)
),
MockObject(
pk=5, name='harry', nested=MockObject(
pk=6, name='potter', nested=MockObject(
pk=9, name="harrypotter"
)
)
)
])
self.instance = self.queryset.items[2]
self.field = serializers.SlugRelatedField(
slug_field='name', queryset=self.queryset
)
self.nested_field = serializers.SlugRelatedField(
slug_field='nested__name', queryset=self.queryset
)
self.nested_nested_field = serializers.SlugRelatedField(
slug_field='nested__nested__name', queryset=self.queryset
)
# testing nested inside nested relations
def test_slug_related_nested_nested_lookup_exists(self):
instance = self.nested_nested_field.to_internal_value(
self.instance.nested.nested.name
)
assert instance is self.instance
def test_slug_related_nested_nested_lookup_does_not_exist(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.nested_nested_field.to_internal_value('doesnotexist')
msg = excinfo.value.detail[0]
assert msg == \
'Object with nested__nested__name=doesnotexist does not exist.'
def test_slug_related_nested_nested_lookup_invalid_type(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.nested_nested_field.to_internal_value(BadType())
msg = excinfo.value.detail[0]
assert msg == 'Invalid value.'
def test_nested_nested_representation(self):
representation =\
self.nested_nested_field.to_representation(self.instance)
assert representation == self.instance.nested.nested.name
def test_nested_nested_overriding_get_queryset(self):
qs = self.queryset
class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
def get_queryset(self):
return qs
field = NoQuerySetSlugRelatedField(slug_field='nested__nested__name')
field.to_internal_value(self.instance.nested.nested.name)
# testing nested relations
def test_slug_related_nested_lookup_exists(self):
instance = \
self.nested_field.to_internal_value(self.instance.nested.name)
assert instance is self.instance
def test_slug_related_nested_lookup_does_not_exist(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.nested_field.to_internal_value('doesnotexist')
msg = excinfo.value.detail[0]
assert msg == 'Object with nested__name=doesnotexist does not exist.'
def test_slug_related_nested_lookup_invalid_type(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.nested_field.to_internal_value(BadType())
msg = excinfo.value.detail[0]
assert msg == 'Invalid value.'
def test_nested_representation(self):
representation = self.nested_field.to_representation(self.instance)
assert representation == self.instance.nested.name
def test_nested_overriding_get_queryset(self):
qs = self.queryset
class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
def get_queryset(self):
return qs
field = NoQuerySetSlugRelatedField(slug_field='nested__name')
field.to_internal_value(self.instance.nested.name)
# testing non-nested relations
def test_slug_related_lookup_exists(self):
instance = self.field.to_internal_value(self.instance.name)
assert instance is self.instance
def test_slug_related_lookup_does_not_exist(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.field.to_internal_value('doesnotexist')
msg = excinfo.value.detail[0]
assert msg == 'Object with name=doesnotexist does not exist.'
def test_slug_related_lookup_invalid_type(self):
with pytest.raises(serializers.ValidationError) as excinfo:
self.field.to_internal_value(BadType())
msg = excinfo.value.detail[0]
assert msg == 'Invalid value.'
def test_representation(self):
representation = self.field.to_representation(self.instance)
assert representation == self.instance.name
def test_overriding_get_queryset(self):
qs = self.queryset
class NoQuerySetSlugRelatedField(serializers.SlugRelatedField):
def get_queryset(self):
return qs
field = NoQuerySetSlugRelatedField(slug_field='name')
field.to_internal_value(self.instance.name)
| TestNestedSlugRelatedField |
python | wandb__wandb | wandb/sdk/artifacts/_generated/project_artifact_collections.py | {
"start": 778,
"end": 1057
} | class ____(GQLResult):
total_count: int = Field(alias="totalCount")
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[ProjectArtifactCollectionsProjectArtifactTypeArtifactCollectionsEdges]
| ProjectArtifactCollectionsProjectArtifactTypeArtifactCollections |
python | pytorch__pytorch | test/distributed/_composable/test_replicate.py | {
"start": 9232,
"end": 10627
} | class ____(ReplicateTest):
@skip_if_lt_x_gpu(2)
@unittest.skipIf(TEST_XPU, "XPU does not support gloo backend")
def test_replicate_fully_shard_init(self):
class ToyModel(nn.Module):
def __init__(self, dim: int):
super().__init__()
self.linears = nn.Sequential(
nn.Linear(dim, dim, bias=False),
nn.Linear(dim, dim, bias=False),
nn.Linear(dim, dim, bias=False),
)
self.proj = nn.Linear(dim, dim, bias=False)
def forward(self, x: torch.Tensor):
y = self.linears(x)
y = self.proj(y)
return y
self._init_pg()
torch.accelerator.set_device_index(self.rank)
dim = 3
bz = 2
model = ToyModel(dim).to(device_type)
for linear in model.linears:
fully_shard(linear)
fully_shard(model.linears)
replicate(model, device_id=torch.accelerator.current_device_index())
for linear in model.linears:
self.assertTrue(isinstance(linear.weight, DTensor))
inp = torch.rand(bz, dim)
# trigger lazy init
model(inp).sum()
for linear in model.linears:
self.assertTrue(isinstance(linear.weight, DTensor))
if __name__ == "__main__":
run_tests()
| ReplicateFullyShardInit |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 1291,
"end": 2102
} | class ____:
def __init__(self) -> None:
self.called = []
def _document_changed(self, event): self.called.append('_document_changed')
def _document_patched(self, event): self.called.append('_document_patched')
def _document_model_changed(self, event): self.called.append('_document_model_changed')
def _column_data_changed(self, event): self.called.append('_column_data_changed')
def _columns_streamed(self, event): self.called.append('_columns_streamed')
def _columns_patched(self, event): self.called.append('_columns_patched')
def _session_callback_added(self, event): self.called.append('_session_callback_added')
def _session_callback_removed(self, event): self.called.append('_session_callback_removed')
| FakeFullDispatcher |
python | PrefectHQ__prefect | src/prefect/events/clients.py | {
"start": 14961,
"end": 16196
} | class ____(PrefectEventsClient):
"""A Prefect Events client that streams events to a Prefect Cloud Workspace"""
def __init__(
self,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
reconnection_attempts: int = 10,
checkpoint_every: int = 700,
):
"""
Args:
api_url: The base URL for a Prefect Cloud workspace
api_key: The API of an actor with the manage_events scope
reconnection_attempts: When the client is disconnected, how many times
the client should attempt to reconnect
checkpoint_every: How often the client should sync with the server to
confirm receipt of all previously sent events
"""
api_url, api_key = _get_api_url_and_key(api_url, api_key)
super().__init__(
api_url=api_url,
reconnection_attempts=reconnection_attempts,
checkpoint_every=checkpoint_every,
)
self._connect = websocket_connect(
self._events_socket_url,
additional_headers={"Authorization": f"bearer {api_key}"},
)
SEEN_EVENTS_SIZE = 500_000
SEEN_EVENTS_TTL = 120
| PrefectCloudEventsClient |
python | ray-project__ray | python/ray/util/collective/types.py | {
"start": 3382,
"end": 3481
} | class ____:
reduceOp = ReduceOp.SUM
timeout_ms = unset_timeout_ms
@dataclass
| AllReduceOptions |
python | huggingface__transformers | src/transformers/models/ijepa/modeling_ijepa.py | {
"start": 3146,
"end": 7567
} | class ____(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: IJepaConfig, use_mask_token: bool = False) -> None:
super().__init__()
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
self.patch_embeddings = IJepaPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
interpolate_pos_encoding: bool = False,
) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if bool_masked_pos is not None:
seq_length = embeddings.shape[1]
mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
# add positional encoding to each token
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| IJepaEmbeddings |
python | xlwings__xlwings | xlwings/_xlwindows.py | {
"start": 46262,
"end": 48329
} | class ____(base_classes.Shape):
def __init__(self, xl):
self.xl = xl
@property
def api(self):
return self.xl
@property
def name(self):
return self.xl.Name
@property
def parent(self):
return Sheet(xl=self.xl.Parent)
@property
def type(self):
return shape_types_i2s[self.xl.Type]
@property
def left(self):
return self.xl.Left
@left.setter
def left(self, value):
self.xl.Left = value
@property
def top(self):
return self.xl.Top
@top.setter
def top(self, value):
self.xl.Top = value
@property
def width(self):
return self.xl.Width
@width.setter
def width(self, value):
self.xl.Width = value
@property
def height(self):
return self.xl.Height
@height.setter
def height(self, value):
self.xl.Height = value
def delete(self):
self.xl.Delete()
@name.setter
def name(self, value):
self.xl.Name = value
@property
def index(self):
return self.xl.Index
def activate(self):
self.xl.Activate()
def scale_height(self, factor, relative_to_original_size, scale):
self.xl.ScaleHeight(
Scale=scaling[scale],
RelativeToOriginalSize=relative_to_original_size,
Factor=factor,
)
def scale_width(self, factor, relative_to_original_size, scale):
self.xl.ScaleWidth(
Scale=scaling[scale],
RelativeToOriginalSize=relative_to_original_size,
Factor=factor,
)
@property
def text(self):
if self.xl.TextFrame2.HasText:
return self.xl.TextFrame2.TextRange.Text
@text.setter
def text(self, value):
self.xl.TextFrame2.TextRange.Text = value
@property
def font(self):
return Font(self, self.xl.TextFrame2.TextRange.Font)
@property
def characters(self):
return Characters(parent=self, xl=self.xl.TextFrame2.TextRange.GetCharacters)
| Shape |
python | coleifer__peewee | tests/sqlite.py | {
"start": 100520,
"end": 101196
} | class ____(ModelTestCase):
database = get_in_memory_db()
def test_deterministic(self):
db = self.database
@db.func(deterministic=True)
def pylower(s):
if s is not None:
return s.lower()
class Reg(db.Model):
key = TextField()
class Meta:
indexes = [
SQL('create unique index "reg_pylower_key" '
'on "reg" (pylower("key"))')]
db.create_tables([Reg])
Reg.create(key='k1')
with self.assertRaises(IntegrityError):
with db.atomic():
Reg.create(key='K1')
| TestDeterministicFunction |
python | bokeh__bokeh | src/bokeh/models/ranges.py | {
"start": 10208,
"end": 18013
} | class ____(Range):
''' A Range of values for a categorical dimension.
In addition to supplying ``factors`` as a keyword argument to the
``FactorRange`` initializer, you may also instantiate with a sequence of
positional arguments:
.. code-block:: python
FactorRange("foo", "bar") # equivalent to FactorRange(factors=["foo", "bar"])
Users will normally supply categorical values directly:
.. code-block:: python
p.scatter(x=["foo", "bar"], ...)
BokehJS will create a mapping from ``"foo"`` and ``"bar"`` to a numerical
coordinate system called *synthetic coordinates*. In the simplest cases,
factors are separated by a distance of 1.0 in synthetic coordinates,
however the exact mapping from factors to synthetic coordinates is
affected by the padding properties as well as whether the number of levels
the factors have.
Users typically do not need to worry about the details of this mapping,
however it can be useful to fine tune positions by adding offsets. When
supplying factors as coordinates or values, it is possible to add an
offset in the synthetic coordinate space by adding a final number value
to a factor tuple. For example:
.. code-block:: python
p.scatter(x=[("foo", 0.3), ...], ...)
will position the first circle at an ``x`` position that is offset by
adding 0.3 to the synthetic coordinate for ``"foo"``.
'''
factors = FactorSeq(default=[], help="""
A sequence of factors to define this categorical range.
Factors may have 1, 2, or 3 levels. For 1-level factors, each factor is
simply a string. For example:
.. code-block:: python
FactorRange(factors=["sales", "marketing", "engineering"])
defines a range with three simple factors that might represent different
units of a business.
For 2- and 3- level factors, each factor is a tuple of strings:
.. code-block:: python
FactorRange(factors=[
["2016", "sales"], ["2016", "marketing"], ["2016", "engineering"],
["2017", "sales"], ["2017", "marketing"], ["2017", "engineering"],
])
defines a range with six 2-level factors that might represent the three
business units, grouped by year.
Note that factors and sub-factors *may only be strings*.
""")
factor_padding = Float(default=0.0, help="""
How much padding to add in between all lowest-level factors. When
``factor_padding`` is non-zero, every factor in every group will have the
padding value applied.
""")
subgroup_padding = Float(default=0.8, help="""
How much padding to add in between mid-level groups of factors. This
property only applies when the overall factors have three levels. For
example with:
.. code-block:: python
FactorRange(factors=[
['foo', 'A', '1'], ['foo', 'A', '2'], ['foo', 'A', '3'],
['foo', 'B', '2'],
['bar', 'A', '1'], ['bar', 'A', '2']
])
This property dictates how much padding to add between the three factors
in the `['foo', 'A']` group, and between the two factors in the
[`bar`]
""")
group_padding = Float(default=1.4, help="""
How much padding to add in between top-level groups of factors. This
property only applies when the overall range factors have either two or
three levels. For example, with:
.. code-block:: python
FactorRange(factors=[["foo", "1"], ["foo", "2"], ["bar", "1"]])
The top level groups correspond to ``"foo"`` and ``"bar"``, and the
group padding will be applied between the factors ``["foo", "2"]`` and
``["bar", "1"]``
""")
range_padding = Float(default=0, help="""
How much padding to add around the outside of computed range bounds.
When ``range_padding_units`` is set to ``"percent"``, the span of the
range span is expanded to make the range ``range_padding`` percent larger.
When ``range_padding_units`` is set to ``"absolute"``, the start and end
of the range span are extended by the amount ``range_padding``.
""")
range_padding_units = Enum(PaddingUnits, default="percent", help="""
Whether the ``range_padding`` should be interpreted as a percentage, or
as an absolute quantity. (default: ``"percent"``)
""")
start = Readonly(Float, default=0, help="""
The start of the range, in synthetic coordinates.
.. note::
Synthetic coordinates are only computed in the browser, based on the
factors and various padding properties. The value of ``start`` will only
be available in situations where bidirectional communication is
available (e.g. server, notebook).
""")
end = Readonly(Float, default=0, help="""
The end of the range, in synthetic coordinates.
.. note::
Synthetic coordinates are only computed in the browser, based on the
factors and various padding properties. The value of ``end`` will only
be available in situations where bidirectional communication is
available (e.g. server, notebook).
""")
bounds = Nullable(MinMaxBounds(accept_datetime=False), help="""
The bounds (in synthetic coordinates) that the range is allowed to go to.
Typically used to prevent the user from panning/zooming/etc away from the
data.
.. note::
Synthetic coordinates are only computed in the browser, based on the
factors and various padding properties. Some experimentation may be
required to arrive at bounds suitable for specific situations.
By default, the bounds will be None, allowing your plot to pan/zoom as far
as you want. If bounds are 'auto' they will be computed to be the same as
the start and end of the ``FactorRange``.
""")
min_interval = Nullable(Float, help="""
The level that the range is allowed to zoom in, expressed as the
minimum visible interval in synthetic coordinates. If set to ``None``
(default), the minimum interval is not bounded.
The default "width" of a category is 1.0 in synthetic coordinates.
However, the distance between factors is affected by the various
padding properties and whether or not factors are grouped.
""")
max_interval = Nullable(Float, help="""
The level that the range is allowed to zoom out, expressed as the
maximum visible interval in synthetic coordinates.. Note that ``bounds``
can impose an implicit constraint on the maximum interval as well.
The default "width" of a category is 1.0 in synthetic coordinates.
However, the distance between factors is affected by the various
padding properties and whether or not factors are grouped.
""")
def __init__(self, *args, **kwargs) -> None:
if args and "factors" in kwargs:
raise ValueError("'factors' keyword cannot be used with positional arguments")
elif args:
kwargs['factors'] = list(args)
super().__init__(**kwargs)
@error(DUPLICATE_FACTORS)
def _check_duplicate_factors(self):
dupes = [item for item, count in Counter(self.factors).items() if count > 1]
if dupes:
return f"duplicate factors found: {', '.join(repr(x) for x in dupes)}"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| FactorRange |
python | jamielennox__requests-mock | tests/test_custom_matchers.py | {
"start": 863,
"end": 1997
} | class ____(base.TestCase):
def assertMatchAll(self, resp):
self.assertEqual(200, resp.status_code)
self.assertEqual(resp.text, u'data')
@requests_mock.Mocker()
def test_custom_matcher(self, mocker):
mocker.add_matcher(match_all)
resp = requests.get('http://any/thing')
self.assertMatchAll(resp)
@requests_mock.Mocker()
def test_failing_matcher(self, mocker):
failer = FailMatcher()
mocker.add_matcher(match_all)
mocker.add_matcher(failer)
resp = requests.get('http://any/thing')
self.assertMatchAll(resp)
self.assertTrue(failer.called)
@requests_mock.Mocker()
def test_some_pass(self, mocker):
def matcher_a(request):
if 'a' in request.url:
return match_all(request)
return None
mocker.add_matcher(matcher_a)
resp = requests.get('http://any/thing')
self.assertMatchAll(resp)
self.assertRaises(requests_mock.NoMockAddress,
requests.get,
'http://other/thing')
| CustomMatchersTests |
python | redis__redis-py | redis/event.py | {
"start": 7949,
"end": 8184
} | class ____(EventListenerInterface):
"""
Listener that performs re-authentication of given connection.
"""
def listen(self, event: AfterConnectionReleasedEvent):
event.connection.re_auth()
| ReAuthConnectionListener |
python | google__jax | tests/lax_test.py | {
"start": 200717,
"end": 213870
} | class ____(jtu.JaxTestCase):
def _test_ragged_dot(self, m, k, n, num_groups, dtype):
"""Tests ragged_dot.
The ragged_dot is tested against numpy reference implementation, and by
running JAX compilation.
Raises:
SkipTest: in the case dtype is not supported.
"""
if (dtype == np.float16):
raise SkipTest(f"unsupported dtype for ragged_dot: {dtype}")
lhs_shape = (m, k)
rhs_shape = (num_groups, k, n)
def group_sizes(m, num_groups):
ends_no_final = jnp.sort(self.rng().choice(m, size=num_groups - 1))
ends = jnp.concatenate(
[ends_no_final, jnp.array([m], dtype=ends_no_final.dtype)])
starts = jnp.concatenate(
[jnp.zeros(1, dtype=ends_no_final.dtype), ends_no_final])
return ends - starts
rng = jtu.rand_small(self.rng())
args_maker = lambda: [
rng(lhs_shape, dtype),
rng(rhs_shape, dtype),
group_sizes(m, num_groups),
]
self._CompileAndCheck(lax.ragged_dot, args_maker)
self._CheckAgainstNumpy(
lax_reference.ragged_dot, lax.ragged_dot, args_maker)
@jtu.sample_product(
[
{"m": 64, "k": 4, "n": 3, "num_groups": 1},
{"m": 64, "k": 9, "n": 8, "num_groups": 2},
],
dtype=jtu.dtypes.all_floating,
)
def test_ragged_dot(self, m, k, n, num_groups, dtype):
return self._test_ragged_dot(m, k, n, num_groups, dtype)
@parameterized.parameters([True, False])
def test_ragged_dot_use_ragged_dot_instruction(self, use_instruction):
with config.jax_ragged_dot_use_ragged_dot_instruction(use_instruction):
self._test_ragged_dot(16, 4, 3, 2, jnp.float32)
if jtu.test_device_matches(["tpu"]) and use_instruction:
self.assertIn(
"chlo.ragged_dot",
jax.jit(lax.ragged_dot)
.lower(
core.ShapedArray((16, 4), dtype=jnp.float32),
core.ShapedArray((2, 4, 3), dtype=jnp.float32),
core.ShapedArray((2,), dtype=jnp.int32),
)
.as_text(dialect="stablehlo"),
)
@parameterized.parameters(
{"m": 5, "k": 4, "n": 3, "num_groups": 1},
{"m": 5, "k": 4, "n": 3, "num_groups": 2},
{"m": 9, "k": 4, "n": 3, "num_groups": 1},
{"m": 10, "k": 9, "n": 8, "num_groups": 2},
)
def test_ragged_dot_small_m(self, m, k, n, num_groups):
if not jtu.if_cloud_tpu_at_least(2025, 10, 14):
self.skipTest("Requires libtpu built after 2025-10-14")
lhs_shape = (m, k)
rhs_shape = (num_groups, k, n)
group_sizes_shape = (num_groups,)
args_maker = lambda: [
jnp.ones(lhs_shape, dtype=jnp.float32),
jnp.ones(rhs_shape, dtype=jnp.float32),
jnp.ones(group_sizes_shape, dtype=jnp.int32),
]
self._CompileAndCheck(lax.ragged_dot, args_maker)
@parameterized.parameters(
{
"lhs_shape": lhs_shape,
"rhs_shape": rhs_shape,
"group_sizes_shape": group_sizes_shape,
"ragged_dot_dimension_numbers": ragged_dot_dimension_numbers,
"err_msg": err_msg,
}
for lhs_shape, rhs_shape, group_sizes_shape, ragged_dot_dimension_numbers, err_msg in [
(
[11, 5],
[3, 5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[0, 1],
rhs_group_dimensions=[0],
),
"ragged_dot_general expects exactly one lhs ragged dimension",
),
(
[11, 5],
[3, 5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[2],
rhs_group_dimensions=[0],
),
(
"ragged_dot_general requires lhs ragged dimension numbers to "
"be nonnegative and less than the number of axes of the lhs"
),
),
(
[11, 5],
[3, 5, 7],
[2, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[0],
),
r"expected group_sizes to have shape \(3,\), got \(2, 3\)",
),
(
[19, 17, 11, 5],
[3, 19, 5, 7],
[19, 11, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([3], [2]), ([0], [1])),
lhs_ragged_dimensions=[2],
rhs_group_dimensions=[0],
),
(
r"expected group_sizes to have shape \(19, 17, 3\), "
r"got \(19, 11, 3\)"
),
),
(
[19, 11, 17, 5],
[19, 17, 5, 7],
[19, 11, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([2, 3], [1, 2]), ([0], [0])),
lhs_ragged_dimensions=[3],
rhs_group_dimensions=[],
),
(
r"expected group_sizes to have shape \(19, 17, 3\), "
r"got \(19, 11, 3\)"
),
),
(
[17, 19, 11, 5],
[17, 19, 5, 7],
[19, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([3], [2]), ([0, 1], [0, 1])),
lhs_ragged_dimensions=[1],
rhs_group_dimensions=[],
),
(
r"expected group_sizes to have shape \(17, 3\), "
r"got \(19, 3\)"
),
),
(
[19, 11, 5],
[19, 5, 7],
[19, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([2], [1]), ([0], [0])),
lhs_ragged_dimensions=[1],
rhs_group_dimensions=[0],
),
(
"ragged_dot_general requires rhs group dimension numbers to "
"be distinct from contracting and batch dimensions"
),
),
(
[11, 3],
[3, 3, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[1],
),
(
"ragged_dot_general requires rhs group dimension numbers to "
"be distinct from contracting and batch dimensions"
),
),
(
[11, 5],
[3, 5, 7],
[2],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[0],
),
"expected rhs group dimension size to be 2, got 3",
),
(
[2, 11, 5],
[3, 2, 5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([2], [2]), ([0], [1])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[0],
),
(
"ragged_dot_general requires zero group dimensions in "
"the rhs when lhs ragged dimension is contracting or batch"
),
),
(
[11, 5],
[3, 5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[1],
rhs_group_dimensions=[0],
),
(
"ragged_dot_general requires zero group dimensions in "
"the rhs when lhs ragged dimension is contracting or batch"
),
),
(
[11, 5],
[5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [0]), ([], [])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[],
),
(
"ragged_dot_general requires exactly one rhs group dimension "
"when lhs ragged dimension is noncontracting"
),
),
]
)
def test_ragged_dot_general_shape_inference_failure(
self, lhs_shape, rhs_shape, group_sizes_shape,
ragged_dot_dimension_numbers, err_msg):
lhs = jnp.ones(lhs_shape, dtype=jnp.float32)
rhs = jnp.ones(rhs_shape, dtype=jnp.float32)
group_sizes = jnp.ones(group_sizes_shape, dtype=jnp.int32)
with self.assertRaisesRegex(TypeError, err_msg):
lax.ragged_dot_general(lhs, rhs, group_sizes,
ragged_dot_dimension_numbers)
@parameterized.parameters(
{
"lhs_shape": lhs_shape,
"rhs_shape": rhs_shape,
"group_sizes_shape": group_sizes_shape,
"ragged_dnums": ragged_dnums,
"out_shape": out_shape,
}
for lhs_shape, rhs_shape, group_sizes_shape, ragged_dnums, out_shape in [
( # Ragged non-contracting.
[11, 5],
[3, 5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [1]), ([], [])),
lhs_ragged_dimensions=[0],
rhs_group_dimensions=[0],
),
(11, 7),
),
( # Ragged contracting.
[11, 5],
[5, 7],
[3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([1], [0]), ([], [])),
lhs_ragged_dimensions=[1],
rhs_group_dimensions=[],
),
(3, 11, 7),
),
( # Ragged contracting with batch dimensions.
[2, 11, 5],
[2, 5, 7],
[2, 3],
lax.RaggedDotDimensionNumbers(
dot_dimension_numbers=(([2], [1]), ([0], [0]),
),
lhs_ragged_dimensions=[2],
rhs_group_dimensions=[],
),
(3, 2, 11, 7),
),
]
)
def test_ragged_dot_general_shape_inference_success(
self, lhs_shape, rhs_shape, group_sizes_shape, ragged_dnums, out_shape):
lhs = jnp.ones(lhs_shape, dtype=jnp.float32)
rhs = jnp.ones(rhs_shape, dtype=jnp.float32)
group_sizes = jnp.ones(group_sizes_shape, dtype=jnp.int32)
if jtu.test_device_matches(["tpu"]):
actual_shape = lax_internal._ragged_dot_general_shape_rule(
lhs, rhs, group_sizes, ragged_dot_dimension_numbers=ragged_dnums,
precision=jax.lax.Precision.DEFAULT,
preferred_element_type=jnp.float32,
)
else:
actual_shape = lax.ragged_dot_general(
lhs, rhs, group_sizes, ragged_dnums
).shape
self.assertEqual(actual_shape, out_shape)
@parameterized.product(
batch_size=[3, 5],
m=[128, 1024],
k=[128, 1024],
n=[128, 1024],
num_groups=[2, 4],
)
def test_ragged_dot_general_vmap(
self, batch_size: int, m: int, k: int, n: int, num_groups: int
):
if (jtu.test_device_matches(["tpu"])):
raise SkipTest("batched ragged_dot not yet supported on TPU")
lhs_shape = (batch_size, m, k)
rhs_shape = (batch_size, num_groups, k, n)
dtype = jnp.float32
def make_group_sizes(m, num_groups):
ends_no_final = jnp.sort(self.rng().choice(m, size=num_groups - 1))
ends = jnp.concatenate(
[ends_no_final, jnp.array([m], dtype=ends_no_final.dtype)])
starts = jnp.concatenate(
[jnp.zeros(1, dtype=ends_no_final.dtype), ends_no_final])
return ends - starts
rng = jtu.rand_small(self.rng())
args_maker = lambda: [
rng(lhs_shape, dtype),
rng(rhs_shape, dtype),
jnp.array([make_group_sizes(m, num_groups) for _ in range(batch_size)]),
]
lhs, rhs, group_sizes = args_maker()
out_dtype = jnp.float32
precision = jax.lax.Precision.HIGHEST
ragged_dot = partial(
jax.lax.ragged_dot,
preferred_element_type=out_dtype,
precision=precision,
)
tol = 1e-5
batch_res = jax.vmap(ragged_dot)(lhs, rhs, group_sizes)
for i in range(batch_size):
# The ragged_dot does not zero out the output in the case sum(group_sizes)
# < m, hence we need to compare only the valid part of the output.
upper_bound = group_sizes[i].sum(axis=0)
ref_res = ragged_dot(lhs[i], rhs[i], group_sizes[i])[0:upper_bound, :]
self.assertArraysAllClose(
batch_res[i, 0:upper_bound, :], ref_res, rtol=tol, atol=tol
)
| RaggedTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airflow/dagster_airflow/operators/dagster_operator.py | {
"start": 500,
"end": 4768
} | class ____(BaseOperator):
"""DagsterOperator.
Uses the dagster graphql api to run and monitor dagster jobs on remote dagster infrastructure
Parameters:
repository_name (str): the name of the repository to use
repostitory_location_name (str): the name of the repostitory location to use
job_name (str): the name of the job to run
run_config (Optional[Dict[str, Any]]): the run config to use for the job run
dagster_conn_id (Optional[str]): the id of the dagster connection, airflow 2.0+ only
organization_id (Optional[str]): the id of the dagster cloud organization
deployment_name (Optional[str]): the name of the dagster cloud deployment
user_token (Optional[str]): the dagster cloud user token to use
"""
template_fields = ["run_config"]
template_ext = (".yaml", ".yml", ".json")
ui_color = "#663399"
ui_fgcolor = "#e0e3fc"
operator_extra_links = (DagsterLink(),)
@apply_defaults
def __init__(
self,
dagster_conn_id="dagster_default",
run_config=None,
repository_name="",
repostitory_location_name="",
job_name="",
# params for airflow < 2.0.0 were custom connections aren't supported
deployment_name="prod",
user_token=None,
organization_id="",
url="https://dagster.cloud/",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = None
self.dagster_conn_id = dagster_conn_id if is_airflow_2_loaded_in_environment() else None
self.run_config = run_config or {}
self.repository_name = repository_name
self.repostitory_location_name = repostitory_location_name
self.job_name = job_name
self.user_token = user_token
self.url = url
self.organization_id = organization_id
self.deployment_name = deployment_name
self.hook = DagsterHook(
dagster_conn_id=self.dagster_conn_id,
user_token=self.user_token,
url=f"{self.url}{self.organization_id}/{self.deployment_name}/graphql",
)
def _is_json(self, blob):
try:
json.loads(blob)
except ValueError:
return False
return True
def pre_execute(self, context):
# force re-rendering to ensure run_config renders any templated
# content from run_config that couldn't be accessed on init
setattr(
self,
"run_config",
self.render_template(self.run_config, context),
)
def on_kill(self):
self.log.info("Terminating Run")
self.hook.terminate_run(
run_id=self.run_id, # pyright: ignore[reportArgumentType]
)
def execute(self, context):
try:
return self._execute(context)
except Exception as e:
raise e
def _execute(self, context):
self.run_id = self.hook.launch_run(
repository_name=self.repository_name,
repostitory_location_name=self.repostitory_location_name,
job_name=self.job_name,
run_config=self.run_config,
)
# save relevant info in xcom for use in links
context["task_instance"].xcom_push(key="run_id", value=self.run_id)
context["task_instance"].xcom_push(
key="organization_id",
value=self.hook.organization_id if self.dagster_conn_id else self.organization_id,
)
context["task_instance"].xcom_push(
key="deployment_name",
value=self.hook.deployment_name if self.dagster_conn_id else self.deployment_name,
)
self.log.info("Run Starting....")
self.log.info(
"Run tracking: %s",
LINK_FMT.format(
organization_id=self.hook.organization_id,
deployment_name=self.hook.deployment_name,
run_id=self.run_id,
),
)
self.hook.wait_for_run(
run_id=self.run_id,
)
@superseded(
additional_warn_text=(
"`DagsterCloudOperator` has been superseded "
"by the functionality in the `dagster-airlift` library."
)
)
| DagsterOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 584788,
"end": 585614
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for EnterpriseAdministratorInvitation."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("EnterpriseAdministratorInvitationEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("EnterpriseAdministratorInvitation"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| EnterpriseAdministratorInvitationConnection |
python | walkccc__LeetCode | solutions/2530. Maximal Score After Applying K Operations/2530.py | {
"start": 0,
"end": 294
} | class ____:
def maxKelements(self, nums: list[int], k: int) -> int:
ans = 0
maxHeap = [-num for num in nums]
heapq.heapify(maxHeap)
for _ in range(k):
num = -heapq.heappop(maxHeap)
ans += num
heapq.heappush(maxHeap, -math.ceil(num / 3))
return ans
| Solution |
python | fluentpython__example-code-2e | 24-class-metaprog/sentinel/sentinel_test.py | {
"start": 104,
"end": 811
} | class ____(Sentinel):
repr = '***SentinelRepr***'
def test_repr():
assert repr(PlainSentinel) == 'PlainSentinel'
def test_cannot_instantiate():
with pytest.raises(TypeError) as e:
PlainSentinel()
msg = "'PlainSentinel' is a sentinel and cannot be instantiated"
assert msg in str(e.value)
def test_custom_repr():
assert repr(SentinelCustomRepr) == '***SentinelRepr***'
def test_pickle():
s = pickle.dumps(SentinelCustomRepr)
ps = pickle.loads(s)
assert ps is SentinelCustomRepr
def test_sentinel_comes_ready_to_use():
assert repr(Sentinel) == 'Sentinel'
s = pickle.dumps(Sentinel)
ps = pickle.loads(s)
assert ps is Sentinel
| SentinelCustomRepr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.