language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
scipy__scipy
|
scipy/sparse/_matrix.py
|
{
"start": 0,
"end": 5022
}
|
class ____:
    """This class provides a base class for all sparse matrix classes.
    It cannot be instantiated. Most of the work is provided by subclasses.
    """

    # Sparse matrices are strictly 2-D (unlike sparse arrays).
    _allow_nd = (2,)

    # The `_*_container` properties resolve the concrete matrix class for each
    # storage format.  The imports are deferred to access time -- presumably to
    # avoid circular imports between the sparse submodules (TODO confirm).
    @property
    def _bsr_container(self):
        from ._bsr import bsr_matrix

        return bsr_matrix

    @property
    def _coo_container(self):
        from ._coo import coo_matrix

        return coo_matrix

    @property
    def _csc_container(self):
        from ._csc import csc_matrix

        return csc_matrix

    @property
    def _csr_container(self):
        from ._csr import csr_matrix

        return csr_matrix

    @property
    def _dia_container(self):
        from ._dia import dia_matrix

        return dia_matrix

    @property
    def _dok_container(self):
        from ._dok import dok_matrix

        return dok_matrix

    @property
    def _lil_container(self):
        from ._lil import lil_matrix

        return lil_matrix

    # Restore matrix multiplication: for the matrix classes, `*` means matmul
    # (delegated to the shared dispatch helpers defined elsewhere).
    def __mul__(self, other):
        return self._matmul_dispatch(other)

    def __rmul__(self, other):
        return self._rmatmul_dispatch(other)

    # Restore matrix power: `m ** k` computes a matrix power, not elementwise.
    def __pow__(self, power):
        from .linalg import matrix_power

        return matrix_power(self, power)

    ## Backward compatibility

    def set_shape(self, shape):
        """Set the shape of the matrix in-place"""
        # Make sure copy is False since this is in place
        # Make sure format is unchanged because we are doing a __dict__ swap
        new_self = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_self.__dict__

    def get_shape(self):
        """Get the shape of the matrix"""
        return self._shape

    # Writable `shape` property is matrix-specific legacy behavior; assignment
    # reshapes in place via set_shape.
    shape = property(fget=get_shape, fset=set_shape,
                     doc="Shape of the matrix")

    def asfptype(self):
        """Upcast matrix to a floating point format (if necessary)"""
        return self._asfptype()

    def getmaxprint(self):
        """Maximum number of elements to display when printed."""
        return self._getmaxprint()

    def getformat(self):
        """Matrix storage format"""
        return self.format

    def getnnz(self, axis=None):
        """Number of stored values, including explicit zeros.

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole array, in
            each column, or in each row.
        """
        return self._getnnz(axis=axis)

    def getH(self):
        """Return the Hermitian transpose of this matrix.

        See Also
        --------
        numpy.matrix.getH : NumPy's implementation of `getH` for matrices
        """
        return self.conjugate().transpose()

    def getcol(self, j):
        """Returns a copy of column j of the matrix, as an (m x 1) sparse
        matrix (column vector).
        """
        return self._getcol(j)

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n) sparse
        matrix (row vector).
        """
        return self._getrow(i)

    def todense(self, order=None, out=None):
        """
        Return a dense representation of this sparse matrix.

        Parameters
        ----------
        order : {'C', 'F'}, optional
            Whether to store multi-dimensional data in C (row-major)
            or Fortran (column-major) order in memory. The default
            is 'None', which provides no ordering guarantees.
            Cannot be specified in conjunction with the `out`
            argument.
        out : ndarray, 2-D, optional
            If specified, uses this array (or `numpy.matrix`) as the
            output buffer instead of allocating a new array to
            return. The provided array must have the same shape and
            dtype as the sparse matrix on which you are calling the
            method.

        Returns
        -------
        arr : numpy.matrix, 2-D
            A NumPy matrix object with the same shape and containing
            the same data represented by the sparse matrix, with the
            requested memory order. If `out` was passed and was an
            array (rather than a `numpy.matrix`), it will be filled
            with the appropriate values and returned wrapped in a
            `numpy.matrix` object that shares the same memory.
        """
        return super().todense(order, out)

    @classmethod
    def __class_getitem__(cls, arg, /):
        """
        Return a parametrized wrapper around the `~scipy.sparse.spmatrix` type.

        .. versionadded:: 1.16.0

        Returns
        -------
        alias : types.GenericAlias
            A parametrized `~scipy.sparse.spmatrix` type.

        Examples
        --------
        >>> import numpy as np
        >>> from scipy.sparse import coo_matrix
        >>> coo_matrix[np.int8]
        scipy.sparse._coo.coo_matrix[numpy.int8]
        """
        from types import GenericAlias

        return GenericAlias(cls, arg)
|
spmatrix
|
python
|
readthedocs__readthedocs.org
|
readthedocs/search/api/v2/views.py
|
{
"start": 7008,
"end": 7114
}
|
class ____(SettingsOverrideObject):
    # Indirection point: the concrete view class is presumably resolvable via
    # project settings (see SettingsOverrideObject -- TODO confirm); this is
    # the implementation used when nothing overrides it.
    _default_class = BaseProxiedPageSearchAPIView
|
ProxiedPageSearchAPIView
|
python
|
Pylons__pyramid
|
tests/test_csrf.py
|
{
"start": 87,
"end": 1739
}
|
class ____(unittest.TestCase):
    """Tests for the legacy session-backed CSRF storage policy."""

    class MockSession:
        """Minimal session stub exposing the legacy CSRF token API."""

        def __init__(self, current_token='02821185e4c94269bdc38e6eeae0a2f8'):
            self.current_token = current_token

        def new_csrf_token(self):
            self.current_token = 'e5e9e30a08b34ff9842ff7d2b958c14b'
            return self.current_token

        def get_csrf_token(self):
            return self.current_token

    def _makeOne(self):
        from pyramid.csrf import LegacySessionCSRFStoragePolicy

        return LegacySessionCSRFStoragePolicy()

    def test_register_session_csrf_policy(self):
        from pyramid.csrf import LegacySessionCSRFStoragePolicy
        from pyramid.interfaces import ICSRFStoragePolicy

        config = Configurator()
        config.set_csrf_storage_policy(self._makeOne())
        config.commit()
        registered = config.registry.queryUtility(ICSRFStoragePolicy)
        self.assertIsInstance(registered, LegacySessionCSRFStoragePolicy)

    def test_session_csrf_implementation_delegates_to_session(self):
        policy = self._makeOne()
        request = DummyRequest(session=self.MockSession())
        # Initial token comes straight from the session ...
        self.assertEqual(
            policy.get_csrf_token(request), '02821185e4c94269bdc38e6eeae0a2f8'
        )
        # ... and regeneration is delegated to the session as well.
        self.assertEqual(
            policy.new_csrf_token(request), 'e5e9e30a08b34ff9842ff7d2b958c14b'
        )

    def test_check_csrf_token(self):
        policy = self._makeOne()
        request = DummyRequest(session=self.MockSession('foo'))
        self.assertTrue(policy.check_csrf_token(request, 'foo'))
        self.assertFalse(policy.check_csrf_token(request, 'bar'))
|
TestLegacySessionCSRFStoragePolicy
|
python
|
walkccc__LeetCode
|
solutions/474. Ones and Zeroes/474.py
|
{
"start": 0,
"end": 458
}
|
class ____:
def findMaxForm(self, strs: list[str], m: int, n: int) -> int:
# dp[i][j] := the maximum size of the subset given i 0s and j 1s are
# available
dp = [[0] * (n + 1) for _ in range(m + 1)]
for s in strs:
zeros = s.count('0')
ones = len(s) - zeros
for i in range(m, zeros - 1, -1):
for j in range(n, ones - 1, -1):
dp[i][j] = max(dp[i][j], dp[i - zeros][j - ones] + 1)
return dp[m][n]
|
Solution
|
python
|
has2k1__plotnine
|
plotnine/scales/scale_xy.py
|
{
"start": 9241,
"end": 9407
}
|
class ____(scale_y_continuous):
    """
    Continuous y position log10 transformed scale
    """

    # Data -> position transform; "log10" is resolved by the base scale.
    trans: TransUser = "log10"
@dataclass(kw_only=True)
|
scale_y_log10
|
python
|
huggingface__transformers
|
src/transformers/quantizers/quantizer_torchao.py
|
{
"start": 3075,
"end": 26575
}
|
class ____(HfQuantizer):
    """
    Quantizer for torchao: https://github.com/pytorch/ao/
    """

    requires_parameters_quantization = True
    requires_calibration = False
    required_packages = ["torchao"]

    def __init__(self, quantization_config, **kwargs):
        super().__init__(quantization_config, **kwargs)
        # Detect int4 schemes: string configs by substring, AOBaseConfig-style
        # configs by fuzzy-matching the digit out of the class name.
        if isinstance(self.quantization_config.quant_type, str):
            is_int_4 = "int4" in self.quantization_config.quant_type
        else:
            config_name = self.quantization_config.quant_type.__class__.__name__
            is_int_4 = fuzzy_match_size(config_name) == "4"
        # TODO: better way to get the serialized key names? Hard to read from torchao codebase
        if is_int_4:
            self.weight_ao_keys = ["qdata", "scale", "zero_point"]
        else:
            self.weight_ao_keys = ["qdata", "scale"]
        # Instead of serializing the simple torch.Tensor like usual, torchao adds a `:_data` suffix so we need this
        self.full_ao_keys = self.weight_ao_keys + ["_data"]

    def validate_environment(self, *args, **kwargs):
        """Validate torchao availability plus device_map / torch-version constraints."""
        if not is_torchao_available():
            raise ImportError("Loading an torchao quantized model requires torchao library (`pip install torchao`)")
        self.offload = False
        device_map = kwargs.get("device_map")
        if isinstance(device_map, dict):
            # Mixed maps that place some modules on cpu/disk imply offloading.
            if ("disk" in device_map.values() or "cpu" in device_map.values()) and len(device_map) > 1:
                self.offload = True
            if self.pre_quantized and "disk" in device_map.values():
                raise ValueError(
                    "You are attempting to perform disk offload with a pre-quantized torchao model "
                    "This is not supported yet . Please remove the disk device from the device_map."
                )
        if self.pre_quantized:
            weights_only = kwargs.get("weights_only")
            if weights_only:
                # `weights_only=True` loading of torchao checkpoints needs torch>=2.5.
                torch_version = version.parse(importlib.metadata.version("torch"))
                if torch_version < version.parse("2.5.0"):
                    raise RuntimeError(
                        f"In order to use torchao pre-quantized model, you need to have torch>=2.5.0. However, the current version is {torch_version}."
                        f" You can also set with `weights_only=False` in `from_pretrained` if you don't want to update torch"
                    )

    def update_dtype(self, dtype):
        """Pick (or correct) the model dtype required by the chosen quant scheme."""
        if self.quantization_config.quant_type == "int4_weight_only":
            if dtype is not None and dtype != torch.bfloat16:
                logger.warning_once(
                    f"Setting dtype to {dtype} for int4_weight_only quantization, but only bfloat16 is supported right now. Please set the dtype to bfloat16."
                )
            if dtype is None:
                logger.warning_once(
                    "Setting dtype to torch.bfloat16 for int4_weight_only quantization since only bfloat16 is supported right now. Please set dtype=torch.bfloat16 to remove this warning."
                )
                dtype = torch.bfloat16
        if self.quantization_config.quant_type == "int8_dynamic_activation_int8_weight":
            if dtype is None:
                logger.info(
                    "Setting dtype to torch.float32 for int8_dynamic_activation_int8_weight quantization as no dtype was specified in from_pretrained"
                )
                # we need to set the dtype, otherwise we have dtype mismatch when performing the quantized linear op
                dtype = torch.float32
        return dtype

    def get_state_dict_and_metadata(self, model, safe_serialization: bool | None = False):
        """
        If the model is safe serializable, we flatten the state dict of tensor subclasses so that it is compatible with
        the safetensors format.
        """
        if type(self.quantization_config.quant_type) in SUPPORTED_SAFE_SERIALIZATION_CONFIGS and safe_serialization:
            if TORCHAO_VERSION >= version.parse("0.14.0"):
                return flatten_tensor_state_dict(model.state_dict())
            else:
                raise RuntimeError(
                    f"In order to use safetensors with torchao, please use torchao version >= 0.14.0. Current version: {TORCHAO_VERSION}"
                )
        else:
            # Unsafe (non-safetensors) serialization: nothing special to do.
            return None, {}

    def adjust_target_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
        """Return the storage dtype accelerate should budget for each weight."""
        from accelerate.utils import CustomDtype

        # Import AOBaseConfig directly since we know we have the right version
        if self.quantization_config._get_ao_version() > version.Version("0.9.0"):
            from torchao.core.config import AOBaseConfig

            quant_type = self.quantization_config.quant_type
            if isinstance(quant_type, AOBaseConfig):
                # Extract size digit using fuzzy match on the class name
                config_name = quant_type.__class__.__name__
                size_digit = fuzzy_match_size(config_name)
                # Map the extracted digit to appropriate dtype
                if size_digit == "4":
                    return CustomDtype.INT4
                else:
                    # Default to int8
                    return torch.int8
            # Original mapping for non-AOBaseConfig types
            map_to_target_dtype = {
                "int4_weight_only": CustomDtype.INT4,
                "int8_weight_only": torch.int8,
                "int8_dynamic_activation_int8_weight": torch.int8,
                "autoquant": None,
            }
            return map_to_target_dtype[self.quantization_config.quant_type]
        else:
            raise ValueError(
                "You are using `device_map='auto'` on a torchao quantized model. To automatically compute"
                " the appropriate device map, you should upgrade your `accelerate` library with "
                "`pip install --upgrade accelerate`"
            )

    def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
        # need more space for the quantization parameters (e.g. scale). Tested with int4 wo and group size = 128
        max_memory = {key: val * 0.9 for key, val in max_memory.items()}
        return max_memory

    def _process_model_before_weight_loading(
        self, model: "PreTrainedModel", keep_in_fp32_modules: list[str] | None = None, **kwargs
    ):
        """Compute the set of modules that must stay unquantized."""
        self.modules_to_not_convert = self.get_modules_to_not_convert(
            model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
        )
        if self.quantization_config.include_input_output_embeddings:
            # Embeddings are normally skipped; remove them from the skip list so
            # they get quantized too when the config opts in.
            input_emb = model.get_input_embeddings()
            input_emb_names = [name for name, module in model.named_modules() if id(module) == id(input_emb)]
            output_emb = model.get_output_embeddings()
            output_emb_names = [name for name, module in model.named_modules() if id(module) == id(output_emb)]
            self.modules_to_not_convert = [
                x for x in self.modules_to_not_convert if x not in input_emb_names + output_emb_names
            ]
        return

    def update_unexpected_keys(self, model, unexpected_keys: list[str]) -> list[str]:
        # The torchao-specific suffixed keys (qdata/scale/zero_point/_data) are
        # expected, so filter them out of the "unexpected" report.
        return [k for k in unexpected_keys if not any(k.endswith(x) for x in self.full_ao_keys)]

    def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
        """Decide whether `param_name` should go through create_quantized_param."""
        if self.pre_quantized:
            return False
        if self.quantization_config.quant_type == "autoquant":
            return False
        # check if the param_name is not in self.modules_to_not_convert
        if any(key + "." in param_name or key == param_name for key in self.modules_to_not_convert):
            return False
        elif any(param_name.endswith(f":{x}") for x in self.full_ao_keys):
            return True
        # we only quantize the weight of nn.Linear and nn.Embedding
        module, tensor_name = get_module_from_name(model, param_name)
        _QUANTIZABLE = [torch.nn.Linear]
        if self.quantization_config.include_input_output_embeddings:
            _QUANTIZABLE.append(torch.nn.Embedding)
        # Handle FqnToConfig, introduced in torchao 0.15.0+
        if self.quantization_config._get_ao_version() >= version.parse("0.15.0"):
            from torchao.quantization import FqnToConfig, fqn_matches_fqn_config

            if isinstance(self.quantization_config.quant_type, FqnToConfig):
                module_fqn, param_name_fqn = param_name.rsplit(".", 1)
                if (
                    fqn_matches_fqn_config(module_fqn, self.quantization_config.quant_type)
                    or fqn_matches_fqn_config(param_name, self.quantization_config.quant_type)
                    or (
                        "_default" in self.quantization_config.quant_type.fqn_to_config
                        and isinstance(module, tuple(_QUANTIZABLE))
                    )
                ):
                    return True
        return isinstance(module, tuple(_QUANTIZABLE)) and tensor_name == "weight"

    def create_quantized_param(
        self,
        model: "PreTrainedModel",
        param_value: "torch.Tensor",
        param_name: str,
        target_device: "torch.device",
        **kwargs,
    ):
        """
        Each nn.Linear layer that needs to be quantized is processed here.
        First, we set the value the weight tensor, then we move it to the target device. Finally, we quantize the module.
        """
        from torchao.quantization import quantize_

        full_name = param_name
        # Those are the pre quantized weights
        if ":" in param_name:
            param_name = param_name.rsplit(":", 1)[0]
        module, tensor_name = get_module_from_name(model, param_name)
        if self.pre_quantized:
            # If it's a bias, no need to do anything special (except removing the ":_data" part of the key, but was
            # already done) - if it's unsafe-serialized (i.e. not safetensors), no need for anything either
            is_unsafe_serialization = ":" not in full_name
            if tensor_name == "bias" or is_unsafe_serialization:
                module._parameters[tensor_name] = torch.nn.Parameter(
                    param_value.to(target_device), requires_grad=param_value.requires_grad
                )
                return
            # Sanity check for the new serialization format
            elif not (TORCHAO_VERSION >= version.parse("0.14.0") and is_metadata_torchao(self.metadata)):
                raise ValueError("To use `safetensors` serialization, you should have `torchao>=0.14.0` installed")
            # Save the states for later quantization when they are all gathered
            if not hasattr(self, "ao_params"):
                self.ao_params = defaultdict(dict)
            self.ao_params[param_name].update({full_name: param_value})
            # We are ready for quantization in this case (we retrieved all the needed keys)
            if len(self.ao_params[param_name]) == len(self.weight_ao_keys):
                new_param = unflatten_tensor_state_dict(self.ao_params[param_name], self.metadata)[param_name]
                # Set it
                module._parameters[tensor_name] = torch.nn.Parameter(
                    new_param.to(target_device), requires_grad=new_param.requires_grad
                )
                # Free memory
                del self.ao_params[param_name]
                # Add repr to the module
                if isinstance(module, nn.Linear):
                    module.extra_repr = types.MethodType(_linear_extra_repr, module)
        else:
            module._parameters[tensor_name] = torch.nn.Parameter(
                param_value, requires_grad=param_value.requires_grad
            ).to(target_device)
            # if we are quantizing tied parameters, to avoid tying the quantized weights
            # the correct order to do it is
            # 1. load the weight to model
            # 2. run tie_weights to populate the weights
            # 3. quantize
            input_embed = model.get_input_embeddings()
            if self.quantization_config.untie_embedding_weights and id(module) == id(input_embed):
                model.tie_weights()
                setattr(model.config.get_text_config(decoder=True), "tie_word_embeddings", False)
            # handle FqnToConfig, introduced in torchao 0.15.0+
            if self.quantization_config._get_ao_version() >= version.Version("0.15.0"):
                from torchao.quantization import FqnToConfig

                config = self.quantization_config.get_apply_tensor_subclass()
                if isinstance(config, FqnToConfig):
                    module_fqn, top_level_param_name = param_name.rsplit(".", 1)
                    c = None
                    # Precedence: exact param fqn > exact module fqn > regex > _default.
                    if param_name in config.fqn_to_config:
                        assert not module_fqn.startswith("re:"), (
                            "param fqn should not start with`re:`, which is used for specifying regex"
                        )
                        c = config.module_fqn_to_config[param_name]
                    elif module_fqn in config.fqn_to_config:
                        assert not module_fqn.startswith("re:"), (
                            "module fqn should not start with`re:`, which is used for specifying regex"
                        )
                        c = config.module_fqn_to_config[module_fqn]
                    # regex match module and param
                    else:
                        for maybe_module_fqn_pattern in config.fqn_to_config:
                            # if key doesn't start with re, it is an exact fqn key, so we don't regex match
                            if not maybe_module_fqn_pattern.startswith("re:"):
                                continue
                            # see if param matches first
                            elif re.fullmatch(maybe_module_fqn_pattern[3:], param_name):
                                c = config.module_fqn_to_config[maybe_module_fqn_pattern]
                                break
                            elif re.fullmatch(maybe_module_fqn_pattern[3:], module_fqn):
                                # we'll apply the config for first fully matched pattern
                                c = config.module_fqn_to_config[maybe_module_fqn_pattern]
                                break
                        else:
                            c = config.module_fqn_to_config.get("_default", None)
                    if c is not None:
                        if top_level_param_name == "weight":
                            # we can apply the module config directly
                            quantize_(module, c, (lambda x, fqn: True))
                        else:
                            # need to apply to custom param name
                            custom_param_fqn_config = FqnToConfig({top_level_param_name: c})
                            quantize_(module, custom_param_fqn_config, filter_fn=None)
                    return
            # handle ModuleFqnToConfig, introduced in torchao 0.12.0+
            # TODO deprecate this when we deprecate ModuleFqnToConfig
            elif self.quantization_config._get_ao_version() >= version.Version("0.12.0"):
                from torchao.quantization import ModuleFqnToConfig

                config = self.quantization_config.get_apply_tensor_subclass()
                if isinstance(config, ModuleFqnToConfig):
                    module_fqn, _ = param_name.rsplit(".", 1)
                    c = None
                    if module_fqn in config.module_fqn_to_config:
                        assert not module_fqn.startswith("re:"), (
                            "module fqn should not start with`re:`, which is used for specifying regex"
                        )
                        c = config.module_fqn_to_config[module_fqn]
                    else:
                        for maybe_module_fqn_pattern in config.module_fqn_to_config:
                            if not maybe_module_fqn_pattern.startswith("re:"):
                                continue
                            elif re.fullmatch(maybe_module_fqn_pattern[3:], module_fqn):
                                # we'll apply the config for first fully matched pattern
                                c = config.module_fqn_to_config[maybe_module_fqn_pattern]
                                break
                        else:
                            c = config.module_fqn_to_config.get("_default", None)
                    if c is not None:
                        # filter_fn: not filtering out any modules
                        quantize_(module, c, filter_fn=lambda x, fqn: True)
                    return
            # Plain (non-fqn-keyed) config: quantize the module directly.
            quantize_(module, self.quantization_config.get_apply_tensor_subclass())

    def preprocess_model(self, model: "PreTrainedModel", config, dtype=None, checkpoint_files=None, **kwargs):
        """
        Setting model attributes and/or converting model before weights loading. At this point
        the model should be initialized on the meta device so you can freely manipulate the skeleton
        of the model in order to replace modules in-place. Make sure to override the abstract method `_process_model_before_weight_loading`.

        Args:
            model (`~transformers.PreTrainedModel`):
                The model to quantize
            kwargs (`dict`, *optional*):
                The keyword arguments that are passed along `_process_model_before_weight_loading`.
        """
        super().preprocess_model(model, config, dtype, checkpoint_files, **kwargs)
        # Torchao needs access to all metadata later
        self.set_metadata(checkpoint_files)

    def _process_model_after_weight_loading(self, model, **kwargs):
        """No process required for torchao quantized model"""
        if self.quantization_config.quant_type == "autoquant":
            from torchao import autoquant
            from torchao.quantization import ALL_AUTOQUANT_CLASS_LIST

            model = torch.compile(model, mode="max-autotune")
            model = autoquant(
                model,
                qtensor_class_list=ALL_AUTOQUANT_CLASS_LIST,
                set_inductor_config=False,
                **self.quantization_config.quant_type_kwargs,
            )
            return model
        return

    def is_serializable(self, safe_serialization=None) -> bool:
        """Whether the quantized model can be saved with the requested serialization."""
        if safe_serialization:
            _is_torchao_serializable = type(
                self.quantization_config.quant_type
            ) in SUPPORTED_SAFE_SERIALIZATION_CONFIGS and TORCHAO_VERSION >= version.parse("0.14.0")
            if not _is_torchao_serializable:
                logger.warning(
                    f"torchao quantized model only supports safe serialization for {SUPPORTED_SAFE_SERIALIZATION_CONFIGS}, \
                    and torchao version >= 0.14.0, please set `safe_serialization` to False for \
                    {type(self.quantization_config.quant_type)} and {TORCHAO_VERSION}."
                )
            return _is_torchao_serializable
        _is_torchao_serializable = version.parse(importlib.metadata.version("huggingface_hub")) >= version.parse(
            "0.25.0"
        )
        if not _is_torchao_serializable:
            logger.warning("torchao quantized model is only serializable after huggingface_hub >= 0.25.0 ")
        if self.offload and self.quantization_config.modules_to_not_convert is None:
            logger.warning(
                "The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them."
                "If you want to specify modules to not quantize, please specify modules_to_not_convert in the quantization_config."
            )
            return False
        return _is_torchao_serializable

    def get_accelerator_warm_up_factor(self):
        """
        This factor is used in caching_allocator_warmup to determine how many bytes to pre-allocate for accelerator warmup.
        - A factor of 2 means we pre-allocate the full memory footprint of the model.
        - A factor of 4 means we pre-allocate half of that, and so on

        However, when using TorchAO, calculating memory usage with param.numel() * param.element_size() doesn't give the correct size for quantized weights (like int4 or int8)
        That's because TorchAO internally represents quantized tensors using subtensors and metadata, and the reported element_size() still corresponds to the dtype
        not the actual bit-width of the quantized data.

        To correct for this:
        - Use a division factor of 8 for int4 weights
        - Use a division factor of 4 for int8 weights
        """
        if self.quantization_config._get_ao_version() > version.Version("0.9.0"):
            from torchao.core.config import AOBaseConfig

            quant_type = self.quantization_config.quant_type
            # For autoquant case, it will be treated in the string implementation below in map_to_target_dtype
            if isinstance(quant_type, AOBaseConfig):
                # Extract size digit using fuzzy match on the class name
                config_name = quant_type.__class__.__name__
                size_digit = fuzzy_match_size(config_name)
                if size_digit == "4":
                    return 8
                else:
                    return 4
        # Original mapping for non-AOBaseConfig types
        map_to_target_dtype = {
            "int4_weight_only": 8,
            "int8_weight_only": 4,
            "int8_dynamic_activation_int8_weight": 4,
            "autoquant": 4,
        }
        return map_to_target_dtype[self.quantization_config.quant_type]

    @property
    def is_trainable(self) -> bool:
        # Only the int8 schemes listed here are considered trainable.
        supported_quant_types_for_training = [
            "int8_weight_only",
            "int8_dynamic_activation_int8_weight",
        ]
        return self.quantization_config.quant_type in supported_quant_types_for_training

    @property
    def is_compileable(self) -> bool:
        return True

    def set_metadata(self, checkpoint_files: list[str]):
        """Collect and merge safetensors metadata from all checkpoint shards."""
        if checkpoint_files[0].endswith(".safetensors"):
            metadata = {}
            for checkpoint in checkpoint_files:
                with safe_open(checkpoint, framework="pt") as f:
                    metadata_ = f.metadata() or {}
                    metadata.update(metadata_)
            # Save it
            self.metadata = metadata

    def get_quantize_ops(self):
        from ..integrations.torchao import TorchAoQuantize

        return TorchAoQuantize(self)

    def get_weight_conversions(self):
        """Return the key-conversion rules that reassemble serialized torchao weights."""
        from ..integrations.torchao import TorchAoDeserialize

        if self.pre_quantized:
            return [
                WeightConverter(
                    source_patterns=["weight:qdata", "weight:scale", "weight:zero_point"],
                    target_patterns="weight",
                    operations=[TorchAoDeserialize(self)],
                ),
                WeightConverter(
                    source_patterns=["weight:_data"],
                    target_patterns="weight",
                    operations=[TorchAoDeserialize(self)],
                ),
                # used for unsafe serialization
            ]
        return []
|
TorchAoHfQuantizer
|
python
|
dask__distributed
|
distributed/comm/inproc.py
|
{
"start": 2508,
"end": 3715
}
|
class ____:
"""
A single-reader, single-writer, non-threadsafe, peekable queue.
"""
def __init__(self):
self._q = deque()
self._read_future = None
def get_nowait(self):
q = self._q
if not q:
raise QueueEmpty
return q.popleft()
def get(self):
assert not self._read_future, "Only one reader allowed"
fut = Future()
q = self._q
if q:
fut.set_result(q.popleft())
else:
self._read_future = fut
return fut
def put_nowait(self, value):
q = self._q
fut = self._read_future
if fut is not None:
assert len(q) == 0
self._read_future = None
_set_result_unless_cancelled(fut, value)
else:
q.append(value)
put = put_nowait
_omitted = object()
def peek(self, default=_omitted):
"""
Get the next object in the queue without removing it from the queue.
"""
q = self._q
if q:
return q[0]
elif default is not self._omitted:
return default
else:
raise QueueEmpty
_EOF = object()
|
Queue
|
python
|
django__django
|
tests/model_inheritance_regress/models.py
|
{
"start": 690,
"end": 911
}
|
class ____(Place):
    # The parent_link connector need not be the pk on the model.
    primary_key = models.AutoField(primary_key=True)
    # Explicit one-to-one to the parent carries the inheritance link
    # (parent_link=True) even though it is not this model's primary key.
    parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
|
ParkingLot3
|
python
|
pytorch__pytorch
|
torch/jit/frontend.py
|
{
"start": 3619,
"end": 3670
}
|
class ____(FrontendError):
    """FrontendError subtype with no behavior of its own; its meaning is
    defined by the call sites that raise it."""

    pass
|
NotSupportedError
|
python
|
aio-libs__aiohttp
|
aiohttp/web_urldispatcher.py
|
{
"start": 28627,
"end": 29487
}
|
class ____(AbstractRoute):
    """Internal route whose handler unconditionally raises a fixed HTTP exception."""

    def __init__(self, http_exception: HTTPException) -> None:
        # Matches every HTTP method; the handler always raises.
        super().__init__(hdrs.METH_ANY, self._handle)
        self._http_exception = http_exception

    def url_for(self, *args: str, **kwargs: str) -> URL:
        # System routes are synthetic and have no address.
        raise RuntimeError(".url_for() is not allowed for SystemRoute")

    @property
    def name(self) -> str | None:
        return None

    @property
    def status(self) -> int:
        return self._http_exception.status

    @property
    def reason(self) -> str:
        return self._http_exception.reason

    def get_info(self) -> _InfoDict:
        return {"http_exception": self._http_exception}

    async def _handle(self, request: Request) -> StreamResponse:
        raise self._http_exception

    def __repr__(self) -> str:
        return f"<SystemRoute {self.status}: {self.reason}>"
|
SystemRoute
|
python
|
django__django
|
tests/apps/query_performing_app/apps.py
|
{
"start": 433,
"end": 728
}
|
class ____(BaseAppConfig):
    def _perform_query(self):
        # Issues a write followed by a read against self.database and stashes
        # the resulting names on the instance for later inspection.
        from ..models import TotallyNormal

        queryset = TotallyNormal.objects.using(self.database)
        queryset.update_or_create(name="new name")
        self.query_results = list(queryset.values_list("name"))
|
ModelQueryAppConfig
|
python
|
walkccc__LeetCode
|
solutions/2202. Maximize the Topmost Element After K Moves/2202.py
|
{
"start": 0,
"end": 516
}
|
class ____:
def maximumTop(self, nums: list[int], k: int) -> int:
n = len(nums)
# After taking k elements, if we're left something, then we return nums[k]
# Otherwise, return -1.
if k == 0 or k == 1:
return -1 if n == k else nums[k]
# Remove then add even number of times.
if n == 1:
return -1 if k & 1 else nums[0]
# Take min(n, k - 1) elements and put the largest one back.
mx = max(nums[:min(n, k - 1)])
if k >= n:
return mx
return max(mx, nums[k])
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/warmup.py
|
{
"start": 938,
"end": 2012
}
|
class ____(Endpoint):
    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }
    owner = ApiOwner.UNOWNED
    permission_classes = ()
    rate_limits = RateLimitConfig(group="INTERNAL")

    def get(self, request: Request) -> Response:
        """Pre-warm the per-language URL resolvers and translation caches."""
        languages = [lang for lang, _ in settings.LANGUAGES]
        languages.append(settings.LANGUAGE_CODE)
        # for each possible language we support, warm up the url resolver
        # this fixes an issue we were seeing where many languages trying
        # to resolve at once would cause lock contention
        for lang in languages:
            with translation.override(lang):
                reverse("sentry-warmup")
        # for each possible language we support, warm up the translations
        # cache for faster access
        for lang in languages:
            try:
                language = translation.get_supported_language_variant(lang)
            except LookupError:
                # no supported variant for this code -- nothing to warm
                pass
            else:
                translation.activate(language)
        return Response(200)
|
WarmupEndpoint
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/compiler_wrapper/package.py
|
{
"start": 407,
"end": 10573
}
|
class ____(Package):
"""Spack compiler wrapper script.
Compiler commands go through this compiler wrapper in Spack builds.
The compiler wrapper is a thin layer around the standard compilers.
It enables several key pieces of functionality:
1. It allows Spack to swap compilers into and out of builds easily.
2. It adds several options to the compile line so that spack
packages can find their dependencies at build time and run time:
-I and/or -isystem arguments for dependency /include directories.
-L arguments for dependency /lib directories.
-Wl,-rpath arguments for dependency /lib directories.
3. It provides a mechanism to inject flags from specs
"""
homepage = "https://github.com/spack/spack"
url = f"file:///{pathlib.PurePath(__file__).parent}/cc.sh"
# FIXME (compiler as nodes): use a different tag, since this is only to exclude
# this node from auto-generated rules
tags = ["runtime"]
license("Apache-2.0 OR MIT")
if sys.platform != "win32":
version(
"1.0",
sha256="c65a9d2b2d4eef67ab5cb0684d706bb9f005bb2be94f53d82683d7055bdb837c",
expand=False,
)
else:
version("1.0")
has_code = False
    def bin_dir(self) -> pathlib.Path:
        """Directory holding the installed wrapper script and its symlinks."""
        # This adds an extra "spack" subdir, so that the script and symlinks don't get
        # their way to the default view
        return pathlib.Path(str(self.prefix)) / "libexec" / "spack"
    def install(self, spec, prefix):
        """Install the cc.sh wrapper (POSIX) or a placeholder file (Windows),
        then create the per-compiler symlink farm pointing at it."""
        if sys.platform == "win32":
            # No shell wrapper on Windows: drop a marker file instead.
            placeholder = self.bin_dir() / "placeholder-wrapper"
            placeholder.parent.mkdir(parents=True)
            placeholder.write_text(
                "This file is a placeholder for the compiler wrapper on Windows."
            )
            return
        cc_script = pathlib.Path(self.stage.source_path) / "cc.sh"
        bin_dir = self.bin_dir()
        # Copy the script
        bin_dir.mkdir(parents=True)
        installed_script = bin_dir / "cc"
        shutil.copy(cc_script, str(installed_script))
        set_executable(installed_script)
        # Create links to use the script under different names
        # (generic tool/language names directly in bin_dir)
        for name in (
            "ld.lld",
            "ld.gold",
            "ld",
            "ftn",
            "fc",
            "f95",
            "f90",
            "f77",
            "cpp",
            "c99",
            "c89",
            "c++",
        ):
            (bin_dir / name).symlink_to(installed_script)
        # Compiler-specific names live in per-vendor subdirectories.
        for subdir, name in (
            ("aocc", "clang"),
            ("aocc", "clang++"),
            ("aocc", "flang"),
            ("arm", "armclang"),
            ("arm", "armclang++"),
            ("arm", "armflang"),
            ("case-insensitive", "CC"),
            ("cce", "cc"),
            ("cce", "craycc"),
            ("cce", "crayftn"),
            ("cce", "ftn"),
            ("clang", "clang"),
            ("clang", "clang++"),
            ("clang", "flang"),
            ("fj", "fcc"),
            ("fj", "frt"),
            ("gcc", "gcc"),
            ("gcc", "g++"),
            ("gcc", "gfortran"),
            ("intel", "icc"),
            ("intel", "icpc"),
            ("intel", "ifort"),
            ("nag", "nagfor"),
            ("nvhpc", "nvc"),
            ("nvhpc", "nvc++"),
            ("nvhpc", "nvfortran"),
            ("oneapi", "icx"),
            ("oneapi", "icpx"),
            ("oneapi", "ifx"),
            ("rocmcc", "amdclang"),
            ("rocmcc", "amdclang++"),
            ("rocmcc", "amdflang"),
            ("xl", "xlc"),
            ("xl", "xlc++"),
            ("xl", "xlf"),
            ("xl", "xlf90"),
            ("xl_r", "xlc_r"),
            ("xl_r", "xlc++_r"),
            ("xl_r", "xlf_r"),
            ("xl_r", "xlf90_r"),
        ):
            (bin_dir / subdir).mkdir(exist_ok=True)
            (bin_dir / subdir / name).symlink_to(installed_script)
        # Extra symlinks for Cray
        cray_dir = bin_dir / "cce" / "case-insensitive"
        cray_dir.mkdir(exist_ok=True)
        (cray_dir / "crayCC").symlink_to(installed_script)
        (cray_dir / "CC").symlink_to(installed_script)
def setup_dependent_build_environment(
self, env: EnvironmentModifications, dependent_spec: Spec
) -> None:
if sys.platform == "win32":
return
_var_list = []
if dependent_spec.has_virtual_dependency("c"):
_var_list.append(("c", "cc", "CC", "SPACK_CC"))
if dependent_spec.has_virtual_dependency("cxx"):
_var_list.append(("cxx", "cxx", "CXX", "SPACK_CXX"))
if dependent_spec.has_virtual_dependency("fortran"):
_var_list.append(("fortran", "fortran", "F77", "SPACK_F77"))
_var_list.append(("fortran", "fortran", "FC", "SPACK_FC"))
# The package is not used as a compiler, so skip this setup
if not _var_list:
return
bin_dir = self.bin_dir()
implicit_rpaths, env_paths = [], []
extra_rpaths = []
for language, attr_name, wrapper_var_name, spack_var_name in _var_list:
compiler_pkg = dependent_spec[language].package
if not hasattr(compiler_pkg, attr_name):
continue
compiler = getattr(compiler_pkg, attr_name)
env.set(spack_var_name, compiler)
if language not in compiler_pkg.compiler_wrapper_link_paths:
continue
wrapper_path = bin_dir / compiler_pkg.compiler_wrapper_link_paths.get(language)
env.set(wrapper_var_name, str(wrapper_path))
env.set(f"SPACK_{wrapper_var_name}_RPATH_ARG", compiler_pkg.rpath_arg)
uarch = dependent_spec.architecture.target
version_number, _ = spack.vendor.archspec.cpu.version_components(
compiler_pkg.spec.version.dotted_numeric_string
)
try:
isa_arg = uarch.optimization_flags(compiler_pkg.archspec_name(), version_number)
except ValueError:
isa_arg = ""
if isa_arg:
env.set(f"SPACK_TARGET_ARGS_{attr_name.upper()}", isa_arg)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <bin_dir>/case-insensitive
# directory. Add that to the path too.
compiler_specific_dir = (
bin_dir / compiler_pkg.compiler_wrapper_link_paths[language]
).parent
for item in [bin_dir, compiler_specific_dir]:
env_paths.append(item)
ci = item / "case-insensitive"
if ci.is_dir():
env_paths.append(ci)
env.set(f"SPACK_{wrapper_var_name}_LINKER_ARG", compiler_pkg.linker_arg)
# Check if this compiler has implicit rpaths
implicit_rpaths.extend(_implicit_rpaths(pkg=compiler_pkg))
# Add extra rpaths, if they are defined in an external spec
extra_rpaths.extend(
getattr(compiler_pkg.spec, "extra_attributes", {}).get("extra_rpaths", [])
)
if implicit_rpaths:
# Implicit rpaths are accumulated across all compilers so, whenever they are mixed,
# the compiler used in ccld mode will account for rpaths from other compilers too.
implicit_rpaths = lang.dedupe(implicit_rpaths)
env.set("SPACK_COMPILER_IMPLICIT_RPATHS", ":".join(implicit_rpaths))
if extra_rpaths:
extra_rpaths = lang.dedupe(extra_rpaths)
env.set("SPACK_COMPILER_EXTRA_RPATHS", ":".join(extra_rpaths))
env.set("SPACK_ENABLE_NEW_DTAGS", self.enable_new_dtags)
env.set("SPACK_DISABLE_NEW_DTAGS", self.disable_new_dtags)
for item in env_paths:
env.prepend_path("SPACK_COMPILER_WRAPPER_PATH", item)
def setup_dependent_package(self, module, dependent_spec):
def _spack_compiler_attribute(*, language: str) -> str:
compiler_pkg = dependent_spec[language].package
if sys.platform != "win32":
# On non-Windows we return the appropriate path to the compiler wrapper
return str(self.bin_dir() / compiler_pkg.compiler_wrapper_link_paths[language])
# On Windows we return the real compiler
if language == "c":
return compiler_pkg.cc
elif language == "cxx":
return compiler_pkg.cxx
elif language == "fortran":
return compiler_pkg.fortran
if dependent_spec.has_virtual_dependency("c"):
setattr(module, "spack_cc", _spack_compiler_attribute(language="c"))
if dependent_spec.has_virtual_dependency("cxx"):
setattr(module, "spack_cxx", _spack_compiler_attribute(language="cxx"))
if dependent_spec.has_virtual_dependency("fortran"):
setattr(module, "spack_fc", _spack_compiler_attribute(language="fortran"))
setattr(module, "spack_f77", _spack_compiler_attribute(language="fortran"))
@property
def disable_new_dtags(self) -> str:
if self.spec.satisfies("platform=darwin"):
return ""
return "--disable-new-dtags"
@property
def enable_new_dtags(self) -> str:
if self.spec.satisfies("platform=darwin"):
return ""
return "--enable-new-dtags"
def _implicit_rpaths(pkg: spack.package_base.PackageBase) -> List[str]:
detector = spack.compilers.libraries.CompilerPropertyDetector(pkg.spec)
paths = detector.implicit_rpaths()
return paths
|
CompilerWrapper
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 908621,
"end": 909821
}
|
class ____(sgqlc.types.Type):
"""Represents the client's rate limit."""
__schema__ = github_schema
__field_names__ = ("cost", "limit", "node_count", "remaining", "reset_at", "used")
cost = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="cost")
"""The point cost for the current query counting against the rate
limit.
"""
limit = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="limit")
"""The maximum number of points the client is permitted to consume in
a 60 minute window.
"""
node_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="nodeCount")
"""The maximum number of nodes this query may return"""
remaining = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="remaining")
"""The number of points remaining in the current rate limit window."""
reset_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="resetAt")
"""The time at which the current rate limit window resets in UTC
epoch seconds.
"""
used = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="used")
"""The number of points used in the current rate limit window."""
|
RateLimit
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 101661,
"end": 102560
}
|
class ____(sgqlc.types.Enum):
"""The possible actions that GitHub Sponsors activities can
represent.
Enumeration Choices:
* `CANCELLED_SPONSORSHIP`: The activity was cancelling a
sponsorship.
* `NEW_SPONSORSHIP`: The activity was starting a sponsorship.
* `PENDING_CHANGE`: The activity was scheduling a downgrade or
cancellation.
* `REFUND`: The activity was funds being refunded to the sponsor
or GitHub.
* `SPONSOR_MATCH_DISABLED`: The activity was disabling matching
for a previously matched sponsorship.
* `TIER_CHANGE`: The activity was changing the sponsorship tier,
either directly by the sponsor or by a scheduled/pending change.
"""
__schema__ = github_schema
__choices__ = ("CANCELLED_SPONSORSHIP", "NEW_SPONSORSHIP", "PENDING_CHANGE", "REFUND", "SPONSOR_MATCH_DISABLED", "TIER_CHANGE")
|
SponsorsActivityAction
|
python
|
huggingface__transformers
|
src/transformers/models/t5gemma/modeling_t5gemma.py
|
{
"start": 23699,
"end": 24249
}
|
class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, hidden_size: int, num_labels: int, classifier_dropout_rate: float = 0.0):
super().__init__()
self.dropout = nn.Dropout(p=classifier_dropout_rate)
self.out_proj = nn.Linear(hidden_size, num_labels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
|
T5GemmaClassificationHead
|
python
|
huggingface__transformers
|
tests/models/layoutlmv2/test_processing_layoutlmv2.py
|
{
"start": 5701,
"end": 22271
}
|
class ____(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
return ds[0]["image"].convert("RGB"), ds[1]["image"].convert("RGB")
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutLMv2Tokenizer.from_pretrained("microsoft/layoutlmv2-base-uncased")
fast_tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
input_image_proc = image_processor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_image_proc = image_processor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(input_image_proc["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] 7 itc limited report and accounts 2013 itc ’ s brands : an asset for the nation the consumer needs and aspirations they fulfil, the benefit they generate for millions across itc ’ s value chains, the future - ready capabilities that support them, and the value that they create for the country, have made itc ’ s brands national assets, adding to india ’ s competitiveness. it is itc ’ s aspiration to be the no 1 fmcg player in the country, driven by its new fmcg businesses. a recent nielsen report has highlighted that itc's new fmcg businesses are the fastest growing among the top consumer goods companies operating in india. itc takes justifiable pride that, along with generating economic value, these celebrated indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. di wills * ; love delightfully soft skin? aia ans source : https : / / www. industrydocuments. ucsf. edu / docs / snbx0223 [SEP] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD] [PAD]" # fmt: skip
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "token_type_ids", "attention_mask", "image"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] weirdly world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] what's his name? [SEP] 11 : 14 to 11 : 39 a. m 11 : 39 to 11 : 44 a. m. 11 : 44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from the floor. dr. emil m. mrak, university of cal - ifornia, chairman, trrf board ; sam r. cecil, university of georgia college of agriculture ; dr. stanley charm, tufts university school of medicine ; dr. robert h. cotton, itt continental baking company ; dr. owen fennema, university of wis - consin ; dr. robert e. hardenburg, usda. questions and answers exhibits open capt. jack stoney room trrf scientific advisory council meeting ballroom foyer [SEP]" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "[CLS] what's the time [SEP] 7 itc limited report and accounts 2013 itc ’ s [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [372, 59, 407, 66], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [74, 136, 161, 158], [1000, 1000, 1000, 1000]] # fmt: skip
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutLMv2Processor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] what's his name? [SEP] hello world [SEP]"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "token_type_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "[CLS] how old is he? [SEP] hello world [SEP] [PAD] [PAD] [PAD]"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "[CLS] what's the time [SEP] my name is niels [SEP]"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
|
LayoutLMv2ProcessorIntegrationTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/smallest-greater-multiple-made-of-two-digits.py
|
{
"start": 84,
"end": 804
}
|
class ____(object):
def findInteger(self, k, digit1, digit2):
"""
:type k: int
:type digit1: int
:type digit2: int
:rtype: int
"""
MAX_NUM_OF_DIGITS = 10
INT_MAX = 2**31-1
if digit1 < digit2:
digit1, digit2 = digit2, digit1
total = 2
for l in xrange(1, MAX_NUM_OF_DIGITS+1):
for mask in xrange(total):
curr, bit = 0, total>>1
while bit:
curr = curr*10 + (digit1 if mask&bit else digit2)
bit >>= 1
if k < curr <= INT_MAX and curr%k == 0:
return curr
total <<= 1
return -1
|
Solution
|
python
|
google__pytype
|
pytype/tests/test_overload.py
|
{
"start": 142,
"end": 8480
}
|
class ____(test_base.BaseTest):
"""Tests for typing.overload."""
def test_simple(self):
self.Check("""
from typing import overload
@overload
def f(x: int) -> int:
pass
def f(x):
return x
""")
def test_bad_implementation(self):
errors = self.CheckWithErrors("""
from typing import overload
@overload
def f(x: int) -> str:
pass
def f(x):
return x # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"str.*int"})
def test_bad_call(self):
errors = self.CheckWithErrors("""
from typing import overload
@overload
def f(x: int) -> int:
pass
def f(x):
return x
f("") # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"int.*str"})
def test_sub_return(self):
ty = self.Infer("""
from typing import overload
@overload
def f(x: int) -> float:
pass
def f(x):
return x
v = f(0)
""")
self.assertTypesMatchPytd(
ty,
"""
def f(x: int) -> float: ...
v: float
""",
)
def test_multiple_overload(self):
self.Check("""
from typing import overload
@overload
def f(x: int) -> int:
pass
@overload
def f() -> None:
pass
def f(x=None):
return x
f(0)
f()
""")
def test_multiple_overload_bad_implementation(self):
errors = self.CheckWithErrors("""
from typing import overload
@overload
def f(x: int) -> int:
pass
@overload
def f(x: str) -> int:
pass
def f(x):
return x # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"int.*str"})
def test_multiple_overload_bad_call(self):
errors = self.CheckWithErrors("""
from typing import overload
@overload
def f(x: int) -> int:
pass
@overload
def f(x: int, y: str) -> str:
pass
def f(x, y=None):
return x if y is None else y
f("") # wrong-arg-types[e1]
f(0, 0) # wrong-arg-types[e2]
""")
self.assertErrorRegexes(errors, {"e1": r"int.*str", "e2": r"str.*int"})
def test_pyi(self):
src = """
from typing import overload
@overload
def f(x: int) -> int:
pass
@overload
def f(x: str) -> str:
pass
def f(x):
return x
def g():
return f
"""
ty = self.Infer(src, analyze_annotated=False)
self.assertTrue(
pytd_utils.ASTeq(ty, self.Infer(src, analyze_annotated=True))
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable
@overload
def f(x: int) -> int: ...
@overload
def f(x: str) -> str: ...
def g() -> Callable: ...
""",
)
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(ty))
errors = self.CheckWithErrors(
"""
import foo
foo.f(0) # ok
foo.f("") # ok
foo.f(0.0) # wrong-arg-types[e]
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"int.*float"})
def test_method_bad_implementation(self):
errors = self.CheckWithErrors("""
from typing import overload
class Foo:
@overload
def f(self, x: int) -> int:
pass
@overload
def f(self, x: str) -> int:
pass
def f(self, x):
return x # bad-return-type[e]
""")
self.assertErrorRegexes(errors, {"e": r"int.*str"})
def test_method_pyi(self):
src = """
from typing import overload
class Foo:
@overload
def f(self, x: int) -> int:
pass
@overload
def f(self, x: str) -> str:
pass
def f(self, x):
return x
"""
ty = self.Infer(src, analyze_annotated=False)
self.assertTrue(
pytd_utils.ASTeq(ty, self.Infer(src, analyze_annotated=True))
)
self.assertTypesMatchPytd(
ty,
"""
class Foo:
@overload
def f(self, x: int) -> int: ...
@overload
def f(self, x: str) -> str: ...
""",
)
def test_call_overload(self):
errors = self.CheckWithErrors("""
from typing import overload
@overload
def f(x: int) -> int:
pass
f(0) # not-callable[e]
""")
self.assertErrorRegexes(errors, {"e": r"overload"})
def test_varargs(self):
ty = self.Infer("""
from typing import overload
@overload
def f() -> int: ...
@overload
def f(x: float, *args) -> float: ...
def f(*args):
return args[0] if args else 0
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import overload
@overload
def f() -> int: ...
@overload
def f(x: float, *args) -> float: ...
""",
)
def test_varargs_and_kwargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import overload
@overload
def f(x: int) -> int: ...
@overload
def f(x: str) -> str: ...
""",
)
ty = self.Infer(
"""
import foo
def f1(*args):
return foo.f(*args)
def f2(**kwargs):
return foo.f(**kwargs)
def f3():
return foo.f(*(0,))
def f4():
return foo.f(**{"x": "y"})
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f1(*args) -> Any: ...
def f2(**kwargs) -> Any: ...
def f3() -> int: ...
def f4() -> str: ...
""",
)
def test_init_kwargs_overloads(self):
ty = self.Infer("""
from typing import overload
class Foo:
@overload
def __init__(self, x: int, **kw) -> None: ...
@overload
def __init__(self, **kw) -> None: ...
def __init__(self, x: int, **kw): pass
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import overload
class Foo:
@overload
def __init__(self, x: int, **kw) -> None: ...
@overload
def __init__(self, **kw) -> None: ...
""",
)
def test_use_init_kwargs_overloads(self):
with self.DepTree([(
"foo.py",
"""
from typing import overload
class Foo:
@overload
def __init__(self, x: int, **kw) -> None: ...
@overload
def __init__(self, **kw) -> None: ...
def __init__(self, x: int, **kw): pass
""",
)]):
self.Check("""
import foo
foo.Foo(0)
""")
def test_generic_class(self):
self.Check("""
from typing import Generic, List, TypeVar, overload
T = TypeVar('T')
class Foo(Generic[T]):
@overload
def f(self, x: int) -> T: ...
@overload
def f(self, x: str) -> List[T]: ...
def f(self, x):
return __any_object__
""")
def test_multiple_matches_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import overload
@overload
def f(x: str) -> str: ...
@overload
def f(x: bytes) -> bytes: ...
""",
)]):
self.Check("""
import foo
from typing import Tuple
def f(arg) -> Tuple[str, str]:
x = 'hello world' if __random__ else arg
y = arg if __random__ else 'goodbye world'
return foo.f(x), foo.f(y)
""")
def test_generic(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import AnyStr, Generic, overload
class C(Generic[AnyStr]):
@overload
def f(self: C[str], x: str) -> str: ...
@overload
def f(self: C[bytes], x: bytes) -> bytes: ...
""",
)]):
ty = self.Infer("""
import foo
def f(c: foo.C[str]):
return filter(c.f, [""])
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Iterator
def f(c: foo.C[str]) -> Iterator[str]: ...
""",
)
|
OverloadTest
|
python
|
openai__openai-python
|
tests/api_resources/chat/completions/test_messages.py
|
{
"start": 455,
"end": 2571
}
|
class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_list(self, client: OpenAI) -> None:
message = client.chat.completions.messages.list(
completion_id="completion_id",
)
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
@parametrize
def test_method_list_with_all_params(self, client: OpenAI) -> None:
message = client.chat.completions.messages.list(
completion_id="completion_id",
after="after",
limit=0,
order="asc",
)
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
@parametrize
def test_raw_response_list(self, client: OpenAI) -> None:
response = client.chat.completions.messages.with_raw_response.list(
completion_id="completion_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
@parametrize
def test_streaming_response_list(self, client: OpenAI) -> None:
with client.chat.completions.messages.with_streaming_response.list(
completion_id="completion_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
message = response.parse()
assert_matches_type(SyncCursorPage[ChatCompletionStoreMessage], message, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `completion_id` but received ''"):
client.chat.completions.messages.with_raw_response.list(
completion_id="",
)
|
TestMessages
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDict19.py
|
{
"start": 269,
"end": 317
}
|
class ____(TypedDict):
x: NotRequired[str]
|
TD1
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataproc_metastore.py
|
{
"start": 31999,
"end": 35468
}
|
class ____(GoogleCloudBaseOperator):
"""
Get the details of a single service.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param service_id: Required. The ID of the metastore service, which is used as the final component of
the metastore service's name. This value must be between 2 and 63 characters long inclusive, begin
with a letter, end with a letter or number, and consist of alphanumeric ASCII characters or
hyphens.
This corresponds to the ``service_id`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"impersonation_chain",
)
operator_extra_links = (DataprocMetastoreLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
service_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.service_id = service_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"service_id": self.service_id,
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict:
hook = DataprocMetastoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Gets the details of a single Dataproc Metastore service: %s", self.project_id)
result = hook.get_service(
region=self.region,
project_id=self.project_id,
service_id=self.service_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
DataprocMetastoreLink.persist(context=context, url=METASTORE_SERVICE_LINK)
return Service.to_dict(result)
|
DataprocMetastoreGetServiceOperator
|
python
|
pytest-dev__pytest
|
src/_pytest/python.py
|
{
"start": 44650,
"end": 60020
}
|
class ____:
"""Objects passed to the :hook:`pytest_generate_tests` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
"""
def __init__(
self,
definition: FunctionDefinition,
fixtureinfo: fixtures.FuncFixtureInfo,
config: Config,
cls=None,
module=None,
*,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
#: Access to the underlying :class:`_pytest.python.FunctionDefinition`.
self.definition = definition
#: Access to the :class:`pytest.Config` object for the test session.
self.config = config
#: The module object where the test function is defined in.
self.module = module
#: Underlying Python test function.
self.function = definition.obj
#: Set of fixture names required by the test function.
self.fixturenames = fixtureinfo.names_closure
#: Class object where the test function is defined in or ``None``.
self.cls = cls
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
# Result of parametrize().
self._calls: list[CallSpec2] = []
self._params_directness: dict[str, Literal["indirect", "direct"]] = {}
def parametrize(
self,
argnames: str | Sequence[str],
argvalues: Iterable[ParameterSet | Sequence[object] | object],
indirect: bool | Sequence[str] = False,
ids: Iterable[object | None] | Callable[[Any], object | None] | None = None,
scope: _ScopeName | None = None,
*,
_param_mark: Mark | None = None,
) -> None:
"""Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting ``indirect`` to do it at test setup time instead.
Can be called multiple times per test function (but only on different
argument names), in which case each call parametrizes all previous
parametrizations, e.g.
::
unparametrized: t
parametrize ["x", "y"]: t[x], t[y]
parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2]
:param argnames:
A comma-separated string denoting one or more argument names, or
a list/tuple of argument strings.
:param argvalues:
The list of argvalues determines how often a test is invoked with
different argument values.
If only one argname was specified argvalues is a list of values.
If N argnames were specified, argvalues must be a list of
N-tuples, where each tuple-element specifies a value for its
respective argname.
.. versionchanged:: 9.1
Passing a non-:class:`~collections.abc.Collection` iterable
(such as a generator or iterator) is deprecated. See
:ref:`parametrize-iterators` for details.
:param indirect:
A list of arguments' names (subset of argnames) or a boolean.
If True the list contains all names from the argnames. Each
argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:param ids:
Sequence of (or generator for) ids for ``argvalues``,
or a callable to return part of the id for each argvalue.
With sequences (and generators like ``itertools.count()``) the
returned ids should be of type ``string``, ``int``, ``float``,
``bool``, or ``None``.
They are mapped to the corresponding index in ``argvalues``.
``None`` means to use the auto-generated id.
.. versionadded:: 8.4
:ref:`hidden-param` means to hide the parameter set
from the test name. Can only be used at most 1 time, as
test names need to be unique.
If it is a callable it will be called for each entry in
``argvalues``, and the return value is used as part of the
auto-generated id for the whole set (where parts are joined with
dashes ("-")).
This is useful to provide more specific ids for certain items, e.g.
dates. Returning ``None`` will use an auto-generated id.
If no ids are provided they will be generated automatically from
the argvalues.
:param scope:
If specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
nodeid = self.definition.nodeid
argnames, parametersets = ParameterSet._for_parametrize(
argnames,
argvalues,
self.function,
self.config,
nodeid=self.definition.nodeid,
)
del argvalues
if "request" in argnames:
fail(
f"{nodeid}: 'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
pytrace=False,
)
if scope is not None:
scope_ = Scope.from_user(
scope, descr=f"parametrize() call in {self.function.__name__}"
)
else:
scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
self._validate_if_using_arg_names(argnames, indirect)
# Use any already (possibly) generated ids with parametrize Marks.
if _param_mark and _param_mark._param_ids_from:
generated_ids = _param_mark._param_ids_from._param_ids_generated
if generated_ids is not None:
ids = generated_ids
ids = self._resolve_parameter_set_ids(
argnames, ids, parametersets, nodeid=self.definition.nodeid
)
# Store used (possibly generated) ids with parametrize Marks.
if _param_mark and _param_mark._param_ids_from and generated_ids is None:
object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
# Calculate directness.
arg_directness = _resolve_args_directness(
argnames, indirect, self.definition.nodeid
)
self._params_directness.update(arg_directness)
# Add direct parametrizations as fixturedefs to arg2fixturedefs by
# registering artificial "pseudo" FixtureDef's such that later at test
# setup time we can rely on FixtureDefs to exist for all argnames.
node = None
# For scopes higher than function, a "pseudo" FixtureDef might have
# already been created for the scope. We thus store and cache the
# FixtureDef on the node related to the scope.
if scope_ is Scope.Function:
name2pseudofixturedef = None
else:
collector = self.definition.parent
assert collector is not None
node = get_scope_node(collector, scope_)
if node is None:
# If used class scope and there is no class, use module-level
# collector (for now).
if scope_ is Scope.Class:
assert isinstance(collector, Module)
node = collector
# If used package scope and there is no package, use session
# (for now).
elif scope_ is Scope.Package:
node = collector.session
else:
assert False, f"Unhandled missing scope: {scope}"
default: dict[str, FixtureDef[Any]] = {}
name2pseudofixturedef = node.stash.setdefault(
name2pseudofixturedef_key, default
)
for argname in argnames:
if arg_directness[argname] == "indirect":
continue
if name2pseudofixturedef is not None and argname in name2pseudofixturedef:
fixturedef = name2pseudofixturedef[argname]
else:
fixturedef = FixtureDef(
config=self.config,
baseid="",
argname=argname,
func=get_direct_param_fixture_func,
scope=scope_,
params=None,
ids=None,
_ispytest=True,
)
if name2pseudofixturedef is not None:
name2pseudofixturedef[argname] = fixturedef
self._arg2fixturedefs[argname] = [fixturedef]
# Create the new calls: if we are parametrize() multiple times (by applying the decorator
# more than once) then we accumulate those calls generating the cartesian product
# of all calls.
newcalls = []
for callspec in self._calls or [CallSpec2()]:
for param_index, (param_id, param_set) in enumerate(
zip(ids, parametersets, strict=True)
):
newcallspec = callspec.setmulti(
argnames=argnames,
valset=param_set.values,
id=param_id,
marks=param_set.marks,
scope=scope_,
param_index=param_index,
nodeid=nodeid,
)
newcalls.append(newcallspec)
self._calls = newcalls
def _resolve_parameter_set_ids(
self,
argnames: Sequence[str],
ids: Iterable[object | None] | Callable[[Any], object | None] | None,
parametersets: Sequence[ParameterSet],
nodeid: str,
) -> list[str | _HiddenParam]:
"""Resolve the actual ids for the given parameter sets.
:param argnames:
Argument names passed to ``parametrize()``.
:param ids:
The `ids` parameter of the ``parametrize()`` call (see docs).
:param parametersets:
The parameter sets, each containing a set of values corresponding
to ``argnames``.
:param nodeid str:
The nodeid of the definition item that generated this
parametrization.
:returns:
List with ids for each parameter set given.
"""
if ids is None:
idfn = None
ids_ = None
elif callable(ids):
idfn = ids
ids_ = None
else:
idfn = None
ids_ = self._validate_ids(ids, parametersets)
id_maker = IdMaker(
argnames,
parametersets,
idfn,
ids_,
self.config,
nodeid=nodeid,
)
return id_maker.make_unique_parameterset_ids()
def _validate_ids(
self,
ids: Iterable[object | None],
parametersets: Sequence[ParameterSet],
) -> list[object | None]:
try:
num_ids = len(ids) # type: ignore[arg-type]
except TypeError:
try:
iter(ids)
except TypeError as e:
raise TypeError("ids must be a callable or an iterable") from e
num_ids = len(parametersets)
# num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
if num_ids != len(parametersets) and num_ids != 0:
nodeid = self.definition.nodeid
fail(
f"In {nodeid}: {len(parametersets)} parameter sets specified, with different number of ids: {num_ids}",
pytrace=False,
)
return list(itertools.islice(ids, num_ids))
def _validate_if_using_arg_names(
self,
argnames: Sequence[str],
indirect: bool | Sequence[str],
) -> None:
"""Check if all argnames are being used, by default values, or directly/indirectly.
:param List[str] argnames: List of argument names passed to ``parametrize()``.
:param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
:raises ValueError: If validation fails.
"""
default_arg_names = set(get_default_arg_names(self.function))
nodeid = self.definition.nodeid
for arg in argnames:
if arg not in self.fixturenames:
if arg in default_arg_names:
fail(
f"In {nodeid}: function already takes an argument '{arg}' with a default value",
pytrace=False,
)
else:
if isinstance(indirect, Sequence):
name = "fixture" if arg in indirect else "argument"
else:
name = "fixture" if indirect else "argument"
fail(
f"In {nodeid}: function uses no {name} '{arg}'",
pytrace=False,
)
def _recompute_direct_params_indices(self) -> None:
for argname, param_type in self._params_directness.items():
if param_type == "direct":
for i, callspec in enumerate(self._calls):
callspec.indices[argname] = i
def _find_parametrized_scope(
argnames: Sequence[str],
arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]],
indirect: bool | Sequence[str],
) -> Scope:
"""Find the most appropriate scope for a parametrized call based on its arguments.
When there's at least one direct argument, always use "function" scope.
When a test function is parametrized and all its arguments are indirect
(e.g. fixtures), return the most narrow scope based on the fixtures used.
Related to issue #1832, based on code posted by @Kingdread.
"""
if isinstance(indirect, Sequence):
all_arguments_are_fixtures = len(indirect) == len(argnames)
else:
all_arguments_are_fixtures = bool(indirect)
if all_arguments_are_fixtures:
fixturedefs = arg2fixturedefs or {}
used_scopes = [
fixturedef[-1]._scope
for name, fixturedef in fixturedefs.items()
if name in argnames
]
# Takes the most narrow scope from used fixtures.
return min(used_scopes, default=Scope.Function)
return Scope.Function
def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str:
if config is None:
escape_option = False
else:
escape_option = config.getini(
"disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
)
# TODO: If escaping is turned off and the user passes bytes,
# will return a bytes. For now we ignore this but the
# code *probably* doesn't handle this case.
return val if escape_option else ascii_escaped(val) # type: ignore
|
Metafunc
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/bigquery_dts.py
|
{
"start": 1765,
"end": 11660
}
|
class ____(GoogleBaseHook):
"""
Hook for Google Bigquery Transfer API.
All the methods in the hook where ``project_id`` is used must be called with
keyword arguments rather than positional.
"""
_conn: Resource | None = None
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self.location = location
@staticmethod
def _disable_auto_scheduling(config: dict | TransferConfig) -> TransferConfig:
"""
Create a transfer config with the automatic scheduling disabled.
In the case of Airflow, the customer needs to create a transfer config
with the automatic scheduling disabled (UI, CLI or an Airflow operator) and
then trigger a transfer run using a specialized Airflow operator that will
call start_manual_transfer_runs.
:param config: Data transfer configuration to create.
"""
config = TransferConfig.to_dict(config) if isinstance(config, TransferConfig) else config
new_config = copy(config)
schedule_options = new_config.get("schedule_options")
if schedule_options:
disable_auto_scheduling = schedule_options.get("disable_auto_scheduling", None)
if disable_auto_scheduling is None:
schedule_options["disable_auto_scheduling"] = True
else:
new_config["schedule_options"] = {"disable_auto_scheduling": True}
return TransferConfig(**new_config)
def get_conn(self) -> DataTransferServiceClient:
"""
Retrieve connection to Google Bigquery.
:return: Google Bigquery API client
"""
if not self._conn:
self._conn = DataTransferServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO
)
return self._conn
@GoogleBaseHook.fallback_to_default_project_id
def create_transfer_config(
self,
transfer_config: dict | TransferConfig,
project_id: str = PROVIDE_PROJECT_ID,
authorization_code: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TransferConfig:
"""
Create a new data transfer configuration.
:param transfer_config: Data transfer configuration to create.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param authorization_code: authorization code to use with this transfer configuration.
This is required if new credentials are needed.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: A ``google.cloud.bigquery_datatransfer_v1.types.TransferConfig`` instance.
"""
client = self.get_conn()
parent = f"projects/{project_id}"
if self.location:
parent = f"{parent}/locations/{self.location}"
return client.create_transfer_config(
request={
"parent": parent,
"transfer_config": self._disable_auto_scheduling(transfer_config),
"authorization_code": authorization_code,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_transfer_config(
self,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete transfer configuration.
:param transfer_config_id: Id of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: None
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
name = f"{project}/transferConfigs/{transfer_config_id}"
return client.delete_transfer_config(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def start_manual_transfer_runs(
self,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
requested_time_range: dict | None = None,
requested_run_time: dict | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> StartManualTransferRunsResponse:
"""
Start manual transfer runs to be executed now with schedule_time equal to current time.
The transfer runs can be created for a time range where the run_time is between
start_time (inclusive) and end_time (exclusive), or for a specific run_time.
:param transfer_config_id: Id of transfer config to be used.
:param requested_time_range: Time range for the transfer runs that should be started.
If a dict is provided, it must be of the same form as the protobuf
message `~google.cloud.bigquery_datatransfer_v1.types.TimeRange`
:param requested_run_time: Specific run_time for a transfer run to be started. The
requested_run_time must not be in the future. If a dict is provided, it
must be of the same form as the protobuf message
`~google.cloud.bigquery_datatransfer_v1.types.Timestamp`
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: An ``google.cloud.bigquery_datatransfer_v1.types.StartManualTransferRunsResponse`` instance.
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
parent = f"{project}/transferConfigs/{transfer_config_id}"
return client.start_manual_transfer_runs(
request={
"parent": parent,
"requested_time_range": requested_time_range,
"requested_run_time": requested_run_time,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_transfer_run(
self,
run_id: str,
transfer_config_id: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> TransferRun:
"""
Return information about the particular transfer run.
:param run_id: ID of the transfer run.
:param transfer_config_id: ID of transfer config to be used.
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:param metadata: Additional metadata that is provided to the method.
:return: An ``google.cloud.bigquery_datatransfer_v1.types.TransferRun`` instance.
"""
client = self.get_conn()
project = f"projects/{project_id}"
if self.location:
project = f"{project}/locations/{self.location}"
name = f"{project}/transferConfigs/{transfer_config_id}/runs/{run_id}"
return client.get_transfer_run(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
|
BiqQueryDataTransferServiceHook
|
python
|
kamyu104__LeetCode-Solutions
|
Python/balanced-k-factor-decomposition.py
|
{
"start": 1337,
"end": 2437
}
|
class ____(object):
def minDifference(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
def factors(n):
for i in xrange(1, n+1):
if i*i > n:
break
if n%i:
continue
yield i
if n//i != i:
yield n//i
def backtracking(remain):
start = curr[-1] if curr else 1
if len(curr) == k-1 and remain >= start:
curr.append(remain)
if not result or result[-1]-result[0] > curr[-1]-curr[0]:
result[:] = curr
curr.pop()
return
for i in factors(remain):
if i < start:
continue
curr.append(i)
backtracking(remain//i)
curr.pop()
result, curr = [], []
backtracking(n)
return result
# Time: O(2^(k-1) * k * n)
# Space: O(k)
# backtracking, number theory
|
Solution2
|
python
|
nedbat__coveragepy
|
coverage/exceptions.py
|
{
"start": 810,
"end": 899
}
|
class ____(CoverageException):
"""An error in using a data file."""
pass
|
DataError
|
python
|
miyuchina__mistletoe
|
test/test_latex_renderer.py
|
{
"start": 5454,
"end": 6589
}
|
class ____(TestCase):
def setUp(self):
self.renderer = LaTeXRenderer()
self.renderer.__enter__()
self.addCleanup(self.renderer.__exit__, None, None, None)
def test_footnote_image(self):
from mistletoe import Document
raw = ['![alt][foo]\n', '\n', '[foo]: bar "title"\n']
expected = ('\\documentclass{article}\n'
'\\usepackage{graphicx}\n'
'\\begin{document}\n'
'\n'
'\n\\includegraphics{bar}\n'
'\n'
'\\end{document}\n')
self.assertEqual(self.renderer.render(Document(raw)), expected)
def test_footnote_link(self):
from mistletoe import Document
raw = ['[name][key]\n', '\n', '[key]: target\n']
expected = ('\\documentclass{article}\n'
'\\usepackage{hyperref}\n'
'\\begin{document}\n'
'\n'
'\\href{target}{name}'
'\n'
'\\end{document}\n')
self.assertEqual(self.renderer.render(Document(raw)), expected)
|
TestLaTeXFootnotes
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/_type1font.py
|
{
"start": 2754,
"end": 2812
}
|
class ____(_Token):
kind = 'whitespace'
|
_WhitespaceToken
|
python
|
getsentry__sentry
|
tests/sentry/integrations/discord/message_builder/test_flags.py
|
{
"start": 169,
"end": 702
}
|
class ____(TestCase):
def assert_bits_are_set(self, value: int, bits: list[int]) -> None:
expected = 0
for bit in bits:
expected = expected | 1 << bit
assert (value & 1 << bit) == 1 << bit
assert expected == value
def test_none(self) -> None:
flags = DiscordMessageFlags()
assert flags.value == 0
def test_ephemeral(self) -> None:
flags = DiscordMessageFlags().set_ephemeral()
self.assert_bits_are_set(flags.value, [6])
|
TestDiscordMessageFlags
|
python
|
numba__numba
|
numba/tests/test_interpreter.py
|
{
"start": 1741,
"end": 15412
}
|
class ____(MemoryLeakMixin, TestCase):
"""
gh #7812
Tests that check a peephole optimization for Function calls
in Python 3.10. The bytecode changes when
(n_args / 2) + n_kws > 15, which moves the arguments from
the stack into a tuple and dictionary.
This peephole optimization updates the IR to use the original format.
There are different paths when n_args > 30 and n_args <= 30 and when
n_kws > 15 and n_kws <= 15.
"""
THRESHOLD_ARGS = 31
THRESHOLD_KWS = 16
def gen_func(self, n_args, n_kws):
"""
Generates a function that calls sum_jit_func
with the desired number of args and kws.
"""
param_list = [f"arg{i}" for i in range(n_args + n_kws)]
args_list = []
for i in range(n_args + n_kws):
# Call a function on every 5th argument to ensure
# we test function calls.
if i % 5 == 0:
arg_val = f"pow(arg{i}, 2)"
else:
arg_val = f"arg{i}"
args_list.append(arg_val)
total_params = ", ".join(param_list)
func_text = f"def impl({total_params}):\n"
func_text += " return sum_jit_func(\n"
for i in range(n_args):
func_text += f" {args_list[i]},\n"
for i in range(n_args, n_args + n_kws):
func_text += f" {param_list[i]}={args_list[i]},\n"
func_text += " )\n"
local_vars = {}
exec(func_text, {"sum_jit_func": sum_jit_func}, local_vars)
return local_vars["impl"]
@skip_unless_py10_or_later
def test_all_args(self):
"""
Tests calling a function when n_args > 30 and
n_kws = 0. This shouldn't use the peephole, but
it should still succeed.
"""
total_args = [i for i in range(self.THRESHOLD_ARGS)]
f = self.gen_func(self.THRESHOLD_ARGS, 0)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_all_kws(self):
"""
Tests calling a function when n_kws > 15 and
n_args = 0.
"""
total_args = [i for i in range(self.THRESHOLD_KWS)]
f = self.gen_func(0, self.THRESHOLD_KWS)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_small_args_small_kws(self):
"""
Tests calling a function when (n_args / 2) + n_kws > 15,
but n_args <= 30 and n_kws <= 15
"""
used_args = self.THRESHOLD_ARGS - 1
used_kws = self.THRESHOLD_KWS - 1
total_args = [i for i in range((used_args) + (used_kws))]
f = self.gen_func(used_args, used_kws)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_small_args_large_kws(self):
"""
Tests calling a function when (n_args / 2) + n_kws > 15,
but n_args <= 30 and n_kws > 15
"""
used_args = self.THRESHOLD_ARGS - 1
used_kws = self.THRESHOLD_KWS
total_args = [i for i in range((used_args) + (used_kws))]
f = self.gen_func(used_args, used_kws)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_large_args_small_kws(self):
"""
Tests calling a function when (n_args / 2) + n_kws > 15,
but n_args > 30 and n_kws <= 15
"""
used_args = self.THRESHOLD_ARGS
used_kws = self.THRESHOLD_KWS - 1
total_args = [i for i in range((used_args) + (used_kws))]
f = self.gen_func(used_args, used_kws)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_large_args_large_kws(self):
"""
Tests calling a function when (n_args / 2) + n_kws > 15,
but n_args > 30 and n_kws > 15
"""
used_args = self.THRESHOLD_ARGS
used_kws = self.THRESHOLD_KWS
total_args = [i for i in range((used_args) + (used_kws))]
f = self.gen_func(used_args, used_kws)
py_func = f
cfunc = njit()(f)
a = py_func(*total_args)
b = cfunc(*total_args)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_large_kws_objmode(self):
"""
Tests calling an objectmode function with > 15 return values.
"""
def py_func():
return (
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
)
@njit
def objmode_func():
"""
Wrapper to call py_func from objmode. This tests
large kws with objmode. If the definition for the
call is not properly updated this test will fail.
"""
with objmode(
a='int64',
b='int64',
c='int64',
d='int64',
e='int64',
f='int64',
g='int64',
h='int64',
i='int64',
j='int64',
k='int64',
l='int64',
m='int64',
n='int64',
o='int64',
p='int64',
):
(
a,
b,
c,
d,
e,
f,
g,
h,
i,
j,
k,
l,
m,
n,
o,
p
) = py_func()
return (
a
+ b
+ c
+ d
+ e
+ f
+ g
+ h
+ i
+ j
+ k
+ l
+ m
+ n
+ o
+ p
)
a = sum(list(py_func()))
b = objmode_func()
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_large_args_inline_controlflow(self):
"""
Tests generating large args when one of the inputs
has inlined controlflow.
"""
def inline_func(flag):
return sum_jit_func(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1 if flag else 2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
arg41=1,
)
with self.assertRaises(UnsupportedBytecodeError) as raises:
njit()(inline_func)(False)
self.assertIn(
'You can resolve this issue by moving the control flow out',
str(raises.exception)
)
@skip_unless_py10_or_later
def test_large_args_noninlined_controlflow(self):
"""
Tests generating large args when one of the inputs
has the change suggested in the error message
for inlined control flow.
"""
def inline_func(flag):
a_val = 1 if flag else 2
return sum_jit_func(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
a_val,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
arg41=1,
)
py_func = inline_func
cfunc = njit()(inline_func)
a = py_func(False)
b = cfunc(False)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_all_args_inline_controlflow(self):
"""
Tests generating only large args when one of the inputs
has inlined controlflow. This requires a special check
inside peep_hole_call_function_ex_to_call_function_kw
because it usually only handles varkwargs.
"""
def inline_func(flag):
return sum_jit_func(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1 if flag else 2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
)
with self.assertRaises(UnsupportedBytecodeError) as raises:
njit()(inline_func)(False)
self.assertIn(
'You can resolve this issue by moving the control flow out',
str(raises.exception)
)
@skip_unless_py10_or_later
def test_all_args_noninlined_controlflow(self):
"""
Tests generating large args when one of the inputs
has the change suggested in the error message
for inlined control flow.
"""
def inline_func(flag):
a_val = 1 if flag else 2
return sum_jit_func(
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
a_val,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
)
py_func = inline_func
cfunc = njit()(inline_func)
a = py_func(False)
b = cfunc(False)
self.assertEqual(a, b)
@skip_unless_py10_or_later
def test_large_kws_inline_controlflow(self):
"""
Tests generating large kws when one of the inputs
has inlined controlflow.
"""
def inline_func(flag):
return sum_jit_func(
arg0=1,
arg1=1,
arg2=1,
arg3=1,
arg4=1,
arg5=1,
arg6=1,
arg7=1,
arg8=1,
arg9=1,
arg10=1,
arg11=1,
arg12=1,
arg13=1,
arg14=1,
arg15=1 if flag else 2,
)
with self.assertRaises(UnsupportedBytecodeError) as raises:
njit()(inline_func)(False)
self.assertIn(
'You can resolve this issue by moving the control flow out',
str(raises.exception)
)
@skip_unless_py10_or_later
def test_large_kws_noninlined_controlflow(self):
"""
Tests generating large kws when one of the inputs
has the change suggested in the error message
for inlined control flow.
"""
def inline_func(flag):
a_val = 1 if flag else 2
return sum_jit_func(
arg0=1,
arg1=1,
arg2=1,
arg3=1,
arg4=1,
arg5=1,
arg6=1,
arg7=1,
arg8=1,
arg9=1,
arg10=1,
arg11=1,
arg12=1,
arg13=1,
arg14=1,
arg15=a_val,
)
py_func = inline_func
cfunc = njit()(inline_func)
a = py_func(False)
b = cfunc(False)
self.assertEqual(a, b)
|
TestCallFunctionExPeepHole
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType24.py
|
{
"start": 377,
"end": 445
}
|
class ____(Parent[T]):
def m2(self) -> None:
self.m1()
|
Child
|
python
|
django__django
|
tests/serializers/models/data.py
|
{
"start": 5267,
"end": 5353
}
|
class ____(models.Model):
data = models.IntegerField(primary_key=True)
|
IntegerPKData
|
python
|
pypa__setuptools
|
setuptools/_vendor/importlib_metadata/__init__.py
|
{
"start": 8779,
"end": 20157
}
|
class ____(metaclass=abc.ABCMeta):
"""
An abstract Python distribution package.
Custom providers may derive from this class and define
the abstract methods to provide a concrete implementation
for their environment. Some providers may opt to override
the default implementation of some properties to bypass
the file-reading mechanism.
"""
@abc.abstractmethod
def read_text(self, filename) -> Optional[str]:
"""Attempt to load metadata file given by the name.
Python distribution metadata is organized by blobs of text
typically represented as "files" in the metadata directory
(e.g. package-1.0.dist-info). These files include things
like:
- METADATA: The distribution metadata including fields
like Name and Version and Description.
- entry_points.txt: A series of entry points as defined in
`the entry points spec <https://packaging.python.org/en/latest/specifications/entry-points/#file-format>`_.
- RECORD: A record of files according to
`this recording spec <https://packaging.python.org/en/latest/specifications/recording-installed-packages/#the-record-file>`_.
A package may provide any set of files, including those
not listed here or none at all.
:param filename: The name of the file in the distribution info.
:return: The text if found, otherwise None.
"""
@abc.abstractmethod
def locate_file(self, path: str | os.PathLike[str]) -> SimplePath:
"""
Given a path to a file in this distribution, return a SimplePath
to it.
"""
@classmethod
def from_name(cls, name: str) -> Distribution:
"""Return the Distribution for the given package name.
:param name: The name of the distribution package to search for.
:return: The Distribution instance (or subclass thereof) for the named
package, if found.
:raises PackageNotFoundError: When the named package's distribution
metadata cannot be found.
:raises ValueError: When an invalid value is supplied for name.
"""
if not name:
raise ValueError("A distribution name is required.")
try:
return next(iter(cls.discover(name=name)))
except StopIteration:
raise PackageNotFoundError(name)
@classmethod
def discover(
cls, *, context: Optional[DistributionFinder.Context] = None, **kwargs
) -> Iterable[Distribution]:
"""Return an iterable of Distribution objects for all packages.
Pass a ``context`` or pass keyword arguments for constructing
a context.
:context: A ``DistributionFinder.Context`` object.
:return: Iterable of Distribution objects for packages matching
the context.
"""
if context and kwargs:
raise ValueError("cannot accept context and kwargs")
context = context or DistributionFinder.Context(**kwargs)
return itertools.chain.from_iterable(
resolver(context) for resolver in cls._discover_resolvers()
)
@staticmethod
def at(path: str | os.PathLike[str]) -> Distribution:
"""Return a Distribution for the indicated metadata path.
:param path: a string or path-like object
:return: a concrete Distribution instance for the path
"""
return PathDistribution(pathlib.Path(path))
@staticmethod
def _discover_resolvers():
"""Search the meta_path for resolvers (MetadataPathFinders)."""
declared = (
getattr(finder, 'find_distributions', None) for finder in sys.meta_path
)
return filter(None, declared)
@property
def metadata(self) -> _meta.PackageMetadata:
"""Return the parsed metadata for this Distribution.
The returned object will have keys that name the various bits of
metadata per the
`Core metadata specifications <https://packaging.python.org/en/latest/specifications/core-metadata/#core-metadata>`_.
Custom providers may provide the METADATA file or override this
property.
"""
# deferred for performance (python/cpython#109829)
from . import _adapters
opt_text = (
self.read_text('METADATA')
or self.read_text('PKG-INFO')
# This last clause is here to support old egg-info files. Its
# effect is to just end up using the PathDistribution's self._path
# (which points to the egg-info file) attribute unchanged.
or self.read_text('')
)
text = cast(str, opt_text)
return _adapters.Message(email.message_from_string(text))
@property
def name(self) -> str:
"""Return the 'Name' metadata for the distribution package."""
return self.metadata['Name']
@property
def _normalized_name(self):
"""Return a normalized version of the name."""
return Prepared.normalize(self.name)
@property
def version(self) -> str:
"""Return the 'Version' metadata for the distribution package."""
return self.metadata['Version']
@property
def entry_points(self) -> EntryPoints:
"""
Return EntryPoints for this distribution.
Custom providers may provide the ``entry_points.txt`` file
or override this property.
"""
return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
@property
def files(self) -> Optional[List[PackagePath]]:
"""Files in this distribution.
:return: List of PackagePath for this distribution or None
Result is `None` if the metadata file that enumerates files
(i.e. RECORD for dist-info, or installed-files.txt or
SOURCES.txt for egg-info) is missing.
Result may be empty if the metadata exists but is empty.
Custom providers are recommended to provide a "RECORD" file (in
``read_text``) or override this property to allow for callers to be
able to resolve filenames provided by the package.
"""
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = FileHash(hash) if hash else None
result.size = int(size_str) if size_str else None
result.dist = self
return result
@pass_none
def make_files(lines):
# Delay csv import, since Distribution.files is not as widely used
# as other parts of importlib.metadata
import csv
return starmap(make_file, csv.reader(lines))
@pass_none
def skip_missing_files(package_paths):
return list(filter(lambda path: path.locate().exists(), package_paths))
return skip_missing_files(
make_files(
self._read_files_distinfo()
or self._read_files_egginfo_installed()
or self._read_files_egginfo_sources()
)
)
def _read_files_distinfo(self):
"""
Read the lines of RECORD.
"""
text = self.read_text('RECORD')
return text and text.splitlines()
def _read_files_egginfo_installed(self):
"""
Read installed-files.txt and return lines in a similar
CSV-parsable format as RECORD: each file must be placed
relative to the site-packages directory and must also be
quoted (since file names can contain literal commas).
This file is written when the package is installed by pip,
but it might not be written for other installation methods.
Assume the file is accurate if it exists.
"""
text = self.read_text('installed-files.txt')
# Prepend the .egg-info/ subdir to the lines in this file.
# But this subdir is only available from PathDistribution's
# self._path.
subdir = getattr(self, '_path', None)
if not text or not subdir:
return
paths = (
py311.relative_fix((subdir / name).resolve())
.relative_to(self.locate_file('').resolve(), walk_up=True)
.as_posix()
for name in text.splitlines()
)
return map('"{}"'.format, paths)
def _read_files_egginfo_sources(self):
"""
Read SOURCES.txt and return lines in a similar CSV-parsable
format as RECORD: each file name must be quoted (since it
might contain literal commas).
Note that SOURCES.txt is not a reliable source for what
files are installed by a package. This file is generated
for a source archive, and the files that are present
there (e.g. setup.py) may not correctly reflect the files
that are present after the package has been installed.
"""
text = self.read_text('SOURCES.txt')
return text and map('"{}"'.format, text.splitlines())
@property
def requires(self) -> Optional[List[str]]:
"""Generated requirements specified for this Distribution"""
reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
return reqs and list(reqs)
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return pass_none(self._deps_from_requires_text)(source)
@classmethod
def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
@staticmethod
def _convert_egg_info_reqs_to_simple_reqs(sections):
"""
Historically, setuptools would solicit and store 'extra'
requirements, including those with environment markers,
in separate sections. More modern tools expect each
dependency to be defined separately, with any relevant
extras and environment markers attached directly to that
requirement. This method converts the former to the
latter. See _test_deps_from_requires_text for an example.
"""
def make_condition(name):
return name and f'extra == "{name}"'
def quoted_marker(section):
section = section or ''
extra, sep, markers = section.partition(':')
if extra and markers:
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return '; ' + ' and '.join(conditions) if conditions else ''
def url_req_space(req):
"""
PEP 508 requires a space between the url_spec and the quoted_marker.
Ref python/importlib_metadata#357.
"""
# '@' is uniquely indicative of a url_req.
return ' ' * ('@' in req)
for section in sections:
space = url_req_space(section.value)
yield section.value + space + quoted_marker(section.name)
@property
def origin(self):
return self._load_json('direct_url.json')
def _load_json(self, filename):
return pass_none(json.loads)(
self.read_text(filename),
object_hook=lambda data: types.SimpleNamespace(**data),
)
|
Distribution
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_versioning.py
|
{
"start": 61035,
"end": 64697
}
|
class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class User(Base):
__tablename__ = "user"
id = Column(Integer, primary_key=True)
class Parent(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
version_id = Column(Integer)
updated_by_id = Column(
Integer,
ForeignKey("user.id"),
)
updated_by = relationship(
"User",
foreign_keys=[updated_by_id],
post_update=True,
)
__mapper_args__ = {
"version_id_col": version_id,
}
def test_bumped_version_id_on_update(self):
"""test for #10800"""
User, Parent = self.classes("User", "Parent")
session = fixture_session()
u1 = User(id=1)
u2 = User(id=2)
p1 = Parent(id=1, updated_by=u1)
session.add(u1)
session.add(u2)
session.add(p1)
u2id = u2.id
session.commit()
session.close()
p1 = session.get(Parent, 1)
p1.updated_by
p1.version_id = p1.version_id
p1.updated_by_id = u2id
assert "version_id" in inspect(p1).committed_state
with self.sql_execution_asserter(testing.db) as asserter:
session.commit()
asserter.assert_(
CompiledSQL(
"UPDATE parent SET version_id=:version_id, "
"updated_by_id=:updated_by_id WHERE parent.id = :parent_id "
"AND parent.version_id = :parent_version_id",
[
{
"version_id": 2,
"updated_by_id": 2,
"parent_id": 1,
"parent_version_id": 1,
}
],
),
CompiledSQL(
"UPDATE parent SET version_id=:version_id, "
"updated_by_id=:updated_by_id WHERE parent.id = :parent_id "
"AND parent.version_id = :parent_version_id",
[
{
"version_id": 3,
"updated_by_id": 2,
"parent_id": 1,
"parent_version_id": 2,
}
],
),
)
def test_bumped_version_id_on_delete(self):
"""test for #10967"""
User, Parent = self.classes("User", "Parent")
session = fixture_session()
u1 = User(id=1)
p1 = Parent(id=1, updated_by=u1)
session.add(u1)
session.add(p1)
session.flush()
session.delete(p1)
with self.sql_execution_asserter(testing.db) as asserter:
session.commit()
asserter.assert_(
CompiledSQL(
"UPDATE parent SET version_id=:version_id, "
"updated_by_id=:updated_by_id WHERE parent.id = :parent_id "
"AND parent.version_id = :parent_version_id",
[
{
"version_id": 2,
"updated_by_id": None,
"parent_id": 1,
"parent_version_id": 1,
}
],
),
CompiledSQL(
"DELETE FROM parent WHERE parent.id = :id AND "
"parent.version_id = :version_id",
[{"id": 1, "version_id": 2}],
),
)
|
PostUpdateVersioningTest
|
python
|
bokeh__bokeh
|
src/bokeh/settings.py
|
{
"start": 16871,
"end": 31237
}
|
class ____:
'''
'''
_config_override: dict[str, Any]
_config_user: dict[str, Any]
_config_system: dict[str, Any]
def __init__(self) -> None:
self._config_override = {}
self._config_user = self._try_load_config(_config_user_locations)
self._config_system = {} # TODO (bev)
for x in self.__class__.__dict__.values():
if isinstance(x, PrioritizedSetting):
x._parent = self
@property
def config_system(self) -> dict[str, Any]:
return dict(self._config_system)
@property
def config_user(self) -> dict[str, Any]:
return dict(self._config_user)
@property
def config_override(self) -> dict[str, Any]:
return dict(self._config_override)
@property
def dev(self) -> bool:
return is_dev()
allowed_ws_origin: PrioritizedSetting[list[str]] = PrioritizedSetting("allowed_ws_origin", "BOKEH_ALLOW_WS_ORIGIN", default=[], convert=convert_str_seq, help="""
A comma-separated list of allowed websocket origins for Bokeh server applications.
""")
auth_module: PrioritizedSetting[str | None] = PrioritizedSetting("auth_module", "BOKEH_AUTH_MODULE", default=None, help="""
A path to a Python modules that implements user authentication functions for
the Bokeh server.
.. warning::
The contents of this module will be executed!
""")
browser: PrioritizedSetting[str | None] = PrioritizedSetting("browser", "BOKEH_BROWSER", default=None, dev_default="none", help="""
The default browser that Bokeh should use to show documents with.
Valid values are any of the predefined browser names understood by the
Python standard library :doc:`webbrowser <python:library/webbrowser>`
module.
""")
cdn_version: PrioritizedSetting[str | None] = PrioritizedSetting("version", "BOKEH_CDN_VERSION", default=None, help="""
What version of BokehJS to use with CDN resources.
See the :class:`~bokeh.resources.Resources` class reference for full details.
""")
chromedriver_path: PrioritizedSetting[str | None] = PrioritizedSetting("chromedriver_path", "BOKEH_CHROMEDRIVER_PATH", default=None, help="""
The name of or full path to chromedriver's executable.
This is used to allow ``bokeh.io.export`` to work on systems that use a
different name for ``chromedriver``, like ``chromedriver-binary`` or
``chromium.chromedriver`` (or its variant, which is used for example
by Snap package manager; see https://snapcraft.io/).
""")
compression_level: PrioritizedSetting[int] = PrioritizedSetting("compression_level", "BOKEH_COMPRESSION_LEVEL", default=9, convert=convert_compression, help="""
In contexts where array buffers are base64-encoded (e.g. to embed inside
an HTML file), the buffer will first be compressed to save space.
Valid values are the standard gzip compression levels 0-9. A setting of 9
(the default) will result in the highest compression. A setting of 1 will
result in the least compression, but be faster. A setting of 0 will result
in no compression.
""")
cookie_secret: PrioritizedSetting[str | None] = PrioritizedSetting("cookie_secret", "BOKEH_COOKIE_SECRET", default=None, help="""
Configure the ``cookie_secret`` setting in Tornado. This value is required
if you use ``get_secure_cookie`` or ``set_secure_cookie``. It should be a
long, random sequence of bytes
""")
docs_cdn: PrioritizedSetting[str | None] = PrioritizedSetting("docs_cdn", "BOKEH_DOCS_CDN", default=None, help="""
The version of BokehJS that should be use for loading CDN resources when
building the docs.
To build and display the docs using a locally built BokehJS, use ``local``.
For example:
.. code-block:: sh
BOKEH_DOCS_CDN=local make clean serve
Will build a fresh copy of the docs using the locally built BokehJS and open
a new browser tab to view them.
Otherwise, the value is interpreted a version for CDN. For example:
.. code-block:: sh
BOKEH_DOCS_CDN=1.4.0rc1 make clean
will build docs that use BokehJS version ``1.4.0rc1`` from CDN.
""")
docs_version: PrioritizedSetting[str | None] = PrioritizedSetting("docs_version", "BOKEH_DOCS_VERSION", default=None, help="""
The Bokeh version to stipulate when building the docs.
This setting is necessary to re-deploy existing versions of docs with new
fixes or changes.
""")
ico_path: PrioritizedSetting[str] = PrioritizedSetting("ico_path", "BOKEH_ICO_PATH",
default="default", dev_default="default-dev", convert=convert_ico_path, help="""
Configure the file path to a .ico file for the Bokeh server to use as a
favicon.ico file.
The value should be the full path to a .ico file, or one the following
special values:
- ``default`` to use the default project .ico file
- ``none`` to turn off favicon.ico support entirely
""")
ignore_filename: PrioritizedSetting[bool] = PrioritizedSetting("ignore_filename", "BOKEH_IGNORE_FILENAME", default=False, convert=convert_bool, help="""
Whether to ignore the current script filename when saving Bokeh content.
""")
log_level: PrioritizedSetting[LogLevel] = PrioritizedSetting("log_level", "BOKEH_LOG_LEVEL", default="info", dev_default="debug", help="""
Set the log level for JavaScript BokehJS code.
Valid values are, in order of increasing severity:
- ``trace``
- ``debug``
- ``info``
- ``warn``
- ``error``
- ``fatal``
""")
minified: PrioritizedSetting[bool] = PrioritizedSetting("minified", "BOKEH_MINIFIED", convert=convert_bool, default=True, dev_default=False, help="""
Whether Bokeh should use minified BokehJS resources.
""")
nodejs_path: PrioritizedSetting[str | None] = PrioritizedSetting("nodejs_path", "BOKEH_NODEJS_PATH", default=None, help="""
Path to the Node executable.
NodeJS is an optional dependency that is required for PNG and SVG export,
and for compiling custom extensions. Bokeh will try to automatically locate
an installed Node executable. Use this environment variable to override the
location Bokeh finds, or to point to a non-standard location.
""")
perform_document_validation: PrioritizedSetting[bool] = PrioritizedSetting("validate_doc", "BOKEH_VALIDATE_DOC", convert=convert_bool, default=True, help="""
whether Bokeh should perform validation checks on documents.
Setting this value to False may afford a small performance improvement.
""")
pretty: PrioritizedSetting[bool] = PrioritizedSetting("pretty", "BOKEH_PRETTY", default=False, dev_default=True, help="""
Whether JSON strings should be pretty-printed.
""")
py_log_level: PrioritizedSetting[PyLogLevel] = PrioritizedSetting("py_log_level", "BOKEH_PY_LOG_LEVEL",
default="none", dev_default="debug", convert=convert_logging, help="""
The log level for Python Bokeh code.
Valid values are, in order of increasing severity:
- ``trace``
- ``debug``
- ``info``
- ``warn``
- ``error``
- ``fatal``
- ``none``
""")
resources: PrioritizedSetting[ResourcesMode] = PrioritizedSetting("resources", "BOKEH_RESOURCES", default="cdn", dev_default="server", help="""
What kind of BokehJS resources to configure, e.g ``inline`` or ``cdn``
See the :class:`~bokeh.resources.Resources` class reference for full details.
""")
rootdir: PrioritizedSetting[PathLike | None] = PrioritizedSetting("rootdir", "BOKEH_ROOTDIR", default=None, help="""
Root directory to use with ``relative`` resources
See the :class:`~bokeh.resources.Resources` class reference for full details.
""")
default_server_host: PrioritizedSetting[str] = PrioritizedSetting("default_server_host", "BOKEH_DEFAULT_SERVER_HOST", default="localhost", help="""
Allows to define the default host used by Bokeh's server and resources.
""")
default_server_port: PrioritizedSetting[int] = PrioritizedSetting("default_server_port", "BOKEH_DEFAULT_SERVER_PORT", default=5006, convert=convert_int, help="""
Allows to define the default port used by Bokeh's server and resources.
""")
secret_key: PrioritizedSetting[str | None] = PrioritizedSetting("secret_key", "BOKEH_SECRET_KEY", default=None, help="""
A long, cryptographically-random secret unique to a Bokeh deployment.
""")
serialize_include_defaults: PrioritizedSetting[bool] = \
PrioritizedSetting("serialize_include_defaults", "BOKEH_SERIALIZE_INCLUDE_DEFAULTS", default=False, help="""
Whether to include default values when serializing ``HasProps`` instances.
This is primarily useful for testing, debugging serialization/protocol and other internal purpose.
""")
sign_sessions: PrioritizedSetting[bool] = PrioritizedSetting("sign_sessions", "BOKEH_SIGN_SESSIONS", default=False, help="""
Whether the Bokeh server should only allow sessions signed with a secret key.
If True, ``BOKEH_SECRET_KEY`` must also be set.
""")
simple_ids: PrioritizedSetting[bool] = PrioritizedSetting("simple_ids", "BOKEH_SIMPLE_IDS", default=True, convert=convert_bool, help="""
Whether Bokeh should use simple integers for model IDs (starting at 1000).
If False, Bokeh will use UUIDs for object identifiers. This might be needed,
e.g., if multiple processes are contributing to a single Bokeh Document.
""")
ssl_certfile: PrioritizedSetting[str | None] = PrioritizedSetting("ssl_certfile", "BOKEH_SSL_CERTFILE", default=None, help="""
The path to a certificate file for SSL termination.
""")
ssl_keyfile: PrioritizedSetting[str | None] = PrioritizedSetting("ssl_keyfile", "BOKEH_SSL_KEYFILE", default=None, help="""
The path to a private key file for SSL termination.
""")
ssl_password: PrioritizedSetting[str | None] = PrioritizedSetting("ssl_password", "BOKEH_SSL_PASSWORD", default=None, help="""
A password to decrypt the SSL keyfile, if necessary.
""")
validation_level: PrioritizedSetting[ValidationLevel] = PrioritizedSetting("validation_level", "BOKEH_VALIDATION_LEVEL",
default="none", convert=convert_validation, help="""
Whether validation checks should log or raise exceptions on errors and warnings.
Valid values are:
- ``none``: no exceptions raised (default).
- ``errors``: exception raised on errors (but not on warnings)
- ``all``: exception raised on both errors and warnings
""")
xsrf_cookies: PrioritizedSetting[bool] = PrioritizedSetting("xsrf_cookies", "BOKEH_XSRF_COOKIES", default=False, convert=convert_bool, help="""
Whether to enable Tornado XSRF cookie protection on the Bokeh server. This
is only applicable when also using an auth module or custom handlers. See
https://www.tornadoweb.org/en/stable/guide/security.html#cross-site-request-forgery-protection
for more information about XSRF protection in Tornado. All PUT, POST, and
DELETE handlers will need to be appropriately instrumented when this setting
is active.
""")
# Non-settings methods
def bokehjs_path(self) -> Path:
''' The location of the BokehJS source tree.
'''
return bokehjs_path(self.dev)
def bokehjsdir(self) -> str:
''' The location of the BokehJS source tree.
.. deprecated:: 3.4.0
Use ``bokehjs_path()`` method instead.
'''
from .util.deprecation import deprecated
deprecated((3, 4, 0), "bokehjsdir()", "bokehjs_path() method")
return str(self.bokehjs_path())
def css_files(self) -> list[str]:
''' The CSS files in the BokehJS directory.
'''
css_files: list[str] = []
for root, _, files in os.walk(self.bokehjs_path()):
for fname in files:
if fname.endswith(".css"):
css_files.append(join(root, fname))
return css_files
def js_files(self) -> list[str]:
''' The JS files in the BokehJS directory.
'''
js_files: list[str] = []
for root, _, files in os.walk(self.bokehjs_path()):
for fname in files:
if fname.endswith(".js"):
js_files.append(join(root, fname))
return js_files
def load_config(self, location: PathLike) -> None:
''' Load a user-specified override config file.
The file should be a YAML format with ``key: value`` lines.
'''
try:
with Path(location).absolute().open() as f:
self._config_override = yaml.load(f, Loader=yaml.SafeLoader)
except Exception:
raise RuntimeError(f"Could not load Bokeh config file: {location}")
def secret_key_bytes(self) -> bytes | None:
''' Return the secret_key, converted to bytes and cached.
'''
if not hasattr(self, '_secret_key_bytes'):
key = self.secret_key()
if key is None:
self._secret_key_bytes = None
else:
self._secret_key_bytes = key.encode("utf-8")
return self._secret_key_bytes
def _try_load_config(self, locations: Sequence[Path]) -> dict[str, Any]:
for location in locations:
try:
with location.open() as f:
return yaml.load(f, Loader=yaml.SafeLoader)
except Exception:
pass
return {}
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
settings = Settings()
_secret_key = settings.secret_key()
if _secret_key is not None and len(_secret_key) < 32:
from .util.warnings import warn
warn("BOKEH_SECRET_KEY is recommended to have at least 32 bytes of entropy chosen with a cryptographically-random algorithm")
del _secret_key
if settings.sign_sessions() and settings.secret_key() is None:
from .util.warnings import warn
warn("BOKEH_SECRET_KEY must be set if BOKEH_SIGN_SESSIONS is set to True")
|
Settings
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/server_lib_test.py
|
{
"start": 1631,
"end": 16029
}
|
class ____(test.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(GrpcServerTest, self).__init__(methodName)
self._cached_server = server_lib.Server.create_local_server()
def testRunStep(self):
server = self._cached_server
with ops.Graph().as_default():
with session.Session(server.target) as sess:
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
self.assertAllEqual([[4]], sess.run(e))
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
def testMultipleSessions(self):
server = self._cached_server
with ops.Graph().as_default():
c = constant_op.constant([[2, 1]])
d = constant_op.constant([[1], [2]])
e = math_ops.matmul(c, d)
sess_1 = session.Session(server.target)
sess_2 = session.Session(server.target)
self.assertAllEqual([[4]], sess_1.run(e))
self.assertAllEqual([[4]], sess_2.run(e))
sess_1.close()
sess_2.close()
# TODO(mrry): Add `server.stop()` and `server.join()` when these work.
# Verifies various reset failures.
def testResetFails(self):
with ops.Graph().as_default():
# Creates variable with container name.
with ops.container("test0"):
v0 = variable_v1.VariableV1(1.0, name="v0")
# Creates variable with default container.
v1 = variable_v1.VariableV1(2.0, name="v1")
# Verifies resetting the non-existent target returns error.
with self.assertRaises(errors_impl.NotFoundError):
session.Session.reset("nonexistent", ["test0"])
# Verifies resetting with config.
# Verifies that resetting target with no server times out.
with self.assertRaises(errors_impl.DeadlineExceededError):
session.Session.reset(
"grpc://localhost:0",
["test0"],
config=config_pb2.ConfigProto(operation_timeout_in_ms=5),
)
# Verifies no containers are reset with non-existent container.
server = self._cached_server
sess = session.Session(server.target)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# No container is reset, but the server is reset.
session.Session.reset(server.target, ["test1"])
# Verifies that both variables are still valid.
sess = session.Session(server.target)
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
def _useRPCConfig(self):
"""Return a `tf.compat.v1.ConfigProto` that ensures we use the RPC stack for tests.
This configuration ensures that we continue to exercise the gRPC
stack when testing, rather than using the in-process optimization,
which avoids using gRPC as the transport between a client and
master in the same process.
Returns:
A `tf.compat.v1.ConfigProto`.
"""
return config_pb2.ConfigProto(
rpc_options=rpc_options_pb2.RPCOptions(
use_rpc_for_inprocess_master=True
)
)
def testLargeConstant(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
const_val = np.empty([10000, 3000], dtype=np.float32)
const_val.fill(0.5)
c = constant_op.constant(const_val)
shape_t = array_ops.shape(c)
self.assertAllEqual([10000, 3000], sess.run(shape_t))
def testLargeFetch(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
c = array_ops.fill([10000, 3000], 0.5)
expected_val = np.empty([10000, 3000], dtype=np.float32)
expected_val.fill(0.5)
self.assertAllEqual(expected_val, sess.run(c))
def testLargeFeed(self):
server = self._cached_server
with session.Session(server.target, config=self._useRPCConfig()) as sess:
feed_val = np.empty([10000, 3000], dtype=np.float32)
feed_val.fill(0.5)
p = array_ops.placeholder(dtypes.float32, shape=[10000, 3000])
min_t = math_ops.reduce_min(p)
max_t = math_ops.reduce_max(p)
min_val, max_val = sess.run([min_t, max_t], feed_dict={p: feed_val})
self.assertEqual(0.5, min_val)
self.assertEqual(0.5, max_val)
def testCloseCancelsBlockingOperation(self):
server = self._cached_server
with ops.Graph().as_default():
sess = session.Session(server.target, config=self._useRPCConfig())
q = data_flow_ops.FIFOQueue(10, [dtypes.float32])
enqueue_op = q.enqueue(37.0)
dequeue_t = q.dequeue()
sess.run(enqueue_op)
sess.run(dequeue_t)
def blocking_dequeue():
with self.assertRaisesRegex(
errors_impl.CancelledError, "Session::Close"
):
sess.run(dequeue_t)
blocking_thread = self.checkedThread(blocking_dequeue)
blocking_thread.start()
time.sleep(0.5)
sess.close()
blocking_thread.join()
def testInteractiveSession(self):
server = self._cached_server
# Session creation will warn (in C++) that the place_pruned_graph option
# is not supported, but it should successfully ignore it.
sess = session.InteractiveSession(server.target)
c = constant_op.constant(42.0)
self.assertEqual(42.0, self.evaluate(c))
sess.close()
def testSetConfiguration(self):
config = config_pb2.ConfigProto(
gpu_options=config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.1)
)
# Configure a server using the default local server options.
server = server_lib.Server.create_local_server(config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config.gpu_options.per_process_gpu_memory_fraction,
)
# Configure a server using an explicit ServerDefd with an
# overridden config.
cluster_def = server_lib.ClusterSpec(
{"localhost": ["localhost:0"]}
).as_cluster_def()
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def, job_name="localhost", task_index=0, protocol="grpc"
)
server = server_lib.Server(server_def, config=config, start=False)
self.assertEqual(
0.1,
server.server_def.default_session_config.gpu_options.per_process_gpu_memory_fraction,
)
def testRestartedMaster(self):
master_old = server_lib.Server.create_local_server()
master_new = server_lib.Server.create_local_server()
worker = self._cached_server
def get_cluster_def(master, worker):
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = "master"
job.tasks[0] = master.target[len("grpc://") :]
job = cluster_def.job.add()
job.name = "worker"
job.tasks[0] = worker.target[len("grpc://") :]
return cluster_def
def check_session_devices(sess):
# Make sure we have the correct set of cluster devices
devices = sess.list_devices()
device_names = set(d.name for d in devices)
self.assertIn("/job:master/replica:0/task:0/device:CPU:0", device_names)
self.assertIn("/job:worker/replica:0/task:0/device:CPU:0", device_names)
with ops.Graph().as_default():
# Construct a simple graph that runs ops on remote worker
with ops.device("/job:worker/replica:0/task:0/device:CPU:0"):
a = constant_op.constant([1.0])
b = a + a
config = config_pb2.ConfigProto(
cluster_def=get_cluster_def(master_old, worker)
)
sess_old = session.Session(master_old.target, config=config)
check_session_devices(sess_old)
# Create a session with the new master and the worker.
# The new master has the same task name ('/job:master/replica:0/task:0')
# as the old master, but is initiated from a different server thus has a
# different incarnation. This triggers the WorkerSession on worker with
# the old master incarnation to be garbage collected.
config = config_pb2.ConfigProto(
cluster_def=get_cluster_def(master_new, worker)
)
sess_new = session.Session(master_new.target, config=config)
check_session_devices(sess_new)
# Running on worker with the new session should work as expected
v = sess_new.run(b)
self.assertAllEqual(v, [2.0])
# Running on worker with the old session should raise an exception since
# the WorkerSession of the old session has been garbage collected
with self.assertRaisesRegex(
errors_impl.AbortedError, "Session handle is not found"
):
sess_old.run(b)
sess_old.close()
sess_new.close()
def testInvalidHostname(self):
with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "port"):
_ = server_lib.Server(
{"local": ["localhost"]}, job_name="local", task_index=0
)
def testTimeoutRaisesException(self):
server = self._cached_server
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(1, [dtypes.float32])
blocking_t = q.dequeue()
with session.Session(server.target) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(
blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000)
)
with session.Session(server.target, config=self._useRPCConfig()) as sess:
with self.assertRaises(errors_impl.DeadlineExceededError):
sess.run(
blocking_t, options=config_pb2.RunOptions(timeout_in_ms=1000)
)
def testTwoServersSamePort(self):
# Starting a server with the same target as the cached server should fail.
server = self._cached_server
with self.assertRaises(errors_impl.UnknownError):
_ = server_lib.Server({"local_2": [server.target[len("grpc://") :]]})
def testExtendAfterQueueRunners(self):
server = self._cached_server
with session.Session(server.target) as sess:
input_queue = input_ops.input_producer(
constant_op.constant([0.0], dtype=dtypes.float32)
)
self.assertIsNotNone(input_queue)
var = variable_v1.VariableV1(
1.0, dtype=dtypes.float32, trainable=False, name="var"
)
sess.run(variables.global_variables_initializer())
queue_runner_impl.start_queue_runners(sess)
sess.run(var.assign(3.0))
def testIsolateSessionState(self):
server = self._cached_server
with ops.Graph().as_default():
init_value = array_ops.placeholder(dtypes.int32)
v = variable_v1.VariableV1(init_value, validate_shape=False, name="v")
sharing_config = config_pb2.ConfigProto(isolate_session_state=False)
sharing_sess_0 = session.Session(server.target, config=sharing_config)
sharing_sess_1 = session.Session(server.target, config=sharing_config)
isolate_config = config_pb2.ConfigProto(isolate_session_state=True)
isolate_sess_0 = session.Session(server.target, config=isolate_config)
isolate_sess_1 = session.Session(server.target, config=isolate_config)
# Initially all variables are initialized.
for sess in [
sharing_sess_0,
sharing_sess_1,
isolate_sess_0,
isolate_sess_1,
]:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(v)
# Shared sessions will see each other's updates, but isolated sessions
# will not.
sharing_sess_0.run(v.initializer, feed_dict={init_value: 86})
self.assertAllEqual(86, sharing_sess_0.run(v))
self.assertAllEqual(86, sharing_sess_1.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_0.run(v)
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Changing the shape works because `validate_shape` is False.
sharing_sess_1.run(v.initializer, feed_dict={init_value: [86, 99]})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_0.run(v)
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Initializing in an isolated session will only affect the state in that
# session.
isolate_sess_0.run(v.initializer, feed_dict={init_value: 37})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
self.assertAllEqual(37, isolate_sess_0.run(v))
with self.assertRaises(errors_impl.FailedPreconditionError):
isolate_sess_1.run(v)
# Isolated sessions can have different shapes for the same variable.
isolate_sess_1.run(v.initializer, feed_dict={init_value: [19, 86]})
self.assertAllEqual([86, 99], sharing_sess_0.run(v))
self.assertAllEqual([86, 99], sharing_sess_1.run(v))
self.assertAllEqual(37, isolate_sess_0.run(v))
self.assertAllEqual([19, 86], isolate_sess_1.run(v))
def testShapeChangingIsolateState(self):
server = self._cached_server
sharing_config = config_pb2.ConfigProto(isolate_session_state=False)
isolate_config = config_pb2.ConfigProto(isolate_session_state=True)
with ops.Graph().as_default():
w_vector = variable_v1.VariableV1([1, 2, 3], name="w")
with session.Session(server.target, config=sharing_config) as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(w_vector)
sess.run(w_vector.initializer)
self.assertAllEqual([1, 2, 3], sess.run(w_vector))
with ops.Graph().as_default():
w_vector = variable_v1.VariableV1([4, 5, 6], name="w")
with session.Session(server.target, config=sharing_config) as sess:
self.assertAllEqual([1, 2, 3], sess.run(w_vector))
sess.run(w_vector.initializer)
self.assertAllEqual([4, 5, 6], sess.run(w_vector))
with ops.Graph().as_default():
w_scalar = variable_v1.VariableV1(37, name="w")
with session.Session(server.target, config=isolate_config) as sess:
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(w_scalar)
sess.run(w_scalar.initializer)
self.assertAllEqual(37, sess.run(w_scalar))
|
GrpcServerTest
|
python
|
django-mptt__django-mptt
|
tests/myapp/tests.py
|
{
"start": 61352,
"end": 61903
}
|
class ____(TreeTestCase):
def test_unsaved(self):
for method in [
"get_ancestors",
"get_family",
"get_children",
"get_descendants",
"get_leafnodes",
"get_next_sibling",
"get_previous_sibling",
"get_root",
"get_siblings",
]:
self.assertRaisesRegex(
ValueError,
"Cannot call %s on unsaved Genre instances" % method,
getattr(Genre(), method),
)
|
TestUnsaved
|
python
|
getsentry__sentry
|
tests/sentry/incidents/test_logic.py
|
{
"start": 136086,
"end": 136551
}
|
class ____(BaseAlertRuleTriggerActionTest):
def test(self) -> None:
assert list(get_actions_for_trigger(self.trigger)) == []
action = create_alert_rule_trigger_action(
self.trigger,
AlertRuleTriggerAction.Type.EMAIL,
AlertRuleTriggerAction.TargetType.USER,
target_identifier=str(self.user.id),
)
assert list(get_actions_for_trigger(self.trigger)) == [action]
|
GetActionsForTriggerTest
|
python
|
pytorch__pytorch
|
torch/nn/modules/loss.py
|
{
"start": 15122,
"end": 19077
}
|
class ____(_Loss):
r"""Gaussian negative log likelihood loss.
The targets are treated as samples from Gaussian distributions with
expectations and variances predicted by the neural network. For a
``target`` tensor modelled as having Gaussian distribution with a tensor
of expectations ``input`` and a tensor of positive variances ``var`` the loss is:
.. math::
\text{loss} = \frac{1}{2}\left(\log\left(\text{max}\left(\text{var},
\ \text{eps}\right)\right) + \frac{\left(\text{input} - \text{target}\right)^2}
{\text{max}\left(\text{var}, \ \text{eps}\right)}\right) + \text{const.}
where :attr:`eps` is used for stability. By default, the constant term of
the loss function is omitted unless :attr:`full` is ``True``. If ``var`` is not the same
size as ``input`` (due to a homoscedastic assumption), it must either have a final dimension
of 1 or have one fewer dimension (with all other sizes being the same) for correct broadcasting.
Args:
full (bool, optional): include the constant term in the loss
calculation. Default: ``False``.
eps (float, optional): value used to clamp ``var`` (see note below), for
stability. Default: 1e-6.
reduction (str, optional): specifies the reduction to apply to the
output:``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
will be applied, ``'mean'``: the output is the average of all batch
member losses, ``'sum'``: the output is the sum of all batch member
losses. Default: ``'mean'``.
Shape:
- Input: :math:`(N, *)` or :math:`(*)` where :math:`*` means any number of additional
dimensions
- Target: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input
but with one dimension equal to 1 (to allow for broadcasting)
- Var: :math:`(N, *)` or :math:`(*)`, same shape as the input, or same shape as the input but
with one dimension equal to 1, or same shape as the input but with one fewer
dimension (to allow for broadcasting), or a scalar value
- Output: scalar if :attr:`reduction` is ``'mean'`` (default) or
``'sum'``. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
shape as the input
Examples:
>>> loss = nn.GaussianNLLLoss()
>>> input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> var = torch.ones(5, 2, requires_grad=True) # heteroscedastic
>>> output = loss(input, target, var)
>>> output.backward()
>>> loss = nn.GaussianNLLLoss()
>>> input = torch.randn(5, 2, requires_grad=True)
>>> target = torch.randn(5, 2)
>>> var = torch.ones(5, 1, requires_grad=True) # homoscedastic
>>> output = loss(input, target, var)
>>> output.backward()
Note:
The clamping of ``var`` is ignored with respect to autograd, and so the
gradients are unaffected by it.
Reference:
Nix, D. A. and Weigend, A. S., "Estimating the mean and variance of the
target probability distribution", Proceedings of 1994 IEEE International
Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60
vol.1, doi: 10.1109/ICNN.1994.374138.
"""
__constants__ = ["full", "eps", "reduction"]
full: bool
eps: float
def __init__(
self, *, full: bool = False, eps: float = 1e-6, reduction: str = "mean"
) -> None:
super().__init__(None, None, reduction)
self.full = full
self.eps = eps
def forward(self, input: Tensor, target: Tensor, var: Tensor | float) -> Tensor:
"""
Runs the forward pass.
"""
return F.gaussian_nll_loss(
input, target, var, full=self.full, eps=self.eps, reduction=self.reduction
)
|
GaussianNLLLoss
|
python
|
pandas-dev__pandas
|
pandas/tests/tools/test_to_datetime.py
|
{
"start": 77421,
"end": 84751
}
|
class ____:
@pytest.fixture
def df(self):
return DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"hour": [6, 7],
"minute": [58, 59],
"second": [10, 11],
"ms": [1, 1],
"us": [2, 2],
"ns": [3, 3],
}
)
def test_dataframe(self, df, cache):
result = to_datetime(
{"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache
)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")]
)
tm.assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache)
expected.index = Index([0, 1])
tm.assert_series_equal(result, expected)
def test_dataframe_dict_with_constructable(self, df, cache):
# dict but with constructable
df2 = df[["year", "month", "day"]].to_dict()
df2["month"] = 2
result = to_datetime(df2, cache=cache)
expected2 = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")],
index=Index([0, 1]),
)
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"unit",
[
{
"year": "years",
"month": "months",
"day": "days",
"hour": "hours",
"minute": "minutes",
"second": "seconds",
},
{
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
},
],
)
def test_dataframe_field_aliases_column_subset(self, df, cache, unit):
# unit mappings
result = to_datetime(df[list(unit.keys())].rename(columns=unit), cache=cache)
expected = Series(
[Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")],
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_dataframe_field_aliases(self, df, cache):
d = {
"year": "year",
"month": "month",
"day": "day",
"hour": "hour",
"minute": "minute",
"second": "second",
"ms": "ms",
"us": "us",
"ns": "ns",
}
result = to_datetime(df.rename(columns=d), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
def test_dataframe_str_dtype(self, df, cache):
# coerce back to int
result = to_datetime(df.astype(str), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
def test_dataframe_float32_dtype(self, df, cache):
# GH#60506
# coerce to float64
result = to_datetime(df.astype(np.float32), cache=cache)
expected = Series(
[
Timestamp("20150204 06:58:10.001002003"),
Timestamp("20160305 07:59:11.001002003"),
]
)
tm.assert_series_equal(result, expected)
def test_dataframe_coerce(self, cache):
# passing coerce
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
msg = (
r'^cannot assemble the datetimes: time data ".+" doesn\'t '
r'match format "%Y%m%d"\.'
)
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
result = to_datetime(df2, errors="coerce", cache=cache)
expected = Series([Timestamp("20150204 00:00:00"), NaT])
tm.assert_series_equal(result, expected)
def test_dataframe_extra_keys_raises(self, df, cache):
# extra columns
msg = r"extra keys have been passed to the datetime assemblage: \[foo\]"
df2 = df.copy()
df2["foo"] = 1
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
@pytest.mark.parametrize(
"cols",
[
["year"],
["year", "month"],
["year", "month", "second"],
["month", "day"],
["year", "day", "second"],
],
)
def test_dataframe_missing_keys_raises(self, df, cache, cols):
# not enough
msg = (
r"to assemble mappings requires at least that \[year, month, "
r"day\] be specified: \[.+\] is missing"
)
with pytest.raises(ValueError, match=msg):
to_datetime(df[cols], cache=cache)
def test_dataframe_duplicate_columns_raises(self, cache):
# duplicates
msg = "cannot assemble with duplicate keys"
df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]})
df2.columns = ["year", "year", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
df2 = DataFrame(
{"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]}
)
df2.columns = ["year", "month", "day", "day"]
with pytest.raises(ValueError, match=msg):
to_datetime(df2, cache=cache)
def test_dataframe_int16(self, cache):
# GH#13451
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
# int16
result = to_datetime(df.astype("int16"), cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
def test_dataframe_mixed(self, cache):
# mixed dtypes
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df["month"] = df["month"].astype("int8")
df["day"] = df["day"].astype("int8")
result = to_datetime(df, cache=cache)
expected = Series(
[Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")]
)
tm.assert_series_equal(result, expected)
def test_dataframe_float(self, cache):
# float
df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]})
msg = (
r"^cannot assemble the datetimes: unconverted data remains when parsing "
r'with format ".*": "1".'
)
with pytest.raises(ValueError, match=msg):
to_datetime(df, cache=cache)
def test_dataframe_utc_true(self):
# GH#23760
df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
result = to_datetime(df, utc=True)
expected = Series(
np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[us]")
).dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
|
TestToDatetimeDataFrame
|
python
|
cython__cython
|
Cython/Compiler/Optimize.py
|
{
"start": 61490,
"end": 64280
}
|
class ____(Visitor.VisitorTransform, SkipDeclarations):
"""
This transformation flattens "x in [val1, ..., valn]" into a sequential list
of comparisons.
"""
def visit_PrimaryCmpNode(self, node):
self.visitchildren(node)
if node.cascade is not None:
return node
elif node.operator == 'in':
conjunction = 'or'
eq_or_neq = '=='
elif node.operator == 'not_in':
conjunction = 'and'
eq_or_neq = '!='
else:
return node
if not isinstance(node.operand2, (ExprNodes.TupleNode,
ExprNodes.ListNode,
ExprNodes.SetNode)):
return node
lhs = node.operand1
args = node.operand2.args
if len(args) == 0:
# note: lhs may have side effects, but ".is_simple()" may not work yet before type analysis.
if lhs.try_is_simple():
constant_result = node.operator == 'not_in'
return ExprNodes.BoolNode(node.pos, value=constant_result)
return node
if any([arg.is_starred for arg in args]):
# Starred arguments do not directly translate to comparisons or "in" tests.
return node
lhs = UtilNodes.ResultRefNode(lhs)
conds = []
temps = []
for arg in args:
# Trial optimisation to avoid redundant temp assignments.
if not arg.try_is_simple():
# must evaluate all non-simple RHS before doing the comparisons
arg = UtilNodes.LetRefNode(arg)
temps.append(arg)
cond = ExprNodes.PrimaryCmpNode(
pos = node.pos,
operand1 = lhs,
operator = eq_or_neq,
operand2 = arg,
cascade = None)
conds.append(ExprNodes.TypecastNode(
pos = node.pos,
operand = cond,
type = PyrexTypes.c_bint_type))
def concat(left, right):
return ExprNodes.BoolBinopNode(
pos = node.pos,
operator = conjunction,
operand1 = left,
operand2 = right)
condition = reduce(concat, conds)
new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
for temp in temps[::-1]:
new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
return new_node
visit_Node = Visitor.VisitorTransform.recurse_to_children
|
FlattenInListTransform
|
python
|
python-markdown__markdown
|
markdown/extensions/nl2br.py
|
{
"start": 770,
"end": 1102
}
|
class ____(Extension):
def extendMarkdown(self, md):
""" Add a `SubstituteTagInlineProcessor` to Markdown. """
br_tag = SubstituteTagInlineProcessor(BR_RE, 'br')
md.inlinePatterns.register(br_tag, 'nl', 5)
def makeExtension(**kwargs): # pragma: no cover
return Nl2BrExtension(**kwargs)
|
Nl2BrExtension
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_template_detail.py
|
{
"start": 5198,
"end": 7367
}
|
class ____(ProjectTemplateAPIBase):
endpoint = "sentry-api-0-organization-project-template-detail"
method = "delete"
def test_delete__no_feature(self) -> None:
response = self.get_error_response(
self.organization.id, self.project_template.id, status_code=404
)
assert response.status_code == 404
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_delete(self) -> None:
template_id = self.project_template.id
response = self.get_success_response(self.organization.id, template_id, status_code=204)
assert response.status_code == 204
with pytest.raises(ProjectTemplate.DoesNotExist):
ProjectTemplate.objects.get(id=template_id)
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_delete__with_options(self) -> None:
template_id = self.project_template.id
self.project_template.options.create(
project_template=self.project_template, key="sentry:release_track", value="test"
)
self.project_template.options.create(
project_template=self.project_template, key="sentry:another_example", value="test"
)
# Ensure the options are created
assert ProjectTemplateOption.objects.filter(project_template_id=template_id).count() == 2
response = self.get_success_response(self.organization.id, template_id, status_code=204)
assert response.status_code == 204
# Ensure data is deleted
with pytest.raises(ProjectTemplate.DoesNotExist):
ProjectTemplate.objects.get(id=template_id)
assert ProjectTemplateOption.objects.filter(project_template_id=template_id).count() == 0
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_delete__as_member_without_permission(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.organization, role="member")
self.login_as(user)
response = self.get_error_response(
self.organization.id, self.project_template.id, status_code=403
)
assert response.status_code == 403
|
ProjectTemplateDetailDeleteTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol3.py
|
{
"start": 2420,
"end": 2513
}
|
class ____(Protocol):
@property
def bar(self: P6) -> ContextManager[P6]: ...
|
MockClass6
|
python
|
doocs__leetcode
|
solution/0900-0999/0973.K Closest Points to Origin/Solution3.py
|
{
"start": 0,
"end": 428
}
|
class ____:
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
dist = [x * x + y * y for x, y in points]
l, r = 0, max(dist)
while l < r:
mid = (l + r) >> 1
cnt = sum(d <= mid for d in dist)
if cnt >= k:
r = mid
else:
l = mid + 1
return [points[i] for i, d in enumerate(dist) if d <= l]
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 593284,
"end": 595040
}
|
class ____(sgqlc.types.Type):
"""A User who is an outside collaborator of an enterprise through one
or more organizations.
"""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "repositories")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("User", graphql_name="node")
"""The item at the end of the edge."""
repositories = sgqlc.types.Field(
sgqlc.types.non_null("EnterpriseRepositoryInfoConnection"),
graphql_name="repositories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default={"field": "NAME", "direction": "ASC"})),
)
),
)
"""The enterprise organization repositories this user is a member of.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`RepositoryOrder`): Ordering options for
repositories. (default: `{field: NAME, direction: ASC}`)
"""
|
EnterpriseOutsideCollaboratorEdge
|
python
|
redis__redis-py
|
tests/test_maint_notifications_handling.py
|
{
"start": 1088,
"end": 6862
}
|
class ____:
"""Helper class containing static methods for validation in maintenance notifications tests."""
@staticmethod
def validate_in_use_connections_state(
in_use_connections: List[AbstractConnection],
expected_state=MaintenanceState.NONE,
expected_should_reconnect: Union[bool, str] = True,
expected_host_address=DEFAULT_ADDRESS.split(":")[0],
expected_socket_timeout=None,
expected_socket_connect_timeout=None,
expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0],
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=None,
expected_current_peername=DEFAULT_ADDRESS.split(":")[0],
):
"""Helper method to validate state of in-use connections."""
# validate in use connections are still working with set flag for reconnect
# and timeout is updated
for connection in in_use_connections:
if expected_should_reconnect != "any":
assert connection.should_reconnect() == expected_should_reconnect
assert connection.host == expected_host_address
assert connection.socket_timeout == expected_socket_timeout
assert connection.socket_connect_timeout == expected_socket_connect_timeout
assert connection.orig_host_address == expected_orig_host_address
assert connection.orig_socket_timeout == expected_orig_socket_timeout
assert (
connection.orig_socket_connect_timeout
== expected_orig_socket_connect_timeout
)
conn_socket = connection._get_socket()
if conn_socket is not None:
assert conn_socket.gettimeout() == expected_current_socket_timeout
assert conn_socket.connected is True
if expected_current_peername != "any":
assert conn_socket.getpeername()[0] == expected_current_peername
assert connection.maintenance_state == expected_state
@staticmethod
def validate_free_connections_state(
pool,
should_be_connected_count=0,
connected_to_tmp_address=False,
tmp_address=AFTER_MOVING_ADDRESS.split(":")[0],
expected_state=MaintenanceState.MOVING,
expected_host_address=DEFAULT_ADDRESS.split(":")[0],
expected_socket_timeout=None,
expected_socket_connect_timeout=None,
expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0],
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
):
"""Helper method to validate state of free/available connections."""
if isinstance(pool, BlockingConnectionPool):
free_connections = [conn for conn in pool.pool.queue if conn is not None]
elif isinstance(pool, ConnectionPool):
free_connections = pool._available_connections
else:
raise ValueError(f"Unsupported pool type: {type(pool)}")
connected_count = 0
for connection in free_connections:
assert connection.should_reconnect() is False
assert connection.host == expected_host_address
assert connection.socket_timeout == expected_socket_timeout
assert connection.socket_connect_timeout == expected_socket_connect_timeout
assert connection.orig_host_address == expected_orig_host_address
assert connection.orig_socket_timeout == expected_orig_socket_timeout
assert (
connection.orig_socket_connect_timeout
== expected_orig_socket_connect_timeout
)
assert connection.maintenance_state == expected_state
if expected_state == MaintenanceState.NONE:
assert connection.maintenance_notification_hash is None
conn_socket = connection._get_socket()
if conn_socket is not None:
assert conn_socket.connected is True
if connected_to_tmp_address and tmp_address != "any":
assert conn_socket.getpeername()[0] == tmp_address
connected_count += 1
assert connected_count == should_be_connected_count
@staticmethod
def validate_conn_kwargs(
pool,
expected_maintenance_state,
expected_maintenance_notification_hash,
expected_host_address,
expected_port,
expected_socket_timeout,
expected_socket_connect_timeout,
expected_orig_host_address,
expected_orig_socket_timeout,
expected_orig_socket_connect_timeout,
):
"""Helper method to validate connection kwargs."""
assert pool.connection_kwargs["maintenance_state"] == expected_maintenance_state
assert (
pool.connection_kwargs["maintenance_notification_hash"]
== expected_maintenance_notification_hash
)
assert pool.connection_kwargs["host"] == expected_host_address
assert pool.connection_kwargs["port"] == expected_port
assert pool.connection_kwargs["socket_timeout"] == expected_socket_timeout
assert (
pool.connection_kwargs["socket_connect_timeout"]
== expected_socket_connect_timeout
)
assert (
pool.connection_kwargs.get("orig_host_address", None)
== expected_orig_host_address
)
assert (
pool.connection_kwargs.get("orig_socket_timeout", None)
== expected_orig_socket_timeout
)
assert (
pool.connection_kwargs.get("orig_socket_connect_timeout", None)
== expected_orig_socket_connect_timeout
)
|
Helpers
|
python
|
django__django
|
tests/migrations/test_operations.py
|
{
"start": 889,
"end": 281092
}
|
class ____(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
Each test looks at their state changing, and then their database operation,
both forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures
here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
self.assertEqual(operation.formatted_description(), "+ Create model Pony")
self.assertEqual(operation.migration_name_fragment, "pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["fields", "name"])
# And default manager not in set
operation = migrations.CreateModel(
"Foo", fields=[], managers=[("objects", models.Manager())]
)
definition = operation.deconstruct()
self.assertNotIn("managers", definition[2])
def test_create_model_with_duplicate_field_name(self):
with self.assertRaisesMessage(
ValueError, "Found duplicate value pink in CreateModel fields argument."
):
migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.TextField()),
("pink", models.IntegerField(default=1)),
],
)
def test_create_model_with_duplicate_base(self):
message = "Found duplicate value test_crmo.pony in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.Pony",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.pony",
),
)
message = (
"Found duplicate value migrations.unicodemodel in CreateModel bases "
"argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
UnicodeModel,
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.unicodemodel",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.UnicodeModel",
),
)
message = (
"Found duplicate value <class 'django.db.models.base.Model'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
models.Model,
models.Model,
),
)
message = (
"Found duplicate value <class 'migrations.test_operations.Mixin'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
Mixin,
Mixin,
),
)
def test_create_model_with_duplicate_manager_name(self):
with self.assertRaisesMessage(
ValueError,
"Found duplicate value objects in CreateModel managers argument.",
):
migrations.CreateModel(
"Pony",
fields=[],
managers=[
("objects", models.Manager()),
("objects", models.Manager()),
],
)
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards(
"test_crmoua", editor, project_state, new_state
)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
    """
    Test the creation of a model with a ManyToMany field and the
    auto-created "through" model.
    """
    project_state = self.set_up_test_model("test_crmomm")
    operation = migrations.CreateModel(
        "Stable",
        [
            ("id", models.AutoField(primary_key=True)),
            ("ponies", models.ManyToManyField("Pony", related_name="stables")),
        ],
    )
    # Test the state alteration
    new_state = project_state.clone()
    operation.state_forwards("test_crmomm", new_state)
    # Test the database alteration
    self.assertTableNotExists("test_crmomm_stable_ponies")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmomm", editor, project_state, new_state)
    # Both the model's table and the auto-created through table must exist;
    # the M2M itself must not materialize as a column on the model table.
    self.assertTableExists("test_crmomm_stable")
    self.assertTableExists("test_crmomm_stable_ponies")
    self.assertColumnNotExists("test_crmomm_stable", "ponies")
    # Make sure the M2M field actually works
    with atomic():
        Pony = new_state.apps.get_model("test_crmomm", "Pony")
        Stable = new_state.apps.get_model("test_crmomm", "Stable")
        stable = Stable.objects.create()
        p1 = Pony.objects.create(pink=False, weight=4.55)
        p2 = Pony.objects.create(pink=True, weight=5.43)
        stable.ponies.add(p1, p2)
        self.assertEqual(stable.ponies.count(), 2)
        stable.ponies.all().delete()
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_crmomm", editor, new_state, project_state
        )
    self.assertTableNotExists("test_crmomm_stable")
    self.assertTableNotExists("test_crmomm_stable_ponies")
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_create_fk_models_to_pk_field_db_collation(self):
    """Creation of models with a FK to a PK with db_collation."""
    collation = connection.features.test_collations.get("non_default")
    if not collation:
        self.skipTest("Language collations are not supported.")
    app_label = "test_cfkmtopkfdbc"
    # Target model: CharField primary key carrying an explicit collation.
    operations = [
        migrations.CreateModel(
            "Pony",
            [
                (
                    "id",
                    models.CharField(
                        primary_key=True,
                        max_length=10,
                        db_collation=collation,
                    ),
                ),
            ],
        )
    ]
    project_state = self.apply_operations(app_label, ProjectState(), operations)
    # ForeignKey.
    new_state = project_state.clone()
    operation = migrations.CreateModel(
        "Rider",
        [
            ("id", models.AutoField(primary_key=True)),
            ("pony", models.ForeignKey("Pony", models.CASCADE)),
        ],
    )
    operation.state_forwards(app_label, new_state)
    with connection.schema_editor() as editor:
        operation.database_forwards(app_label, editor, project_state, new_state)
    # The FK column must inherit the referenced PK's collation.
    self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
    # Reversal.
    with connection.schema_editor() as editor:
        operation.database_backwards(app_label, editor, new_state, project_state)
    # OneToOneField.
    new_state = project_state.clone()
    operation = migrations.CreateModel(
        "ShetlandPony",
        [
            (
                "pony",
                models.OneToOneField("Pony", models.CASCADE, primary_key=True),
            ),
            ("cuteness", models.IntegerField(default=1)),
        ],
    )
    operation.state_forwards(app_label, new_state)
    with connection.schema_editor() as editor:
        operation.database_forwards(app_label, editor, project_state, new_state)
    self.assertColumnCollation(f"{app_label}_shetlandpony", "pony_id", collation)
    # Reversal.
    with connection.schema_editor() as editor:
        operation.database_backwards(app_label, editor, new_state, project_state)
def test_create_model_inheritance(self):
    """
    Tests the CreateModel operation on a multi-table inheritance setup.
    """
    project_state = self.set_up_test_model("test_crmoih")
    # Test the state alteration
    # The parent-link OneToOneField mirrors what Django auto-creates for MTI
    # (auto_created, primary_key, serialize=False).
    operation = migrations.CreateModel(
        "ShetlandPony",
        [
            (
                "pony_ptr",
                models.OneToOneField(
                    "test_crmoih.Pony",
                    models.CASCADE,
                    auto_created=True,
                    primary_key=True,
                    to_field="id",
                    serialize=False,
                ),
            ),
            ("cuteness", models.IntegerField(default=1)),
        ],
    )
    new_state = project_state.clone()
    operation.state_forwards("test_crmoih", new_state)
    self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
    # Test the database alteration
    self.assertTableNotExists("test_crmoih_shetlandpony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmoih", editor, project_state, new_state)
    self.assertTableExists("test_crmoih_shetlandpony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_crmoih", editor, new_state, project_state
        )
    self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
    """
    CreateModel ignores proxy models.
    """
    project_state = self.set_up_test_model("test_crprmo")
    # Test the state alteration
    operation = migrations.CreateModel(
        "ProxyPony",
        [],
        options={"proxy": True},
        bases=("test_crprmo.Pony",),
    )
    self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
    new_state = project_state.clone()
    operation.state_forwards("test_crprmo", new_state)
    # The proxy appears in state...
    self.assertIn(("test_crprmo", "proxypony"), new_state.models)
    # Test the database alteration
    # ...but never gets its own table; only the concrete base's table exists.
    self.assertTableNotExists("test_crprmo_proxypony")
    self.assertTableExists("test_crprmo_pony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crprmo", editor, project_state, new_state)
    self.assertTableNotExists("test_crprmo_proxypony")
    self.assertTableExists("test_crprmo_pony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_crprmo", editor, new_state, project_state
        )
    self.assertTableNotExists("test_crprmo_proxypony")
    self.assertTableExists("test_crprmo_pony")
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "CreateModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
    """
    CreateModel ignores unmanaged models.
    """
    project_state = self.set_up_test_model("test_crummo")
    # Test the state alteration
    # NOTE(review): despite the test name, this uses the "proxy" option, not
    # "managed": False — presumably mirroring test_create_proxy_model; confirm
    # whether {"managed": False} was intended here.
    operation = migrations.CreateModel(
        "UnmanagedPony",
        [],
        options={"proxy": True},
        bases=("test_crummo.Pony",),
    )
    self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
    new_state = project_state.clone()
    operation.state_forwards("test_crummo", new_state)
    self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
    # Test the database alteration
    self.assertTableNotExists("test_crummo_unmanagedpony")
    self.assertTableExists("test_crummo_pony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crummo", editor, project_state, new_state)
    self.assertTableNotExists("test_crummo_unmanagedpony")
    self.assertTableExists("test_crummo_pony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_crummo", editor, new_state, project_state
        )
    self.assertTableNotExists("test_crummo_unmanagedpony")
    self.assertTableExists("test_crummo_pony")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_constraint(self):
    """CreateModel applies a CheckConstraint passed via options and enforces it."""
    where = models.Q(pink__gt=2)
    check_constraint = models.CheckConstraint(
        condition=where, name="test_constraint_pony_pink_gt_2"
    )
    operation = migrations.CreateModel(
        "Pony",
        [
            ("id", models.AutoField(primary_key=True)),
            ("pink", models.IntegerField(default=3)),
        ],
        options={"constraints": [check_constraint]},
    )
    # Test the state alteration
    project_state = ProjectState()
    new_state = project_state.clone()
    operation.state_forwards("test_crmo", new_state)
    self.assertEqual(
        len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
    )
    # Test database alteration
    self.assertTableNotExists("test_crmo_pony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmo", editor, project_state, new_state)
    self.assertTableExists("test_crmo_pony")
    # A row violating pink > 2 must be rejected by the database.
    with connection.cursor() as cursor:
        with self.assertRaises(IntegrityError):
            cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")
    # Test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards("test_crmo", editor, new_state, project_state)
    self.assertTableNotExists("test_crmo_pony")
    # Test deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "CreateModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(definition[2]["options"]["constraints"], [check_constraint])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_boolean_expression_in_check_constraint(self):
    """
    CreateModel applies CheckConstraints whose condition is a boolean
    expression (RawSQL / ExpressionWrapper) rather than a plain Q object,
    and the database enforces both.
    """
    app_label = "test_crmobechc"
    rawsql_constraint = models.CheckConstraint(
        condition=models.expressions.RawSQL(
            "price < %s", (1000,), output_field=models.BooleanField()
        ),
        name=f"{app_label}_price_lt_1000_raw",
    )
    wrapper_constraint = models.CheckConstraint(
        condition=models.expressions.ExpressionWrapper(
            models.Q(price__gt=500) | models.Q(price__lt=500),
            output_field=models.BooleanField(),
        ),
        name=f"{app_label}_price_neq_500_wrap",
    )
    operation = migrations.CreateModel(
        "Product",
        [
            ("id", models.AutoField(primary_key=True)),
            ("price", models.IntegerField(null=True)),
        ],
        options={"constraints": [rawsql_constraint, wrapper_constraint]},
    )
    project_state = ProjectState()
    new_state = project_state.clone()
    operation.state_forwards(app_label, new_state)
    # Add table.
    # BUG FIX: this previously asserted on the bare app_label, which is never
    # a table name and therefore always passed vacuously; check the model's
    # actual table instead.
    self.assertTableNotExists(f"{app_label}_product")
    with connection.schema_editor() as editor:
        operation.database_forwards(app_label, editor, project_state, new_state)
    self.assertTableExists(f"{app_label}_product")
    insert_sql = f"INSERT INTO {app_label}_product (id, price) VALUES (%d, %d)"
    with connection.cursor() as cursor:
        # price must be < 1000 (RawSQL constraint)...
        with self.assertRaises(IntegrityError):
            cursor.execute(insert_sql % (1, 1000))
        cursor.execute(insert_sql % (1, 999))
        # ...and != 500 (ExpressionWrapper constraint).
        with self.assertRaises(IntegrityError):
            cursor.execute(insert_sql % (2, 500))
        cursor.execute(insert_sql % (2, 499))
def test_create_model_with_partial_unique_constraint(self):
    """CreateModel applies a conditional (partial) UniqueConstraint."""
    partial_unique_constraint = models.UniqueConstraint(
        fields=["pink"],
        condition=models.Q(weight__gt=5),
        name="test_constraint_pony_pink_for_weight_gt_5_uniq",
    )
    operation = migrations.CreateModel(
        "Pony",
        [
            ("id", models.AutoField(primary_key=True)),
            ("pink", models.IntegerField(default=3)),
            ("weight", models.FloatField()),
        ],
        options={"constraints": [partial_unique_constraint]},
    )
    # Test the state alteration
    project_state = ProjectState()
    new_state = project_state.clone()
    operation.state_forwards("test_crmo", new_state)
    self.assertEqual(
        len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
    )
    # Test database alteration
    self.assertTableNotExists("test_crmo_pony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmo", editor, project_state, new_state)
    self.assertTableExists("test_crmo_pony")
    # Test constraint works
    Pony = new_state.apps.get_model("test_crmo", "Pony")
    # Duplicates with weight <= 5 are outside the condition and allowed.
    Pony.objects.create(pink=1, weight=4.0)
    Pony.objects.create(pink=1, weight=4.0)
    Pony.objects.create(pink=1, weight=6.0)
    if connection.features.supports_partial_indexes:
        # A second pink=1 row with weight > 5 violates the partial constraint.
        with self.assertRaises(IntegrityError):
            Pony.objects.create(pink=1, weight=7.0)
    else:
        # Backends without partial indexes skip the condition entirely.
        Pony.objects.create(pink=1, weight=7.0)
    # Test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards("test_crmo", editor, new_state, project_state)
    self.assertTableNotExists("test_crmo_pony")
    # Test deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "CreateModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(
        definition[2]["options"]["constraints"], [partial_unique_constraint]
    )
def test_create_model_with_deferred_unique_constraint(self):
    """CreateModel applies a deferrable UniqueConstraint."""
    deferred_unique_constraint = models.UniqueConstraint(
        fields=["pink"],
        name="deferrable_pink_constraint",
        deferrable=models.Deferrable.DEFERRED,
    )
    operation = migrations.CreateModel(
        "Pony",
        [
            ("id", models.AutoField(primary_key=True)),
            ("pink", models.IntegerField(default=3)),
        ],
        options={"constraints": [deferred_unique_constraint]},
    )
    project_state = ProjectState()
    new_state = project_state.clone()
    operation.state_forwards("test_crmo", new_state)
    self.assertEqual(
        len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
    )
    self.assertTableNotExists("test_crmo_pony")
    # Create table.
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmo", editor, project_state, new_state)
    self.assertTableExists("test_crmo_pony")
    Pony = new_state.apps.get_model("test_crmo", "Pony")
    Pony.objects.create(pink=1)
    if connection.features.supports_deferrable_unique_constraints:
        # Unique constraint is deferred.
        # The duplicate is fixed before commit, so no error is raised.
        with transaction.atomic():
            obj = Pony.objects.create(pink=1)
            obj.pink = 2
            obj.save()
        # Constraint behavior can be changed with SET CONSTRAINTS.
        # Once IMMEDIATE, the duplicate insert fails right away.
        with self.assertRaises(IntegrityError):
            with transaction.atomic(), connection.cursor() as cursor:
                quoted_name = connection.ops.quote_name(
                    deferred_unique_constraint.name
                )
                cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                obj = Pony.objects.create(pink=1)
                obj.pink = 3
                obj.save()
    else:
        # Backends without deferrable constraints don't enforce it at all.
        Pony.objects.create(pink=1)
    # Reversal.
    with connection.schema_editor() as editor:
        operation.database_backwards("test_crmo", editor, new_state, project_state)
    self.assertTableNotExists("test_crmo_pony")
    # Deconstruction.
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "CreateModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(
        definition[2]["options"]["constraints"],
        [deferred_unique_constraint],
    )
@skipUnlessDBFeature("supports_covering_indexes")
def test_create_model_with_covering_unique_constraint(self):
    """CreateModel applies a UniqueConstraint with include (covering) columns."""
    covering_unique_constraint = models.UniqueConstraint(
        fields=["pink"],
        include=["weight"],
        name="test_constraint_pony_pink_covering_weight",
    )
    operation = migrations.CreateModel(
        "Pony",
        [
            ("id", models.AutoField(primary_key=True)),
            ("pink", models.IntegerField(default=3)),
            ("weight", models.FloatField()),
        ],
        options={"constraints": [covering_unique_constraint]},
    )
    project_state = ProjectState()
    new_state = project_state.clone()
    operation.state_forwards("test_crmo", new_state)
    self.assertEqual(
        len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
    )
    self.assertTableNotExists("test_crmo_pony")
    # Create table.
    with connection.schema_editor() as editor:
        operation.database_forwards("test_crmo", editor, project_state, new_state)
    self.assertTableExists("test_crmo_pony")
    Pony = new_state.apps.get_model("test_crmo", "Pony")
    Pony.objects.create(pink=1, weight=4.0)
    # Uniqueness is on "pink" only; "weight" is merely included in the index.
    with self.assertRaises(IntegrityError):
        Pony.objects.create(pink=1, weight=7.0)
    # Reversal.
    with connection.schema_editor() as editor:
        operation.database_backwards("test_crmo", editor, new_state, project_state)
    self.assertTableNotExists("test_crmo_pony")
    # Deconstruction.
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "CreateModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(
        definition[2]["options"]["constraints"],
        [covering_unique_constraint],
    )
def test_create_model_managers(self):
    """
    The managers on a model are set.
    """
    project_state = self.set_up_test_model("test_cmoma")
    # Test the state alteration
    operation = migrations.CreateModel(
        "Food",
        fields=[
            ("id", models.AutoField(primary_key=True)),
        ],
        managers=[
            ("food_qs", FoodQuerySet.as_manager()),
            ("food_mgr", FoodManager("a", "b")),
            ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
        ],
    )
    self.assertEqual(operation.describe(), "Create model Food")
    new_state = project_state.clone()
    operation.state_forwards("test_cmoma", new_state)
    self.assertIn(("test_cmoma", "food"), new_state.models)
    # Managers must survive into state with order, class, and ctor args intact.
    managers = new_state.models["test_cmoma", "food"].managers
    self.assertEqual(managers[0][0], "food_qs")
    self.assertIsInstance(managers[0][1], models.Manager)
    self.assertEqual(managers[1][0], "food_mgr")
    self.assertIsInstance(managers[1][1], FoodManager)
    # (1, 2) are presumably FoodManager's default extra args — see its
    # definition earlier in this file.
    self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
    self.assertEqual(managers[2][0], "food_mgr_kwargs")
    self.assertIsInstance(managers[2][1], FoodManager)
    self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
def test_delete_model(self):
    """
    Tests the DeleteModel operation.
    """
    pre_state = self.set_up_test_model("test_dlmo")
    # State alteration: the model disappears from project state.
    delete_op = migrations.DeleteModel("Pony")
    self.assertEqual(delete_op.describe(), "Delete model Pony")
    self.assertEqual(delete_op.formatted_description(), "- Delete model Pony")
    self.assertEqual(delete_op.migration_name_fragment, "delete_pony")
    post_state = pre_state.clone()
    delete_op.state_forwards("test_dlmo", post_state)
    self.assertNotIn(("test_dlmo", "pony"), post_state.models)
    # Database alteration: the table is dropped...
    self.assertTableExists("test_dlmo_pony")
    with connection.schema_editor() as editor:
        delete_op.database_forwards("test_dlmo", editor, pre_state, post_state)
    self.assertTableNotExists("test_dlmo_pony")
    # ...and recreated on reversal.
    with connection.schema_editor() as editor:
        delete_op.database_backwards("test_dlmo", editor, post_state, pre_state)
    self.assertTableExists("test_dlmo_pony")
    # Deconstruction round-trips the operation's arguments.
    deconstructed = delete_op.deconstruct()
    self.assertEqual(deconstructed[0], "DeleteModel")
    self.assertEqual(deconstructed[1], [])
    self.assertEqual(list(deconstructed[2]), ["name"])
def test_delete_proxy_model(self):
    """
    Tests the DeleteModel operation ignores proxy models.
    """
    project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
    # Test the state alteration
    operation = migrations.DeleteModel("ProxyPony")
    new_state = project_state.clone()
    operation.state_forwards("test_dlprmo", new_state)
    self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
    self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
    # Test the database alteration
    # A proxy has no table of its own, so the DB is untouched either way.
    self.assertTableExists("test_dlprmo_pony")
    self.assertTableNotExists("test_dlprmo_proxypony")
    with connection.schema_editor() as editor:
        operation.database_forwards("test_dlprmo", editor, project_state, new_state)
    self.assertTableExists("test_dlprmo_pony")
    self.assertTableNotExists("test_dlprmo_proxypony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_dlprmo", editor, new_state, project_state
        )
    self.assertTableExists("test_dlprmo_pony")
    self.assertTableNotExists("test_dlprmo_proxypony")
def test_delete_mti_model(self):
    """DeleteModel on an MTI child drops only the child's table."""
    project_state = self.set_up_test_model("test_dlmtimo", mti_model=True)
    # Test the state alteration
    operation = migrations.DeleteModel("ShetlandPony")
    new_state = project_state.clone()
    operation.state_forwards("test_dlmtimo", new_state)
    self.assertIn(("test_dlmtimo", "shetlandpony"), project_state.models)
    self.assertNotIn(("test_dlmtimo", "shetlandpony"), new_state.models)
    # Test the database alteration
    self.assertTableExists("test_dlmtimo_pony")
    self.assertTableExists("test_dlmtimo_shetlandpony")
    self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
    with connection.schema_editor() as editor:
        operation.database_forwards(
            "test_dlmtimo", editor, project_state, new_state
        )
    # The parent table survives; only the child table is dropped.
    self.assertTableExists("test_dlmtimo_pony")
    self.assertTableNotExists("test_dlmtimo_shetlandpony")
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_dlmtimo", editor, new_state, project_state
        )
    self.assertTableExists("test_dlmtimo_pony")
    self.assertTableExists("test_dlmtimo_shetlandpony")
    self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
def test_rename_model(self):
    """
    Tests the RenameModel operation.
    """
    project_state = self.set_up_test_model("test_rnmo", related_model=True)
    # Test the state alteration
    operation = migrations.RenameModel("Pony", "Horse")
    self.assertEqual(operation.describe(), "Rename model Pony to Horse")
    self.assertEqual(
        operation.formatted_description(), "~ Rename model Pony to Horse"
    )
    self.assertEqual(operation.migration_name_fragment, "rename_pony_horse")
    # Test initial state and database
    self.assertIn(("test_rnmo", "pony"), project_state.models)
    self.assertNotIn(("test_rnmo", "horse"), project_state.models)
    self.assertTableExists("test_rnmo_pony")
    self.assertTableNotExists("test_rnmo_horse")
    if connection.features.supports_foreign_keys:
        self.assertFKExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
        )
        self.assertFKNotExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
        )
    # Migrate forwards
    new_state = project_state.clone()
    new_state = self.apply_operations("test_rnmo", new_state, [operation])
    # Test new state and database
    self.assertNotIn(("test_rnmo", "pony"), new_state.models)
    self.assertIn(("test_rnmo", "horse"), new_state.models)
    # RenameModel also repoints all incoming FKs and M2Ms
    self.assertEqual(
        new_state.models["test_rnmo", "rider"].fields["pony"].remote_field.model,
        "test_rnmo.Horse",
    )
    self.assertTableNotExists("test_rnmo_pony")
    self.assertTableExists("test_rnmo_horse")
    if connection.features.supports_foreign_keys:
        # The FK constraint now targets the renamed table.
        self.assertFKNotExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
        )
        self.assertFKExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
        )
    # Migrate backwards
    original_state = self.unapply_operations(
        "test_rnmo", project_state, [operation]
    )
    # Test original state and database
    self.assertIn(("test_rnmo", "pony"), original_state.models)
    self.assertNotIn(("test_rnmo", "horse"), original_state.models)
    self.assertEqual(
        original_state.models["test_rnmo", "rider"]
        .fields["pony"]
        .remote_field.model,
        "Pony",
    )
    self.assertTableExists("test_rnmo_pony")
    self.assertTableNotExists("test_rnmo_horse")
    if connection.features.supports_foreign_keys:
        self.assertFKExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
        )
        self.assertFKNotExists(
            "test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
        )
    # And deconstruction
    definition = operation.deconstruct()
    self.assertEqual(definition[0], "RenameModel")
    self.assertEqual(definition[1], [])
    self.assertEqual(definition[2], {"old_name": "Pony", "new_name": "Horse"})
def test_rename_model_state_forwards(self):
    """
    RenameModel operations shouldn't trigger the caching of rendered apps
    on state without prior apps.
    """
    state = ProjectState()
    state.add_model(ModelState("migrations", "Foo", []))
    # Without rendered apps, renaming must not create the "apps" cache.
    migrations.RenameModel("Foo", "Bar").state_forwards("migrations", state)
    self.assertNotIn("apps", state.__dict__)
    self.assertNotIn(("migrations", "foo"), state.models)
    self.assertIn(("migrations", "bar"), state.models)
    # Now with apps cached.
    rendered_apps = state.apps
    migrations.RenameModel("Bar", "Foo").state_forwards("migrations", state)
    # The cached apps object is reused, not rebuilt.
    self.assertIs(state.apps, rendered_apps)
    self.assertNotIn(("migrations", "bar"), state.models)
    self.assertIn(("migrations", "foo"), state.models)
def test_rename_model_with_self_referential_fk(self):
    """
    Tests the RenameModel operation on model with self referential FK.
    """
    project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
    # Test the state alteration
    operation = migrations.RenameModel("Rider", "HorseRider")
    self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
    new_state = project_state.clone()
    operation.state_forwards("test_rmwsrf", new_state)
    self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
    self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
    # Remember, RenameModel also repoints all incoming FKs and M2Ms
    # The self-FK stays expressed as the "self" sentinel, not a new name.
    self.assertEqual(
        "self",
        new_state.models["test_rmwsrf", "horserider"]
        .fields["friend"]
        .remote_field.model,
    )
    HorseRider = new_state.apps.get_model("test_rmwsrf", "horserider")
    # NOTE(review): "horserider" here is presumably the auto-named reverse
    # accessor of the renamed self-referential FK resolved via get_field() —
    # confirm against the Rider model definition in set_up_test_model.
    self.assertIs(
        HorseRider._meta.get_field("horserider").remote_field.model, HorseRider
    )
    # Test the database alteration
    self.assertTableExists("test_rmwsrf_rider")
    self.assertTableNotExists("test_rmwsrf_horserider")
    if connection.features.supports_foreign_keys:
        self.assertFKExists(
            "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
        )
        self.assertFKNotExists(
            "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
        )
    with connection.schema_editor() as editor:
        operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
    self.assertTableNotExists("test_rmwsrf_rider")
    self.assertTableExists("test_rmwsrf_horserider")
    if connection.features.supports_foreign_keys:
        # The self-FK constraint must follow the table rename.
        self.assertFKNotExists(
            "test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")
        )
        self.assertFKExists(
            "test_rmwsrf_horserider",
            ["friend_id"],
            ("test_rmwsrf_horserider", "id"),
        )
    # And test reversal
    with connection.schema_editor() as editor:
        operation.database_backwards(
            "test_rmwsrf", editor, new_state, project_state
        )
    self.assertTableExists("test_rmwsrf_rider")
    self.assertTableNotExists("test_rmwsrf_horserider")
    if connection.features.supports_foreign_keys:
        self.assertFKExists(
            "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
        )
        self.assertFKNotExists(
            "test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
        )
def test_rename_model_with_superclass_fk(self):
    """
    Tests the RenameModel operation on a model which has a superclass that
    has a foreign key.
    """
    project_state = self.set_up_test_model(
        "test_rmwsc", related_model=True, mti_model=True
    )
    # Test the state alteration
    operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
    self.assertEqual(
        operation.describe(), "Rename model ShetlandPony to LittleHorse"
    )
    new_state = project_state.clone()
    operation.state_forwards("test_rmwsc", new_state)
    self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
    self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
    # RenameModel shouldn't repoint the superclass's relations, only local
    # ones
    self.assertEqual(
        project_state.models["test_rmwsc", "rider"]
        .fields["pony"]
        .remote_field.model,
        new_state.models["test_rmwsc", "rider"].fields["pony"].remote_field.model,
    )
    # Before running the migration we have a table for Shetland Pony, not
    # Little Horse.
    self.assertTableExists("test_rmwsc_shetlandpony")
    self.assertTableNotExists("test_rmwsc_littlehorse")
    if connection.features.supports_foreign_keys:
        # and the foreign key on rider points to pony, not shetland pony
        self.assertFKExists(
            "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
        )
        self.assertFKNotExists(
            "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")
        )
    with connection.schema_editor() as editor:
        operation.database_forwards("test_rmwsc", editor, project_state, new_state)
    # Now we have a little horse table, not shetland pony
    self.assertTableNotExists("test_rmwsc_shetlandpony")
    self.assertTableExists("test_rmwsc_littlehorse")
    if connection.features.supports_foreign_keys:
        # but the Foreign keys still point at pony, not little horse
        self.assertFKExists(
            "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
        )
        self.assertFKNotExists(
            "test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")
        )
def test_rename_model_no_relations_with_db_table_noop(self):
    """Renaming a model whose db_table is explicit issues no SQL at all."""
    app_label = "test_rmwdbtnoop"
    before_state = self.set_up_test_model(app_label, db_table="my_pony")
    rename = migrations.RenameModel("Pony", "LittleHorse")
    after_state = before_state.clone()
    rename.state_forwards(app_label, after_state)
    # db_table pins the physical name, so the rename must be a no-op.
    with connection.schema_editor() as editor, self.assertNumQueries(0):
        rename.database_forwards(app_label, editor, before_state, after_state)
@skipUnlessDBFeature("supports_foreign_keys")
def test_rename_model_with_db_table_and_fk_noop(self):
    """Renaming a db_table-pinned model is a no-op even with incoming FKs."""
    app_label = "test_rmwdbtfk"
    before_state = self.set_up_test_model(
        app_label, db_table="my_pony", related_model=True
    )
    rename = migrations.RenameModel("Pony", "LittleHorse")
    after_state = before_state.clone()
    rename.state_forwards(app_label, after_state)
    # Neither the table nor the FK constraints need touching.
    with connection.schema_editor() as editor, self.assertNumQueries(0):
        rename.database_forwards(app_label, editor, before_state, after_state)
def test_rename_model_with_self_referential_m2m(self):
    """Renaming a model with an M2M to "self" keeps the relation usable."""
    app_label = "test_rename_model_with_self_referential_m2m"
    initial_ops = [
        migrations.CreateModel(
            "ReflexivePony",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("ponies", models.ManyToManyField("self")),
            ],
        ),
    ]
    state = self.apply_operations(app_label, ProjectState(), operations=initial_ops)
    rename_ops = [
        migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
    ]
    state = self.apply_operations(app_label, state, operations=rename_ops)
    # The renamed model's self-M2M must still accept rows.
    RenamedPony = state.apps.get_model(app_label, "ReflexivePony2")
    instance = RenamedPony.objects.create()
    instance.ponies.add(instance)
def test_rename_model_with_m2m(self):
    """Renaming a model with an outgoing M2M preserves existing through rows."""
    app_label = "test_rename_model_with_m2m"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("riders", models.ManyToManyField("Rider")),
                ],
            ),
        ],
    )
    # Seed one relation before the rename.
    Pony = project_state.apps.get_model(app_label, "Pony")
    Rider = project_state.apps.get_model(app_label, "Rider")
    pony = Pony.objects.create()
    rider = Rider.objects.create()
    pony.riders.add(rider)
    project_state = self.apply_operations(
        app_label,
        project_state,
        operations=[
            migrations.RenameModel("Pony", "Pony2"),
        ],
    )
    # Add a second relation after the rename; both must be visible.
    Pony = project_state.apps.get_model(app_label, "Pony2")
    Rider = project_state.apps.get_model(app_label, "Rider")
    pony = Pony.objects.create()
    rider = Rider.objects.create()
    pony.riders.add(rider)
    self.assertEqual(Pony.objects.count(), 2)
    self.assertEqual(Rider.objects.count(), 2)
    self.assertEqual(
        Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
    )
def test_rename_model_with_m2m_models_in_different_apps_with_same_name(self):
    """
    Renaming one of two same-named models linked by an M2M renames the
    through table's disambiguated columns correctly (forwards and backwards).
    """
    app_label_1 = "test_rmw_m2m_1"
    app_label_2 = "test_rmw_m2m_2"
    project_state = self.apply_operations(
        app_label_1,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
        ],
    )
    project_state = self.apply_operations(
        app_label_2,
        project_state,
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("riders", models.ManyToManyField(f"{app_label_1}.Rider")),
                ],
            ),
        ],
    )
    # Same model name on both sides => from_/to_ disambiguated columns.
    m2m_table = f"{app_label_2}_rider_riders"
    self.assertColumnExists(m2m_table, "from_rider_id")
    self.assertColumnExists(m2m_table, "to_rider_id")
    Rider_1 = project_state.apps.get_model(app_label_1, "Rider")
    Rider_2 = project_state.apps.get_model(app_label_2, "Rider")
    rider_2 = Rider_2.objects.create()
    rider_2.riders.add(Rider_1.objects.create())
    # Rename model.
    # Keep a pre-rename state clone so the rename can be unapplied below.
    project_state_2 = project_state.clone()
    project_state = self.apply_operations(
        app_label_2,
        project_state,
        operations=[migrations.RenameModel("Rider", "Pony")],
    )
    # Names no longer clash, so columns revert to plain <model>_id form.
    m2m_table = f"{app_label_2}_pony_riders"
    self.assertColumnExists(m2m_table, "pony_id")
    self.assertColumnExists(m2m_table, "rider_id")
    Rider_1 = project_state.apps.get_model(app_label_1, "Rider")
    Rider_2 = project_state.apps.get_model(app_label_2, "Pony")
    rider_2 = Rider_2.objects.create()
    rider_2.riders.add(Rider_1.objects.create())
    self.assertEqual(Rider_1.objects.count(), 2)
    self.assertEqual(Rider_2.objects.count(), 2)
    self.assertEqual(
        Rider_2._meta.get_field("riders").remote_field.through.objects.count(), 2
    )
    # Reversal.
    self.unapply_operations(
        app_label_2,
        project_state_2,
        operations=[migrations.RenameModel("Rider", "Pony")],
    )
    m2m_table = f"{app_label_2}_rider_riders"
    self.assertColumnExists(m2m_table, "to_rider_id")
    self.assertColumnExists(m2m_table, "from_rider_id")
def test_rename_model_with_db_table_rename_m2m(self):
    """With an explicit db_table, renaming still keeps the M2M functional."""
    app_label = "test_rmwdbrm2m"
    setup_ops = [
        migrations.CreateModel(
            "Rider",
            fields=[
                ("id", models.AutoField(primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            "Pony",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("riders", models.ManyToManyField("Rider")),
            ],
            options={"db_table": "pony"},
        ),
    ]
    base_state = self.apply_operations(app_label, ProjectState(), operations=setup_ops)
    renamed_state = self.apply_operations(
        app_label,
        base_state,
        operations=[migrations.RenameModel("Pony", "PinkPony")],
    )
    # The relation must still be writable after the rename.
    PinkPony = renamed_state.apps.get_model(app_label, "PinkPony")
    Rider = renamed_state.apps.get_model(app_label, "Rider")
    pink_pony = PinkPony.objects.create()
    a_rider = Rider.objects.create()
    pink_pony.riders.add(a_rider)
def test_rename_m2m_target_model(self):
    """Renaming the *target* of an M2M preserves the relation and its rows."""
    app_label = "test_rename_m2m_target_model"
    project_state = self.apply_operations(
        app_label,
        ProjectState(),
        operations=[
            migrations.CreateModel(
                "Rider",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                ],
            ),
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("riders", models.ManyToManyField("Rider")),
                ],
            ),
        ],
    )
    # Seed one relation before renaming the target model.
    Pony = project_state.apps.get_model(app_label, "Pony")
    Rider = project_state.apps.get_model(app_label, "Rider")
    pony = Pony.objects.create()
    rider = Rider.objects.create()
    pony.riders.add(rider)
    project_state = self.apply_operations(
        app_label,
        project_state,
        operations=[
            migrations.RenameModel("Rider", "Rider2"),
        ],
    )
    # Add a second relation after the rename; both must survive.
    Pony = project_state.apps.get_model(app_label, "Pony")
    Rider = project_state.apps.get_model(app_label, "Rider2")
    pony = Pony.objects.create()
    rider = Rider.objects.create()
    pony.riders.add(rider)
    self.assertEqual(Pony.objects.count(), 2)
    self.assertEqual(Rider.objects.count(), 2)
    self.assertEqual(
        Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
    )
    def test_rename_m2m_through_model(self):
        """
        Renaming an explicit M2M ``through`` model keeps previously created
        through rows and allows creating new ones afterwards.
        """
        app_label = "test_rename_through"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "rider",
                            models.ForeignKey(
                                "test_rename_through.Rider", models.CASCADE
                            ),
                        ),
                        (
                            "pony",
                            models.ForeignKey(
                                "test_rename_through.Pony", models.CASCADE
                            ),
                        ),
                    ],
                ),
                migrations.AddField(
                    "Pony",
                    "riders",
                    models.ManyToManyField(
                        "test_rename_through.Rider",
                        through="test_rename_through.PonyRider",
                    ),
                ),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        PonyRider = project_state.apps.get_model(app_label, "PonyRider")
        # Seed one through row before renaming the through model.
        pony = Pony.objects.create()
        rider = Rider.objects.create()
        PonyRider.objects.create(pony=pony, rider=rider)
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameModel("PonyRider", "PonyRider2"),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
        pony = Pony.objects.first()
        rider = Rider.objects.create()
        PonyRider.objects.create(pony=pony, rider=rider)
        self.assertEqual(Pony.objects.count(), 1)
        self.assertEqual(Rider.objects.count(), 2)
        # Old and new through rows both exist and are reachable via the M2M.
        self.assertEqual(PonyRider.objects.count(), 2)
        self.assertEqual(pony.riders.count(), 2)
    def test_rename_m2m_model_after_rename_field(self):
        """RenameModel renames a many-to-many column after a RenameField."""
        app_label = "test_rename_multiple"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=20)),
                    ],
                ),
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "pony",
                            models.ForeignKey(
                                "test_rename_multiple.Pony", models.CASCADE
                            ),
                        ),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("riders", models.ManyToManyField("Rider")),
                    ],
                ),
                # RenameField first, then RenameModel — the M2M column rename
                # must still resolve against the already-renamed state.
                migrations.RenameField(
                    model_name="pony", old_name="name", new_name="fancy_name"
                ),
                migrations.RenameModel(old_name="Rider", new_name="Jockey"),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Jockey = project_state.apps.get_model(app_label, "Jockey")
        PonyRider = project_state.apps.get_model(app_label, "PonyRider")
        # No "no such column" error means the column was renamed correctly.
        pony = Pony.objects.create(fancy_name="a good name")
        jockey = Jockey.objects.create(pony=pony)
        ponyrider = PonyRider.objects.create()
        ponyrider.riders.add(jockey)
    def test_rename_m2m_field_with_2_references(self):
        """
        Renaming a model that is referenced twice by the same through model
        (``child`` and ``parent`` FKs both point at Person) renames the table
        without errors.
        """
        app_label = "test_rename_many_refs"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    name="Person",
                    fields=[
                        (
                            "id",
                            models.BigAutoField(
                                auto_created=True,
                                primary_key=True,
                                serialize=False,
                                verbose_name="ID",
                            ),
                        ),
                        ("name", models.CharField(max_length=255)),
                    ],
                ),
                migrations.CreateModel(
                    name="Relation",
                    fields=[
                        (
                            "id",
                            models.BigAutoField(
                                auto_created=True,
                                primary_key=True,
                                serialize=False,
                                verbose_name="ID",
                            ),
                        ),
                        # Two FKs from the same through model to Person.
                        (
                            "child",
                            models.ForeignKey(
                                on_delete=models.CASCADE,
                                related_name="relations_as_child",
                                to=f"{app_label}.person",
                            ),
                        ),
                        (
                            "parent",
                            models.ForeignKey(
                                on_delete=models.CASCADE,
                                related_name="relations_as_parent",
                                to=f"{app_label}.person",
                            ),
                        ),
                    ],
                ),
                # Self-referential M2M through Relation.
                migrations.AddField(
                    model_name="person",
                    name="parents_or_children",
                    field=models.ManyToManyField(
                        blank=True,
                        through=f"{app_label}.Relation",
                        to=f"{app_label}.person",
                    ),
                ),
            ],
        )
        Person = project_state.apps.get_model(app_label, "Person")
        Relation = project_state.apps.get_model(app_label, "Relation")
        person1 = Person.objects.create(name="John Doe")
        person2 = Person.objects.create(name="Jane Smith")
        Relation.objects.create(child=person2, parent=person1)
        self.assertTableExists(app_label + "_person")
        self.assertTableNotExists(app_label + "_other")
        self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameModel(old_name="Person", new_name="Other"),
            ],
        )
        # The base table was renamed to match the new model name.
        self.assertTableNotExists(app_label + "_person")
        self.assertTableExists(app_label + "_other")
    def test_add_field(self):
        """
        Tests the AddField operation.
        """
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=5),
        )
        self.assertEqual(operation.describe(), "Add field height to Pony")
        self.assertEqual(
            operation.formatted_description(), "+ Add field height to Pony"
        )
        self.assertEqual(operation.migration_name_fragment, "pony_height")
        project_state, new_state = self.make_test_state("test_adfl", operation)
        # The model state gained the new field with its Python-level default.
        self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 6)
        field = new_state.models["test_adfl", "pony"].fields["height"]
        self.assertEqual(field.default, 5)
        # Test the database alteration
        self.assertColumnNotExists("test_adfl_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adfl", editor, project_state, new_state)
        self.assertColumnExists("test_adfl_pony", "height")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_adfl", editor, new_state, project_state)
        self.assertColumnNotExists("test_adfl_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_add_generated_field(self):
        """
        A stored GeneratedField created via CreateModel computes its value on
        insert and recomputes it when the referenced columns change.
        """
        app_label = "test_add_generated_field"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=20)),
                        (
                            "rider",
                            models.ForeignKey(
                                f"{app_label}.Rider", on_delete=models.CASCADE
                            ),
                        ),
                        (
                            "name_and_id",
                            models.GeneratedField(
                                expression=Concat(("name"), ("rider_id")),
                                output_field=models.TextField(),
                                db_persist=True,
                            ),
                        ),
                    ],
                ),
            ],
        )
        Pony = project_state.apps.get_model(app_label, "Pony")
        Rider = project_state.apps.get_model(app_label, "Rider")
        rider = Rider.objects.create()
        pony = Pony.objects.create(name="pony", rider=rider)
        self.assertEqual(pony.name_and_id, str(pony.name) + str(rider.id))
        # Changing the FK must cause the stored generated column to be
        # recomputed by the database (visible after refresh_from_db()).
        new_rider = Rider.objects.create()
        pony.rider = new_rider
        pony.save()
        pony.refresh_from_db()
        self.assertEqual(pony.name_and_id, str(pony.name) + str(new_rider.id))
    def test_add_charfield(self):
        """
        Tests the AddField operation on CharField.
        """
        project_state = self.set_up_test_model("test_adchfl")
        Pony = project_state.apps.get_model("test_adchfl", "Pony")
        pony = Pony.objects.create(weight=42)
        new_state = self.apply_operations(
            "test_adchfl",
            project_state,
            [
                migrations.AddField(
                    "Pony",
                    "text",
                    models.CharField(max_length=10, default="some text"),
                ),
                migrations.AddField(
                    "Pony",
                    "empty",
                    models.CharField(max_length=10, default=""),
                ),
                # If not properly quoted digits would be interpreted as an int.
                migrations.AddField(
                    "Pony",
                    "digits",
                    models.CharField(max_length=10, default="42"),
                ),
                # Manual quoting is fragile and could trip on quotes.
                migrations.AddField(
                    "Pony",
                    "quotes",
                    models.CharField(max_length=10, default='"\'"'),
                ),
            ],
        )
        Pony = new_state.apps.get_model("test_adchfl", "Pony")
        # Reload the pre-existing row: the defaults must have been backfilled
        # as strings, byte-for-byte.
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')
    def test_add_textfield(self):
        """
        Tests the AddField operation on TextField.
        """
        project_state = self.set_up_test_model("test_adtxtfl")
        Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
        pony = Pony.objects.create(weight=42)
        new_state = self.apply_operations(
            "test_adtxtfl",
            project_state,
            [
                migrations.AddField(
                    "Pony",
                    "text",
                    models.TextField(default="some text"),
                ),
                migrations.AddField(
                    "Pony",
                    "empty",
                    models.TextField(default=""),
                ),
                # If not properly quoted digits would be interpreted as an int.
                migrations.AddField(
                    "Pony",
                    "digits",
                    models.TextField(default="42"),
                ),
                # Manual quoting is fragile and could trip on quotes.
                migrations.AddField(
                    "Pony",
                    "quotes",
                    models.TextField(default='"\'"'),
                ),
            ],
        )
        Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
        # Reload the pre-existing row to verify the backfilled defaults.
        pony = Pony.objects.get(pk=pony.pk)
        self.assertEqual(pony.text, "some text")
        self.assertEqual(pony.empty, "")
        self.assertEqual(pony.digits, "42")
        self.assertEqual(pony.quotes, '"\'"')
    def test_add_binaryfield(self):
        """
        Tests the AddField operation on BinaryField.
        """
        project_state = self.set_up_test_model("test_adbinfl")
        Pony = project_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.create(weight=42)
        new_state = self.apply_operations(
            "test_adbinfl",
            project_state,
            [
                migrations.AddField(
                    "Pony",
                    "blob",
                    models.BinaryField(default=b"some text"),
                ),
                migrations.AddField(
                    "Pony",
                    "empty",
                    models.BinaryField(default=b""),
                ),
                # If not properly quoted digits would be interpreted as an int.
                migrations.AddField(
                    "Pony",
                    "digits",
                    models.BinaryField(default=b"42"),
                ),
                # Manual quoting is fragile and could trip on quotes.
                migrations.AddField(
                    "Pony",
                    "quotes",
                    models.BinaryField(default=b'"\'"'),
                ),
            ],
        )
        Pony = new_state.apps.get_model("test_adbinfl", "Pony")
        pony = Pony.objects.get(pk=pony.pk)
        # SQLite returns buffer/memoryview, cast to bytes for checking.
        self.assertEqual(bytes(pony.blob), b"some text")
        self.assertEqual(bytes(pony.empty), b"")
        self.assertEqual(bytes(pony.digits), b"42")
        self.assertEqual(bytes(pony.quotes), b'"\'"')
def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_regr22168", editor, project_state, new_state
)
self.assertColumnExists("test_regr22168_pony", "order")
    def test_add_field_preserve_default(self):
        """
        Tests the AddField operation's state alteration
        when preserve_default = False.
        """
        project_state = self.set_up_test_model("test_adflpd")
        # Test the state alteration
        operation = migrations.AddField(
            "Pony",
            "height",
            models.FloatField(null=True, default=4),
            preserve_default=False,
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adflpd", new_state)
        self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 6)
        field = new_state.models["test_adflpd", "pony"].fields["height"]
        # With preserve_default=False the default is used only to populate
        # existing rows; the state keeps the field with no default.
        self.assertEqual(field.default, models.NOT_PROVIDED)
        # Test the database alteration
        project_state.apps.get_model("test_adflpd", "pony").objects.create(
            weight=4,
        )
        self.assertColumnNotExists("test_adflpd_pony", "height")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adflpd", editor, project_state, new_state)
        self.assertColumnExists("test_adflpd_pony", "height")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            sorted(definition[2]), ["field", "model_name", "name", "preserve_default"]
        )
    def test_add_field_database_default(self):
        """The AddField operation can set and unset a database default."""
        app_label = "test_adfldd"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AddField(
            "Pony", "height", models.FloatField(null=True, db_default=4)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
        field = new_state.models[app_label, "pony"].fields["height"]
        # db_default is tracked separately from the Python-level default.
        self.assertEqual(field.default, models.NOT_PROVIDED)
        self.assertEqual(field.db_default, 4)
        project_state.apps.get_model(app_label, "pony").objects.create(weight=4)
        self.assertColumnNotExists(table_name, "height")
        # Add field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(table_name, "height")
        new_model = new_state.apps.get_model(app_label, "pony")
        # Pre-existing row was backfilled with the database default.
        old_pony = new_model.objects.get()
        self.assertEqual(old_pony.height, 4)
        new_pony = new_model.objects.create(weight=5)
        if not connection.features.can_return_columns_from_insert:
            # Backends that can't RETURNING the value need a reload to see it.
            new_pony.refresh_from_db()
        self.assertEqual(new_pony.height, 4)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertColumnNotExists(table_name, "height")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "field": field,
                "model_name": "Pony",
                "name": "height",
            },
        )
    def test_add_field_database_default_special_char_escaping(self):
        """
        db_default values containing %, ' or " are escaped correctly in the
        DDL generated by AddField.
        """
        app_label = "test_adflddsce"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        old_pony_pk = (
            project_state.apps.get_model(app_label, "pony").objects.create(weight=4).pk
        )
        tests = ["%", "'", '"']
        for db_default in tests:
            with self.subTest(db_default=db_default):
                operation = migrations.AddField(
                    "Pony",
                    "special_char",
                    models.CharField(max_length=1, db_default=db_default),
                )
                new_state = project_state.clone()
                operation.state_forwards(app_label, new_state)
                self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
                field = new_state.models[app_label, "pony"].fields["special_char"]
                self.assertEqual(field.default, models.NOT_PROVIDED)
                self.assertEqual(field.db_default, db_default)
                self.assertColumnNotExists(table_name, "special_char")
                with connection.schema_editor() as editor:
                    operation.database_forwards(
                        app_label, editor, project_state, new_state
                    )
                self.assertColumnExists(table_name, "special_char")
                new_model = new_state.apps.get_model(app_label, "pony")
                try:
                    new_pony = new_model.objects.create(weight=5)
                    if not connection.features.can_return_columns_from_insert:
                        new_pony.refresh_from_db()
                    self.assertEqual(new_pony.special_char, db_default)
                    old_pony = new_model.objects.get(pk=old_pony_pk)
                    if connection.vendor != "oracle" or db_default != "'":
                        # The single quotation mark ' is properly quoted and is
                        # set for new rows on Oracle, however it is not set on
                        # existing rows. Skip the assertion as it's probably a
                        # bug in Oracle.
                        self.assertEqual(old_pony.special_char, db_default)
                finally:
                    # Always revert so the next subTest starts from a clean
                    # schema without the column.
                    with connection.schema_editor() as editor:
                        operation.database_backwards(
                            app_label, editor, new_state, project_state
                        )
@skipUnlessDBFeature("supports_expression_defaults")
def test_add_field_database_default_function(self):
app_label = "test_adflddf"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
operation = migrations.AddField(
"Pony", "height", models.FloatField(db_default=Pi())
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
field = new_state.models[app_label, "pony"].fields["height"]
self.assertEqual(field.default, models.NOT_PROVIDED)
self.assertEqual(field.db_default, Pi())
project_state.apps.get_model(app_label, "pony").objects.create(weight=4)
self.assertColumnNotExists(table_name, "height")
# Add field.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(table_name, "height")
new_model = new_state.apps.get_model(app_label, "pony")
old_pony = new_model.objects.get()
self.assertAlmostEqual(old_pony.height, math.pi)
new_pony = new_model.objects.create(weight=5)
if not connection.features.can_return_columns_from_insert:
new_pony.refresh_from_db()
self.assertAlmostEqual(old_pony.height, math.pi)
    def test_add_field_both_defaults(self):
        """The AddField operation with both default and db_default."""
        app_label = "test_adflbddd"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AddField(
            "Pony", "height", models.FloatField(default=3, db_default=4)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
        field = new_state.models[app_label, "pony"].fields["height"]
        self.assertEqual(field.default, 3)
        self.assertEqual(field.db_default, 4)
        # Row created before the column is added.
        pre_pony_pk = (
            project_state.apps.get_model(app_label, "pony").objects.create(weight=4).pk
        )
        self.assertColumnNotExists(table_name, "height")
        # Add field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(table_name, "height")
        # Row created after the column exists, but via the OLD model state
        # (which has no 'height' field), so the database default applies.
        post_pony_pk = (
            project_state.apps.get_model(app_label, "pony").objects.create(weight=10).pk
        )
        new_model = new_state.apps.get_model(app_label, "pony")
        # Both rows above got the db_default (4)...
        pre_pony = new_model.objects.get(pk=pre_pony_pk)
        self.assertEqual(pre_pony.height, 4)
        post_pony = new_model.objects.get(pk=post_pony_pk)
        self.assertEqual(post_pony.height, 4)
        # ...while an insert through the NEW model uses the Python default (3).
        new_pony = new_model.objects.create(weight=5)
        if not connection.features.can_return_columns_from_insert:
            new_pony.refresh_from_db()
        self.assertEqual(new_pony.height, 3)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertColumnNotExists(table_name, "height")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddField")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "field": field,
                "model_name": "Pony",
                "name": "height",
            },
        )
    def test_add_field_m2m(self):
        """
        Tests the AddField operation with a ManyToManyField.
        """
        project_state = self.set_up_test_model("test_adflmm", second_model=True)
        # Test the state alteration
        operation = migrations.AddField(
            "Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adflmm", new_state)
        self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 6)
        # Test the database alteration
        self.assertTableNotExists("test_adflmm_pony_stables")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adflmm", editor, project_state, new_state)
        # An M2M creates a through table, not a column on the model's table.
        self.assertTableExists("test_adflmm_pony_stables")
        self.assertColumnNotExists("test_adflmm_pony", "stables")
        # Make sure the M2M field actually works
        with atomic():
            Pony = new_state.apps.get_model("test_adflmm", "Pony")
            p = Pony.objects.create(pink=False, weight=4.55)
            p.stables.create()
            self.assertEqual(p.stables.count(), 1)
            p.stables.all().delete()
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_adflmm", editor, new_state, project_state
            )
        self.assertTableNotExists("test_adflmm_pony_stables")
    def test_alter_field_m2m(self):
        """
        AlterField can change options (here: ``blank``) on a ManyToManyField
        in place.
        """
        project_state = self.set_up_test_model("test_alflmm", second_model=True)
        project_state = self.apply_operations(
            "test_alflmm",
            project_state,
            operations=[
                migrations.AddField(
                    "Pony",
                    "stables",
                    models.ManyToManyField("Stable", related_name="ponies"),
                )
            ],
        )
        Pony = project_state.apps.get_model("test_alflmm", "Pony")
        self.assertFalse(Pony._meta.get_field("stables").blank)
        project_state = self.apply_operations(
            "test_alflmm",
            project_state,
            operations=[
                migrations.AlterField(
                    "Pony",
                    "stables",
                    models.ManyToManyField(
                        to="Stable", related_name="ponies", blank=True
                    ),
                )
            ],
        )
        Pony = project_state.apps.get_model("test_alflmm", "Pony")
        # The altered option is reflected on the rendered model.
        self.assertTrue(Pony._meta.get_field("stables").blank)
    def test_repoint_field_m2m(self):
        """
        AlterField can repoint a ManyToManyField at a different model and the
        repointed relation remains usable.
        """
        project_state = self.set_up_test_model(
            "test_alflmm", second_model=True, third_model=True
        )
        project_state = self.apply_operations(
            "test_alflmm",
            project_state,
            operations=[
                migrations.AddField(
                    "Pony",
                    "places",
                    models.ManyToManyField("Stable", related_name="ponies"),
                )
            ],
        )
        Pony = project_state.apps.get_model("test_alflmm", "Pony")
        # Repoint the M2M from Stable to Van.
        project_state = self.apply_operations(
            "test_alflmm",
            project_state,
            operations=[
                migrations.AlterField(
                    "Pony",
                    "places",
                    models.ManyToManyField(to="Van", related_name="ponies"),
                )
            ],
        )
        # Ensure the new field actually works
        Pony = project_state.apps.get_model("test_alflmm", "Pony")
        p = Pony.objects.create(pink=False, weight=4.55)
        p.places.create()
        self.assertEqual(p.places.count(), 1)
        p.places.all().delete()
    def test_remove_field_m2m(self):
        """
        RemoveField on a ManyToManyField drops the auto-created through table;
        reversal recreates it.
        """
        project_state = self.set_up_test_model("test_rmflmm", second_model=True)
        project_state = self.apply_operations(
            "test_rmflmm",
            project_state,
            operations=[
                migrations.AddField(
                    "Pony",
                    "stables",
                    models.ManyToManyField("Stable", related_name="ponies"),
                )
            ],
        )
        self.assertTableExists("test_rmflmm_pony_stables")
        # Keep the pre-removal state around for the reversal below.
        with_field_state = project_state.clone()
        operations = [migrations.RemoveField("Pony", "stables")]
        project_state = self.apply_operations(
            "test_rmflmm", project_state, operations=operations
        )
        self.assertTableNotExists("test_rmflmm_pony_stables")
        # And test reversal
        self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
        self.assertTableExists("test_rmflmm_pony_stables")
    def test_remove_field_m2m_with_through(self):
        """
        A M2M with an explicit through model can be removed with
        RemoveField followed by DeleteModel for the through model.
        """
        project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
        self.assertTableNotExists("test_rmflmmwt_ponystables")
        project_state = self.apply_operations(
            "test_rmflmmwt",
            project_state,
            operations=[
                migrations.CreateModel(
                    "PonyStables",
                    fields=[
                        (
                            "pony",
                            models.ForeignKey("test_rmflmmwt.Pony", models.CASCADE),
                        ),
                        (
                            "stable",
                            models.ForeignKey("test_rmflmmwt.Stable", models.CASCADE),
                        ),
                    ],
                ),
                migrations.AddField(
                    "Pony",
                    "stables",
                    models.ManyToManyField(
                        "Stable",
                        related_name="ponies",
                        through="test_rmflmmwt.PonyStables",
                    ),
                ),
            ],
        )
        self.assertTableExists("test_rmflmmwt_ponystables")
        operations = [
            migrations.RemoveField("Pony", "stables"),
            migrations.DeleteModel("PonyStables"),
        ]
        # No exception here means both operations apply cleanly.
        self.apply_operations("test_rmflmmwt", project_state, operations=operations)
    def test_remove_field(self):
        """
        Tests the RemoveField operation.
        """
        project_state = self.set_up_test_model("test_rmfl")
        # Test the state alteration
        operation = migrations.RemoveField("Pony", "pink")
        self.assertEqual(operation.describe(), "Remove field pink from Pony")
        self.assertEqual(
            operation.formatted_description(), "- Remove field pink from Pony"
        )
        self.assertEqual(operation.migration_name_fragment, "remove_pony_pink")
        new_state = project_state.clone()
        operation.state_forwards("test_rmfl", new_state)
        self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 4)
        # Test the database alteration
        self.assertColumnExists("test_rmfl_pony", "pink")
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            operation.database_forwards("test_rmfl", editor, project_state, new_state)
        self.assertGreater(len(ctx.captured_queries), 0)
        # The generated SQL must not cascade beyond the dropped column.
        self.assertNotIn("CASCADE", ctx.captured_queries[-1]["sql"])
        self.assertColumnNotExists("test_rmfl_pony", "pink")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_rmfl", editor, new_state, project_state)
        self.assertColumnExists("test_rmfl_pony", "pink")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveField")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": "pink"})
def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")
    def test_alter_model_table(self):
        """
        Tests the AlterModelTable operation.
        """
        project_state = self.set_up_test_model("test_almota")
        # Test the state alteration
        operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
        self.assertEqual(
            operation.describe(), "Rename table for Pony to test_almota_pony_2"
        )
        self.assertEqual(
            operation.formatted_description(),
            "~ Rename table for Pony to test_almota_pony_2",
        )
        self.assertEqual(operation.migration_name_fragment, "alter_pony_table")
        new_state = project_state.clone()
        operation.state_forwards("test_almota", new_state)
        # The model state records the new explicit db_table.
        self.assertEqual(
            new_state.models["test_almota", "pony"].options["db_table"],
            "test_almota_pony_2",
        )
        # Test the database alteration
        self.assertTableExists("test_almota_pony")
        self.assertTableNotExists("test_almota_pony_2")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_almota", editor, project_state, new_state)
        self.assertTableNotExists("test_almota_pony")
        self.assertTableExists("test_almota_pony_2")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_almota", editor, new_state, project_state
            )
        self.assertTableExists("test_almota_pony")
        self.assertTableNotExists("test_almota_pony_2")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterModelTable")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"name": "Pony", "table": "test_almota_pony_2"})
def test_alter_model_table_none(self):
"""
Tests the AlterModelTable operation if the table name is set to None.
"""
operation = migrations.AlterModelTable("Pony", None)
self.assertEqual(operation.describe(), "Rename table for Pony to (default)")
    def test_alter_model_table_noop(self):
        """
        Tests the AlterModelTable operation if the table name is not changed.
        """
        project_state = self.set_up_test_model("test_almota")
        # Test the state alteration
        operation = migrations.AlterModelTable("Pony", "test_almota_pony")
        new_state = project_state.clone()
        operation.state_forwards("test_almota", new_state)
        self.assertEqual(
            new_state.models["test_almota", "pony"].options["db_table"],
            "test_almota_pony",
        )
        # Test the database alteration
        self.assertTableExists("test_almota_pony")
        # Applying a same-name rename must leave the table in place.
        with connection.schema_editor() as editor:
            operation.database_forwards("test_almota", editor, project_state, new_state)
        self.assertTableExists("test_almota_pony")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_almota", editor, new_state, project_state
            )
        self.assertTableExists("test_almota_pony")
    def test_alter_model_table_m2m(self):
        """
        AlterModelTable should rename auto-generated M2M tables.
        """
        app_label = "test_talflmltlm2m"
        pony_db_table = "pony_foo"
        project_state = self.set_up_test_model(
            app_label, second_model=True, db_table=pony_db_table
        )
        # Add the M2M field
        first_state = project_state.clone()
        operation = migrations.AddField(
            "Pony", "stables", models.ManyToManyField("Stable")
        )
        operation.state_forwards(app_label, first_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, first_state)
        # M2M table name is derived from the model's (custom) table name.
        original_m2m_table = "%s_%s" % (pony_db_table, "stables")
        new_m2m_table = "%s_%s" % (app_label, "pony_stables")
        self.assertTableExists(original_m2m_table)
        self.assertTableNotExists(new_m2m_table)
        # Rename the Pony db_table which should also rename the m2m table.
        second_state = first_state.clone()
        operation = migrations.AlterModelTable(name="pony", table=None)
        operation.state_forwards(app_label, second_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, first_state, second_state)
        self.assertTableExists(new_m2m_table)
        self.assertTableNotExists(original_m2m_table)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, second_state, first_state)
        self.assertTableExists(original_m2m_table)
        self.assertTableNotExists(new_m2m_table)
    def test_alter_model_table_m2m_field(self):
        """
        AlterField changing an M2M target to "self" renames the through
        table's FK columns (pony_id/stable_id -> from_pony_id/to_pony_id).
        """
        app_label = "test_talm2mfl"
        project_state = self.set_up_test_model(app_label, second_model=True)
        # Add the M2M field.
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.AddField(
                    "Pony",
                    "stables",
                    models.ManyToManyField("Stable"),
                )
            ],
        )
        m2m_table = f"{app_label}_pony_stables"
        self.assertColumnExists(m2m_table, "pony_id")
        self.assertColumnExists(m2m_table, "stable_id")
        # Point the M2M field to self.
        with_field_state = project_state.clone()
        operations = [
            migrations.AlterField(
                model_name="Pony",
                name="stables",
                field=models.ManyToManyField("self"),
            )
        ]
        project_state = self.apply_operations(
            app_label, project_state, operations=operations
        )
        self.assertColumnExists(m2m_table, "from_pony_id")
        self.assertColumnExists(m2m_table, "to_pony_id")
        # Reversal.
        self.unapply_operations(app_label, with_field_state, operations=operations)
        self.assertColumnExists(m2m_table, "pony_id")
        self.assertColumnExists(m2m_table, "stable_id")
    def test_alter_field(self):
        """
        Tests the AlterField operation.
        """
        project_state = self.set_up_test_model("test_alfl")
        # Test the state alteration
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(null=True)
        )
        self.assertEqual(operation.describe(), "Alter field pink on Pony")
        self.assertEqual(
            operation.formatted_description(), "~ Alter field pink on Pony"
        )
        self.assertEqual(operation.migration_name_fragment, "alter_pony_pink")
        new_state = project_state.clone()
        operation.state_forwards("test_alfl", new_state)
        # Only the new state sees the null=True change.
        self.assertIs(
            project_state.models["test_alfl", "pony"].fields["pink"].null, False
        )
        self.assertIs(new_state.models["test_alfl", "pony"].fields["pink"].null, True)
        # Test the database alteration
        self.assertColumnNotNull("test_alfl_pony", "pink")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alfl", editor, project_state, new_state)
        self.assertColumnNull("test_alfl_pony", "pink")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_alfl", editor, new_state, project_state)
        self.assertColumnNotNull("test_alfl_pony", "pink")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterField")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
    def test_alter_field_add_database_default(self):
        """
        AlterField can add a database default to an existing field, and
        reversal removes it again.
        """
        app_label = "test_alfladd"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AlterField(
            "Pony", "weight", models.FloatField(db_default=4.5)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        old_weight = project_state.models[app_label, "pony"].fields["weight"]
        self.assertIs(old_weight.db_default, models.NOT_PROVIDED)
        new_weight = new_state.models[app_label, "pony"].fields["weight"]
        self.assertEqual(new_weight.db_default, 4.5)
        # Before the alteration, inserting without a weight must fail.
        with self.assertRaises(IntegrityError), transaction.atomic():
            project_state.apps.get_model(app_label, "pony").objects.create()
        # Alter field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        pony = new_state.apps.get_model(app_label, "pony").objects.create()
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertEqual(pony.weight, 4.5)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        # Default is gone again, so the same insert fails once more.
        with self.assertRaises(IntegrityError), transaction.atomic():
            project_state.apps.get_model(app_label, "pony").objects.create()
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterField")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "field": new_weight,
                "model_name": "Pony",
                "name": "weight",
            },
        )
    def test_alter_field_change_default_to_database_default(self):
        """The AlterField operation changing default to db_default."""
        app_label = "test_alflcdtdd"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(db_default=4)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        old_pink = project_state.models[app_label, "pony"].fields["pink"]
        self.assertEqual(old_pink.default, 3)
        self.assertIs(old_pink.db_default, models.NOT_PROVIDED)
        new_pink = new_state.models[app_label, "pony"].fields["pink"]
        self.assertIs(new_pink.default, models.NOT_PROVIDED)
        self.assertEqual(new_pink.db_default, 4)
        # With the Python-level default, new rows get pink=3.
        pony = project_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertEqual(pony.pink, 3)
        # Alter field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # With the database default, the database fills in pink=4.
        pony = new_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertEqual(pony.pink, 4)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        pony = project_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertEqual(pony.pink, 3)
    @skipUnlessDBFeature("supports_expression_defaults")
    def test_alter_field_add_database_default_func(self):
        """AlterField adding an expression db_default (Pi) to a field."""
        app_label = "test_alfladdf"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AlterField(
            "Pony", "weight", models.FloatField(db_default=Pi())
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        old_weight = project_state.models[app_label, "pony"].fields["weight"]
        self.assertIs(old_weight.default, models.NOT_PROVIDED)
        self.assertIs(old_weight.db_default, models.NOT_PROVIDED)
        new_weight = new_state.models[app_label, "pony"].fields["weight"]
        self.assertIs(new_weight.default, models.NOT_PROVIDED)
        self.assertIsInstance(new_weight.db_default, Pi)
        pony = project_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertEqual(pony.weight, 1)
        # Alter field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Omitting weight now lets the database evaluate the expression.
        pony = new_state.apps.get_model(app_label, "pony").objects.create()
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertAlmostEqual(pony.weight, math.pi)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        pony = project_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertEqual(pony.weight, 1)
    def test_alter_field_change_nullable_to_database_default_not_null(self):
        """
        The AlterField operation changing a null field to db_default.
        Existing NULL rows are backfilled with the database default.
        """
        app_label = "test_alflcntddnn"
        project_state = self.set_up_test_model(app_label)
        operation = migrations.AlterField(
            "Pony", "green", models.IntegerField(db_default=4)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        old_green = project_state.models[app_label, "pony"].fields["green"]
        self.assertIs(old_green.db_default, models.NOT_PROVIDED)
        new_green = new_state.models[app_label, "pony"].fields["green"]
        self.assertEqual(new_green.db_default, 4)
        old_pony = project_state.apps.get_model(app_label, "pony").objects.create(
            weight=1
        )
        self.assertIsNone(old_pony.green)
        # Alter field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The pre-existing NULL row now carries the default.
        old_pony.refresh_from_db()
        self.assertEqual(old_pony.green, 4)
        pony = new_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertEqual(pony.green, 4)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        pony = project_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertIsNone(pony.green)
    def test_alter_field_change_nullable_to_decimal_database_default_not_null(self):
        """
        Altering a nullable DecimalField to a db_default backfills existing
        NULL rows with the Decimal default.
        """
        app_label = "test_alflcntdddn"
        project_state = self.set_up_test_model(app_label)
        operation_1 = migrations.AddField(
            "Pony",
            "height",
            models.DecimalField(null=True, max_digits=5, decimal_places=2),
        )
        operation_2 = migrations.AlterField(
            "Pony",
            "height",
            models.DecimalField(
                max_digits=5, decimal_places=2, db_default=Decimal("12.22")
            ),
        )
        table_name = f"{app_label}_pony"
        self.assertColumnNotExists(table_name, "height")
        # Add field.
        new_state = project_state.clone()
        operation_1.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_1.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(table_name, "height")
        old_pony = new_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        self.assertIsNone(old_pony.height)
        # Alter field.
        project_state, new_state = new_state, new_state.clone()
        operation_2.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_2.database_forwards(app_label, editor, project_state, new_state)
        old_pony.refresh_from_db()
        self.assertEqual(old_pony.height, Decimal("12.22"))
        pony = new_state.apps.get_model(app_label, "pony").objects.create(weight=2)
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertEqual(pony.height, Decimal("12.22"))
    @skipIfDBFeature("interprets_empty_strings_as_nulls")
    def test_alter_field_change_blank_nullable_database_default_to_not_null(self):
        """
        A blank, nullable CharField altered to a db_default becomes NOT NULL
        and existing NULL values are replaced with the default.
        """
        app_label = "test_alflcbnddnn"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        default = "Yellow"
        operation = migrations.AlterField(
            "Pony",
            "yellow",
            models.CharField(blank=True, db_default=default, max_length=20),
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertColumnNull(table_name, "yellow")
        pony = project_state.apps.get_model(app_label, "pony").objects.create(
            weight=1, yellow=None
        )
        self.assertIsNone(pony.yellow)
        # Alter field.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnNotNull(table_name, "yellow")
        pony.refresh_from_db()
        self.assertEqual(pony.yellow, default)
        pony = new_state.apps.get_model(app_label, "pony").objects.create(weight=1)
        if not connection.features.can_return_columns_from_insert:
            pony.refresh_from_db()
        self.assertEqual(pony.yellow, default)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertColumnNull(table_name, "yellow")
        pony = project_state.apps.get_model(app_label, "pony").objects.create(
            weight=1, yellow=None
        )
        self.assertIsNone(pony.yellow)
def test_alter_field_add_db_column_noop(self):
"""
AlterField operation is a noop when adding only a db_column and the
column name is not changed.
"""
app_label = "test_afadbn"
project_state = self.set_up_test_model(app_label, related_model=True)
pony_table = "%s_pony" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Pony", "weight", models.FloatField(db_column="weight")
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "pony"].fields["weight"].db_column,
)
self.assertEqual(
new_state.models[app_label, "pony"].fields["weight"].db_column,
"weight",
)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
app_label, editor, new_state, project_state
)
self.assertColumnExists(pony_table, "weight")
rider_table = "%s_rider" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Rider",
"pony",
models.ForeignKey("Pony", models.CASCADE, db_column="pony_id"),
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "rider"].fields["pony"].db_column,
)
self.assertIs(
new_state.models[app_label, "rider"].fields["pony"].db_column,
"pony_id",
)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, new_state, project_state)
self.assertColumnExists(rider_table, "pony_id")
    def test_alter_field_foreignobject_noop(self):
        """AlterField on a ForeignObject that only adds null=True runs no ALTER SQL."""
        app_label = "test_alflfo_noop"
        project_state = self.set_up_test_model(app_label)
        project_state = self.apply_operations(
            app_label,
            project_state,
            [
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("pony_id", models.IntegerField()),
                        (
                            "pony",
                            models.ForeignObject(
                                f"{app_label}.Pony",
                                models.CASCADE,
                                from_fields=("pony_id",),
                                to_fields=("id",),
                            ),
                        ),
                    ],
                ),
            ],
        )
        operation = migrations.AlterField(
            "Rider",
            "pony",
            models.ForeignObject(
                f"{app_label}.Pony",
                models.CASCADE,
                from_fields=("pony_id",),
                to_fields=("id",),
                null=True,
            ),
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Capture all SQL to prove no ALTER statement is emitted.
        with (
            CaptureQueriesContext(connection) as ctx,
            connection.schema_editor() as editor,
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIs(
            any("ALTER" in query["sql"] for query in ctx.captured_queries), False
        )
@skipUnlessDBFeature("supports_comments")
def test_alter_model_table_comment(self):
app_label = "test_almotaco"
project_state = self.set_up_test_model(app_label)
pony_table = f"{app_label}_pony"
# Add table comment.
operation = migrations.AlterModelTableComment("Pony", "Custom pony comment")
self.assertEqual(operation.describe(), "Alter Pony table comment")
self.assertEqual(
operation.formatted_description(), "~ Alter Pony table comment"
)
self.assertEqual(operation.migration_name_fragment, "alter_pony_table_comment")
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
new_state.models[app_label, "pony"].options["db_table_comment"],
"Custom pony comment",
)
self.assertTableCommentNotExists(pony_table)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertTableComment(pony_table, "Custom pony comment")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertTableCommentNotExists(pony_table)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTableComment")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "table_comment": "Custom pony comment"}
)
def test_alter_field_pk(self):
"""
The AlterField operation on primary keys (things like PostgreSQL's
SERIAL weirdness).
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.IntegerField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(
project_state.models["test_alflpk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpk", "pony"].fields["id"],
models.IntegerField,
)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpk", editor, new_state, project_state
)
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_alter_field_pk_fk(self):
        """
        Tests the AlterField operation on primary keys changes any FKs pointing
        to it.
        """
        project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
        # Add M2M relations in both directions so both intermediate tables'
        # FK columns are exercised.
        project_state = self.apply_operations(
            "test_alflpkfk",
            project_state,
            [
                migrations.CreateModel(
                    "Stable",
                    fields=[
                        ("ponies", models.ManyToManyField("Pony")),
                    ],
                ),
                migrations.AddField(
                    "Pony",
                    "stables",
                    models.ManyToManyField("Stable"),
                ),
            ],
        )
        # Test the state alteration
        operation = migrations.AlterField(
            "Pony", "id", models.FloatField(primary_key=True)
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alflpkfk", new_state)
        self.assertIsInstance(
            project_state.models["test_alflpkfk", "pony"].fields["id"],
            models.AutoField,
        )
        self.assertIsInstance(
            new_state.models["test_alflpkfk", "pony"].fields["id"],
            models.FloatField,
        )
        # The PK column and every FK/M2M column pointing at it must share the
        # same type and null-ness before and after each alteration.
        def assertIdTypeEqualsFkType():
            with connection.cursor() as cursor:
                id_type, id_null = [
                    (c.type_code, c.null_ok)
                    for c in connection.introspection.get_table_description(
                        cursor, "test_alflpkfk_pony"
                    )
                    if c.name == "id"
                ][0]
                fk_type, fk_null = [
                    (c.type_code, c.null_ok)
                    for c in connection.introspection.get_table_description(
                        cursor, "test_alflpkfk_rider"
                    )
                    if c.name == "pony_id"
                ][0]
                m2m_fk_type, m2m_fk_null = [
                    (c.type_code, c.null_ok)
                    for c in connection.introspection.get_table_description(
                        cursor,
                        "test_alflpkfk_pony_stables",
                    )
                    if c.name == "pony_id"
                ][0]
                remote_m2m_fk_type, remote_m2m_fk_null = [
                    (c.type_code, c.null_ok)
                    for c in connection.introspection.get_table_description(
                        cursor,
                        "test_alflpkfk_stable_ponies",
                    )
                    if c.name == "pony_id"
                ][0]
                self.assertEqual(id_type, fk_type)
                self.assertEqual(id_type, m2m_fk_type)
                self.assertEqual(id_type, remote_m2m_fk_type)
                self.assertEqual(id_null, fk_null)
                self.assertEqual(id_null, m2m_fk_null)
                self.assertEqual(id_null, remote_m2m_fk_null)
        assertIdTypeEqualsFkType()
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_alflpkfk", editor, project_state, new_state
            )
        assertIdTypeEqualsFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_alflpkfk_pony_stables",
                ["pony_id"],
                ("test_alflpkfk_pony", "id"),
            )
            self.assertFKExists(
                "test_alflpkfk_stable_ponies",
                ["pony_id"],
                ("test_alflpkfk_pony", "id"),
            )
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alflpkfk", editor, new_state, project_state
            )
        assertIdTypeEqualsFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                "test_alflpkfk_pony_stables",
                ["pony_id"],
                ("test_alflpkfk_pony", "id"),
            )
            self.assertFKExists(
                "test_alflpkfk_stable_ponies",
                ["pony_id"],
                ("test_alflpkfk_pony", "id"),
            )
    @skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
    def test_alter_field_pk_fk_db_collation(self):
        """
        AlterField operation of db_collation on primary keys changes any FKs
        pointing to it.
        """
        collation = connection.features.test_collations.get("non_default")
        if not collation:
            self.skipTest("Language collations are not supported.")
        app_label = "test_alflpkfkdbc"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            [
                migrations.CreateModel(
                    "Pony",
                    [
                        ("id", models.CharField(primary_key=True, max_length=10)),
                    ],
                ),
                migrations.CreateModel(
                    "Rider",
                    [
                        ("pony", models.ForeignKey("Pony", models.CASCADE)),
                    ],
                ),
                migrations.CreateModel(
                    "Stable",
                    [
                        ("ponies", models.ManyToManyField("Pony")),
                    ],
                ),
            ],
        )
        # State alteration.
        operation = migrations.AlterField(
            "Pony",
            "id",
            models.CharField(
                primary_key=True,
                max_length=10,
                db_collation=collation,
            ),
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Database alteration.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The collation must propagate to the FK and M2M columns as well.
        self.assertColumnCollation(f"{app_label}_pony", "id", collation)
        self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
        self.assertColumnCollation(f"{app_label}_stable_ponies", "pony_id", collation)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
    def test_alter_field_pk_mti_fk(self):
        """
        Altering a PK to BigAutoField updates the MTI parent link column and
        an FK pointing at the child's inherited PK.
        """
        app_label = "test_alflpkmtifk"
        project_state = self.set_up_test_model(app_label, mti_model=True)
        project_state = self.apply_operations(
            app_label,
            project_state,
            [
                migrations.CreateModel(
                    "ShetlandRider",
                    fields=[
                        (
                            "pony",
                            models.ForeignKey(
                                f"{app_label}.ShetlandPony", models.CASCADE
                            ),
                        ),
                    ],
                ),
            ],
        )
        operation = migrations.AlterField(
            "Pony",
            "id",
            models.BigAutoField(primary_key=True),
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertIsInstance(
            new_state.models[app_label, "pony"].fields["id"],
            models.BigAutoField,
        )
        # Introspect a single column's type code from a table.
        def _get_column_id_type(cursor, table, column):
            return [
                c.type_code
                for c in connection.introspection.get_table_description(
                    cursor,
                    f"{app_label}_{table}",
                )
                if c.name == column
            ][0]
        # Parent PK, child parent-link, and FK-to-child columns must all have
        # the same type.
        def assertIdTypeEqualsMTIFkType():
            with connection.cursor() as cursor:
                parent_id_type = _get_column_id_type(cursor, "pony", "id")
                child_id_type = _get_column_id_type(
                    cursor, "shetlandpony", "pony_ptr_id"
                )
                mti_id_type = _get_column_id_type(cursor, "shetlandrider", "pony_id")
                self.assertEqual(parent_id_type, child_id_type)
                self.assertEqual(parent_id_type, mti_id_type)
        assertIdTypeEqualsMTIFkType()
        # Alter primary key.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        assertIdTypeEqualsMTIFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                f"{app_label}_shetlandpony",
                ["pony_ptr_id"],
                (f"{app_label}_pony", "id"),
            )
            self.assertFKExists(
                f"{app_label}_shetlandrider",
                ["pony_id"],
                (f"{app_label}_shetlandpony", "pony_ptr_id"),
            )
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        assertIdTypeEqualsMTIFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                f"{app_label}_shetlandpony",
                ["pony_ptr_id"],
                (f"{app_label}_pony", "id"),
            )
            self.assertFKExists(
                f"{app_label}_shetlandrider",
                ["pony_id"],
                (f"{app_label}_shetlandpony", "pony_ptr_id"),
            )
    def test_alter_field_pk_mti_and_fk_to_base(self):
        """
        Altering a PK to BigAutoField updates both the MTI parent link column
        and a direct FK to the base model.
        """
        app_label = "test_alflpkmtiftb"
        project_state = self.set_up_test_model(
            app_label,
            mti_model=True,
            related_model=True,
        )
        operation = migrations.AlterField(
            "Pony",
            "id",
            models.BigAutoField(primary_key=True),
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertIsInstance(
            new_state.models[app_label, "pony"].fields["id"],
            models.BigAutoField,
        )
        # Introspect a single column's type code from a table.
        def _get_column_id_type(cursor, table, column):
            return [
                c.type_code
                for c in connection.introspection.get_table_description(
                    cursor,
                    f"{app_label}_{table}",
                )
                if c.name == column
            ][0]
        # Parent PK, FK, and child parent-link columns must share one type.
        def assertIdTypeEqualsMTIFkType():
            with connection.cursor() as cursor:
                parent_id_type = _get_column_id_type(cursor, "pony", "id")
                fk_id_type = _get_column_id_type(cursor, "rider", "pony_id")
                child_id_type = _get_column_id_type(
                    cursor, "shetlandpony", "pony_ptr_id"
                )
                self.assertEqual(parent_id_type, child_id_type)
                self.assertEqual(parent_id_type, fk_id_type)
        assertIdTypeEqualsMTIFkType()
        # Alter primary key.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        assertIdTypeEqualsMTIFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                f"{app_label}_shetlandpony",
                ["pony_ptr_id"],
                (f"{app_label}_pony", "id"),
            )
            self.assertFKExists(
                f"{app_label}_rider",
                ["pony_id"],
                (f"{app_label}_pony", "id"),
            )
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        assertIdTypeEqualsMTIFkType()
        if connection.features.supports_foreign_keys:
            self.assertFKExists(
                f"{app_label}_shetlandpony",
                ["pony_ptr_id"],
                (f"{app_label}_pony", "id"),
            )
            self.assertFKExists(
                f"{app_label}_rider",
                ["pony_id"],
                (f"{app_label}_pony", "id"),
            )
    def test_alter_id_pk_to_uuid_pk(self):
        """
        An AutoField PK can be swapped for a UUID PK via a sequence of
        AddField, RemoveField, RenameField, and AlterField operations.
        """
        app_label = "test_alidpktuuidpk"
        project_state = self.set_up_test_model(app_label)
        new_state = project_state.clone()
        # Add UUID field.
        operation = migrations.AddField("Pony", "uuid", models.UUIDField())
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Remove ID.
        project_state = new_state
        new_state = new_state.clone()
        operation = migrations.RemoveField("Pony", "id")
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnNotExists(f"{app_label}_pony", "id")
        # Rename to ID.
        project_state = new_state
        new_state = new_state.clone()
        operation = migrations.RenameField("Pony", "uuid", "id")
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnNotExists(f"{app_label}_pony", "uuid")
        self.assertColumnExists(f"{app_label}_pony", "id")
        # Change to a primary key.
        project_state = new_state
        new_state = new_state.clone()
        operation = migrations.AlterField(
            "Pony", "id", models.UUIDField(primary_key=True)
        )
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self):
        """
        Altering the type of a to_field target changes the pointing FK
        column's type (and null-ness) to match.
        """
        app_label = "test_alflrsfkwtflttc"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("code", models.IntegerField(unique=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "rider",
                            models.ForeignKey(
                                "%s.Rider" % app_label, models.CASCADE, to_field="code"
                            ),
                        ),
                    ],
                ),
            ],
        )
        operation = migrations.AlterField(
            "Rider",
            "code",
            models.CharField(max_length=100, unique=True),
        )
        self.apply_operations(app_label, project_state, operations=[operation])
        # The target column and the FK column must end up with the same type.
        id_type, id_null = [
            (c.type_code, c.null_ok)
            for c in self.get_table_description("%s_rider" % app_label)
            if c.name == "code"
        ][0]
        fk_type, fk_null = [
            (c.type_code, c.null_ok)
            for c in self.get_table_description("%s_pony" % app_label)
            if c.name == "rider_id"
        ][0]
        self.assertEqual(id_type, fk_type)
        self.assertEqual(id_null, fk_null)
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_alter_field_reloads_state_fk_with_to_field_related_name_target_type_change(
        self,
    ):
        """
        Altering the type of a to_field target applies cleanly when the FK
        uses related_name="+".
        """
        app_label = "test_alflrsfkwtflrnttc"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("code", models.PositiveIntegerField(unique=True)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "rider",
                            models.ForeignKey(
                                "%s.Rider" % app_label,
                                models.CASCADE,
                                to_field="code",
                                related_name="+",
                            ),
                        ),
                    ],
                ),
            ],
        )
        operation = migrations.AlterField(
            "Rider",
            "code",
            models.CharField(max_length=100, unique=True),
        )
        self.apply_operations(app_label, project_state, operations=[operation])
    def test_alter_field_reloads_state_on_fk_target_changes(self):
        """
        If AlterField doesn't reload state appropriately, the second AlterField
        crashes on MySQL due to not dropping the PonyRider.pony foreign key
        constraint before modifying the column.
        """
        app_label = "alter_alter_field_reloads_state_on_fk_target_changes"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                        (
                            "rider",
                            models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
                        ),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "pony",
                            models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
                        ),
                    ],
                ),
            ],
        )
        # Both AlterFields must apply in sequence without crashing; the test
        # passes if no exception is raised.
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.AlterField(
                    "Rider", "id", models.CharField(primary_key=True, max_length=99)
                ),
                migrations.AlterField(
                    "Pony", "id", models.CharField(primary_key=True, max_length=99)
                ),
            ],
        )
    def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
        """
        If AlterField doesn't reload state appropriately, the second AlterField
        crashes on MySQL due to not dropping the PonyRider.pony foreign key
        constraint before modifying the column.
        """
        app_label = "alter_alter_field_reloads_state_on_fk_with_to_field_target_changes"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                        ("slug", models.CharField(unique=True, max_length=100)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                        (
                            "rider",
                            models.ForeignKey(
                                "%s.Rider" % app_label, models.CASCADE, to_field="slug"
                            ),
                        ),
                        ("slug", models.CharField(unique=True, max_length=100)),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "pony",
                            models.ForeignKey(
                                "%s.Pony" % app_label, models.CASCADE, to_field="slug"
                            ),
                        ),
                    ],
                ),
            ],
        )
        # Both AlterFields on the to_field targets must apply in sequence
        # without crashing; the test passes if no exception is raised.
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.AlterField(
                    "Rider", "slug", models.CharField(unique=True, max_length=99)
                ),
                migrations.AlterField(
                    "Pony", "slug", models.CharField(unique=True, max_length=99)
                ),
            ],
        )
    def test_alter_field_pk_fk_char_to_int(self):
        """
        Altering a CharField PK that an FK points at into a BigIntegerField
        applies cleanly (no exception expected).
        """
        app_label = "alter_field_pk_fk_char_to_int"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    name="Parent",
                    fields=[
                        ("id", models.CharField(max_length=255, primary_key=True)),
                    ],
                ),
                migrations.CreateModel(
                    name="Child",
                    fields=[
                        ("id", models.BigAutoField(primary_key=True)),
                        (
                            "parent",
                            models.ForeignKey(
                                f"{app_label}.Parent",
                                on_delete=models.CASCADE,
                            ),
                        ),
                    ],
                ),
            ],
        )
        self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.AlterField(
                    model_name="parent",
                    name="id",
                    field=models.BigIntegerField(primary_key=True),
                ),
            ],
        )
    def test_rename_field_reloads_state_on_fk_target_changes(self):
        """
        If RenameField doesn't reload state appropriately, the AlterField
        crashes on MySQL due to not dropping the PonyRider.pony foreign key
        constraint before modifying the column.
        """
        app_label = "alter_rename_field_reloads_state_on_fk_target_changes"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Rider",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                    ],
                ),
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.CharField(primary_key=True, max_length=100)),
                        (
                            "rider",
                            models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
                        ),
                    ],
                ),
                migrations.CreateModel(
                    "PonyRider",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        (
                            "pony",
                            models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
                        ),
                    ],
                ),
            ],
        )
        # Rename the FK target's PK, then alter the dependent model's PK; the
        # test passes if no exception is raised.
        project_state = self.apply_operations(
            app_label,
            project_state,
            operations=[
                migrations.RenameField("Rider", "id", "id2"),
                migrations.AlterField(
                    "Pony", "id", models.CharField(primary_key=True, max_length=99)
                ),
            ],
        )
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl")
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
self.assertEqual(
operation.formatted_description(), "~ Rename field pink on Pony to blue"
)
self.assertEqual(operation.migration_name_fragment, "rename_pink_pony_blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", new_state.models["test_rnfl", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].fields)
# Rename field.
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "old_name": "pink", "new_name": "blue"},
)
    def test_rename_field_unique_together(self):
        """
        RenameField renames a column in unique_together and ports the unique
        constraint to the new column name.
        """
        project_state = self.set_up_test_model("test_rnflut", unique_together=True)
        operation = migrations.RenameField("Pony", "pink", "blue")
        new_state = project_state.clone()
        operation.state_forwards("test_rnflut", new_state)
        # unique_together has the renamed column.
        self.assertIn(
            "blue",
            new_state.models["test_rnflut", "pony"].options["unique_together"][0],
        )
        self.assertNotIn(
            "pink",
            new_state.models["test_rnflut", "pony"].options["unique_together"][0],
        )
        # Rename field.
        self.assertColumnExists("test_rnflut_pony", "pink")
        self.assertColumnNotExists("test_rnflut_pony", "blue")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rnflut", editor, project_state, new_state)
        self.assertColumnExists("test_rnflut_pony", "blue")
        self.assertColumnNotExists("test_rnflut_pony", "pink")
        # The unique constraint has been ported over.
        with connection.cursor() as cursor:
            cursor.execute("INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)")
            # A duplicate (blue, weight) pair must still be rejected.
            with self.assertRaises(IntegrityError):
                with atomic():
                    cursor.execute(
                        "INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)"
                    )
            cursor.execute("DELETE FROM test_rnflut_pony")
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_rnflut", editor, new_state, project_state
            )
        self.assertColumnExists("test_rnflut_pony", "pink")
        self.assertColumnNotExists("test_rnflut_pony", "blue")
    def test_rename_field_index_together(self):
        """
        RenameField renames a column in index_together and ports the index to
        the new column name.
        """
        app_label = "test_rnflit"
        operations = [
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("pink", models.IntegerField(default=3)),
                    ("weight", models.FloatField()),
                ],
                options={
                    "index_together": [("weight", "pink")],
                },
            ),
        ]
        project_state = self.apply_operations(app_label, ProjectState(), operations)
        operation = migrations.RenameField("Pony", "pink", "blue")
        new_state = project_state.clone()
        operation.state_forwards("test_rnflit", new_state)
        self.assertIn("blue", new_state.models["test_rnflit", "pony"].fields)
        self.assertNotIn("pink", new_state.models["test_rnflit", "pony"].fields)
        # index_together has the renamed column.
        self.assertIn(
            "blue", new_state.models["test_rnflit", "pony"].options["index_together"][0]
        )
        self.assertNotIn(
            "pink", new_state.models["test_rnflit", "pony"].options["index_together"][0]
        )
        # Rename field.
        self.assertColumnExists("test_rnflit_pony", "pink")
        self.assertColumnNotExists("test_rnflit_pony", "blue")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rnflit", editor, project_state, new_state)
        self.assertColumnExists("test_rnflit_pony", "blue")
        self.assertColumnNotExists("test_rnflit_pony", "pink")
        # The index constraint has been ported over.
        self.assertIndexExists("test_rnflit_pony", ["weight", "blue"])
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_rnflit", editor, new_state, project_state
            )
        self.assertIndexExists("test_rnflit_pony", ["weight", "pink"])
    def test_rename_field_add_non_nullable_field_with_composite_pk(self):
        """
        A field that is part of a CompositePrimaryKey can be renamed, and a
        non-nullable field can then be added; the latter forces a table
        rebuild on SQLite, which must cope with the renamed PK member.
        """
        app_label = "test_rnfafnnwcpk"
        operations = [
            migrations.CreateModel(
                name="Release",
                fields=[
                    (
                        "pk",
                        models.CompositePrimaryKey("version", "name", primary_key=True),
                    ),
                    ("version", models.IntegerField()),
                    ("name", models.CharField(max_length=20)),
                ],
            ),
        ]
        project_state = self.apply_operations(app_label, ProjectState(), operations)
        new_state = project_state.clone()
        # Rename field used by CompositePrimaryKey.
        operation = migrations.RenameField("Release", "name", "renamed_field")
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(f"{app_label}_release", "renamed_field")
        project_state = new_state
        new_state = new_state.clone()
        # Add non-nullable field. Table is rebuilt on SQLite.
        operation = migrations.AddField(
            model_name="Release",
            name="new_non_nullable_field",
            field=models.CharField(default="x", max_length=20),
        )
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(f"{app_label}_release", "new_non_nullable_field")
    def test_rename_field_with_db_column(self):
        """
        Renaming fields that set db_column (plain and FK) is a state-only
        change: the underlying column name is unchanged, so no database
        queries are issued forwards or backwards.
        """
        project_state = self.apply_operations(
            "test_rfwdbc",
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("field", models.IntegerField(db_column="db_field")),
                        (
                            "fk_field",
                            models.ForeignKey(
                                "Pony",
                                models.CASCADE,
                                db_column="db_fk_field",
                            ),
                        ),
                    ],
                ),
            ],
        )
        # Rename the plain field; column name stays "db_field".
        new_state = project_state.clone()
        operation = migrations.RenameField("Pony", "field", "renamed_field")
        operation.state_forwards("test_rfwdbc", new_state)
        self.assertIn("renamed_field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertNotIn("field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_forwards(
                    "test_rfwdbc", editor, project_state, new_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_backwards(
                    "test_rfwdbc", editor, new_state, project_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_field")
        # Rename the FK field; column name stays "db_fk_field".
        new_state = project_state.clone()
        operation = migrations.RenameField("Pony", "fk_field", "renamed_fk_field")
        operation.state_forwards("test_rfwdbc", new_state)
        self.assertIn(
            "renamed_fk_field", new_state.models["test_rfwdbc", "pony"].fields
        )
        self.assertNotIn("fk_field", new_state.models["test_rfwdbc", "pony"].fields)
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_forwards(
                    "test_rfwdbc", editor, project_state, new_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
        with connection.schema_editor() as editor:
            with self.assertNumQueries(0):
                operation.database_backwards(
                    "test_rfwdbc", editor, new_state, project_state
                )
        self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
def test_rename_field_case(self):
project_state = self.apply_operations(
"test_rfmx",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField()),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "FiElD")
operation.state_forwards("test_rfmx", new_state)
self.assertIn("FiElD", new_state.models["test_rfmx", "pony"].fields)
self.assertColumnExists("test_rfmx_pony", "field")
with connection.schema_editor() as editor:
operation.database_forwards("test_rfmx", editor, project_state, new_state)
self.assertColumnExists(
"test_rfmx_pony",
connection.introspection.identifier_converter("FiElD"),
)
with connection.schema_editor() as editor:
operation.database_backwards("test_rfmx", editor, new_state, project_state)
self.assertColumnExists("test_rfmx_pony", "field")
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState("app", "model", []))
with self.assertRaisesMessage(
FieldDoesNotExist, "app.model has no field named 'field'"
):
migrations.RenameField("model", "field", "new_field").state_forwards(
"app", state
)
    def test_rename_referenced_field_state_forward(self):
        """
        RenameField repoints references held by other fields in the state:
        ForeignKey.remote_field.field_name/to_fields and
        ForeignObject.from_fields/to_fields, for renames of both the target
        field and the referencing field itself.
        """
        state = ProjectState()
        state.add_model(
            ModelState(
                "app",
                "Model",
                [
                    ("id", models.AutoField(primary_key=True)),
                    ("field", models.IntegerField(unique=True)),
                ],
            )
        )
        state.add_model(
            ModelState(
                "app",
                "OtherModel",
                [
                    ("id", models.AutoField(primary_key=True)),
                    (
                        "fk",
                        models.ForeignKey("Model", models.CASCADE, to_field="field"),
                    ),
                    (
                        "fo",
                        models.ForeignObject(
                            "Model",
                            models.CASCADE,
                            from_fields=("fk",),
                            to_fields=("field",),
                        ),
                    ),
                ],
            )
        )
        # Rename the referenced (target) field.
        operation = migrations.RenameField("Model", "field", "renamed")
        new_state = state.clone()
        operation.state_forwards("app", new_state)
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].remote_field.field_name,
            "renamed",
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].from_fields, ["self"]
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fk"].to_fields, ("renamed",)
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].from_fields, ("fk",)
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
        )
        # Rename the referencing FK field itself.
        operation = migrations.RenameField("OtherModel", "fk", "renamed_fk")
        new_state = state.clone()
        operation.state_forwards("app", new_state)
        self.assertEqual(
            new_state.models["app", "othermodel"]
            .fields["renamed_fk"]
            .remote_field.field_name,
            "renamed",
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["renamed_fk"].from_fields,
            ("self",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["renamed_fk"].to_fields,
            ("renamed",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].from_fields,
            ("renamed_fk",),
        )
        self.assertEqual(
            new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
        )
    def test_alter_unique_together(self):
        """
        Tests the AlterUniqueTogether operation: state alteration, database
        constraint enforcement and reversal, flat-tuple normalization, and
        deconstruction.
        """
        project_state = self.set_up_test_model("test_alunto")
        # Test the state alteration
        operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
        self.assertEqual(
            operation.describe(), "Alter unique_together for Pony (1 constraint(s))"
        )
        self.assertEqual(
            operation.formatted_description(),
            "~ Alter unique_together for Pony (1 constraint(s))",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_pony_unique_together",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alunto", new_state)
        self.assertEqual(
            len(
                project_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            0,
        )
        self.assertEqual(
            len(
                new_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            1,
        )
        # Make sure we can insert duplicate rows
        with connection.cursor() as cursor:
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
            # Test the database alteration
            with connection.schema_editor() as editor:
                operation.database_forwards(
                    "test_alunto", editor, project_state, new_state
                )
            # Duplicate inserts now violate the new unique constraint.
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            with self.assertRaises(IntegrityError):
                with atomic():
                    cursor.execute(
                        "INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)"
                    )
            cursor.execute("DELETE FROM test_alunto_pony")
            # And test reversal
            with connection.schema_editor() as editor:
                operation.database_backwards(
                    "test_alunto", editor, new_state, project_state
                )
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
            cursor.execute("DELETE FROM test_alunto_pony")
        # Test flat unique_together
        operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
        operation.state_forwards("test_alunto", new_state)
        self.assertEqual(
            len(
                new_state.models["test_alunto", "pony"].options.get(
                    "unique_together", set()
                )
            ),
            1,
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterUniqueTogether")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Pony", "unique_together": {("pink", "weight")}}
        )
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (0 constraint(s))"
)
    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_remove_unique_together_on_pk_field(self):
        """
        Clearing unique_together drops only the unique_together constraint,
        not the primary key constraint on the same field.
        """
        app_label = "test_rutopkf"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[("id", models.AutoField(primary_key=True))],
                    options={"unique_together": {("id",)}},
                ),
            ],
        )
        table_name = f"{app_label}_pony"
        pk_constraint_name = f"{table_name}_pkey"
        unique_together_constraint_name = f"{table_name}_id_fb61f881_uniq"
        self.assertConstraintExists(table_name, pk_constraint_name, value=False)
        self.assertConstraintExists(
            table_name, unique_together_constraint_name, value=False
        )
        new_state = project_state.clone()
        operation = migrations.AlterUniqueTogether("Pony", set())
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The PK constraint survives; only the unique_together one is gone.
        self.assertConstraintExists(table_name, pk_constraint_name, value=False)
        self.assertConstraintNotExists(table_name, unique_together_constraint_name)
    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_remove_unique_together_on_unique_field(self):
        """
        Clearing unique_together drops only the unique_together constraint,
        leaving the field's own unique=True constraint intact.
        """
        app_label = "test_rutouf"
        project_state = self.apply_operations(
            app_label,
            ProjectState(),
            operations=[
                migrations.CreateModel(
                    "Pony",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=30, unique=True)),
                    ],
                    options={"unique_together": {("name",)}},
                ),
            ],
        )
        table_name = f"{app_label}_pony"
        unique_constraint_name = f"{table_name}_name_key"
        unique_together_constraint_name = f"{table_name}_name_694f3b9f_uniq"
        self.assertConstraintExists(table_name, unique_constraint_name, value=False)
        self.assertConstraintExists(
            table_name, unique_together_constraint_name, value=False
        )
        new_state = project_state.clone()
        operation = migrations.AlterUniqueTogether("Pony", set())
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # The field-level unique constraint survives the removal.
        self.assertConstraintExists(table_name, unique_constraint_name, value=False)
        self.assertConstraintNotExists(table_name, unique_together_constraint_name)
    def test_add_index(self):
        """
        Test the AddIndex operation: unnamed indexes are rejected,
        description/name fragment, state and database forwards/backwards,
        and deconstruction.
        """
        project_state = self.set_up_test_model("test_adin")
        msg = (
            "Indexes passed to AddIndex operations require a name argument. "
            "<Index: fields=['pink']> doesn't have one."
        )
        with self.assertRaisesMessage(ValueError, msg):
            migrations.AddIndex("Pony", models.Index(fields=["pink"]))
        index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
        operation = migrations.AddIndex("Pony", index)
        self.assertEqual(
            operation.describe(),
            "Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
        )
        self.assertEqual(
            operation.formatted_description(),
            "+ Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_adin_pony_pink_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_adin", new_state)
        # Test the database alteration
        self.assertEqual(
            len(new_state.models["test_adin", "pony"].options["indexes"]), 1
        )
        self.assertIndexNotExists("test_adin_pony", ["pink"])
        with connection.schema_editor() as editor:
            operation.database_forwards("test_adin", editor, project_state, new_state)
        self.assertIndexExists("test_adin_pony", ["pink"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_adin", editor, new_state, project_state)
        self.assertIndexNotExists("test_adin_pony", ["pink"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
    def test_remove_index(self):
        """
        Test the RemoveIndex operation: state and database forwards/backwards,
        deconstruction, and removing an index together with one of its fields
        (exercising the SQLite table-remake path).
        """
        project_state = self.set_up_test_model("test_rmin", multicol_index=True)
        self.assertTableExists("test_rmin_pony")
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        operation = migrations.RemoveIndex("Pony", "pony_test_idx")
        self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
        self.assertEqual(
            operation.formatted_description(), "- Remove index pony_test_idx from Pony"
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_pony_test_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_rmin", new_state)
        # Test the state alteration
        self.assertEqual(
            len(new_state.models["test_rmin", "pony"].options["indexes"]), 0
        )
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_rmin", editor, project_state, new_state)
        self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards("test_rmin", editor, new_state, project_state)
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": "pony_test_idx"})
        # Also test a field dropped with index - sqlite remake issue
        operations = [
            migrations.RemoveIndex("Pony", "pony_test_idx"),
            migrations.RemoveField("Pony", "pink"),
        ]
        self.assertColumnExists("test_rmin_pony", "pink")
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
        # Test database alteration
        new_state = project_state.clone()
        self.apply_operations("test_rmin", new_state, operations=operations)
        self.assertColumnNotExists("test_rmin_pony", "pink")
        self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
        # And test reversal
        self.unapply_operations("test_rmin", project_state, operations=operations)
        self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
    def test_rename_index(self):
        """
        RenameIndex with old_name renames the index on the database (one query
        when the backend can rename in place, two otherwise), is reversible,
        and deconstructs correctly.
        """
        app_label = "test_rnin"
        project_state = self.set_up_test_model(app_label, index=True)
        table_name = app_label + "_pony"
        self.assertIndexNameExists(table_name, "pony_pink_idx")
        self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
        operation = migrations.RenameIndex(
            "Pony", new_name="new_pony_test_idx", old_name="pony_pink_idx"
        )
        self.assertEqual(
            operation.describe(),
            "Rename index pony_pink_idx on Pony to new_pony_test_idx",
        )
        self.assertEqual(
            operation.formatted_description(),
            "~ Rename index pony_pink_idx on Pony to new_pony_test_idx",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "rename_pony_pink_idx_new_pony_test_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Rename index.
        # Backends without in-place rename drop and recreate (two queries).
        expected_queries = 1 if connection.features.can_rename_index else 2
        with (
            connection.schema_editor() as editor,
            self.assertNumQueries(expected_queries),
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameNotExists(table_name, "pony_pink_idx")
        self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Reversal.
        with (
            connection.schema_editor() as editor,
            self.assertNumQueries(expected_queries),
        ):
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, "pony_pink_idx")
        self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RenameIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "old_name": "pony_pink_idx",
                "new_name": "new_pony_test_idx",
            },
        )
def test_rename_index_arguments(self):
msg = "RenameIndex.old_name and old_fields are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex(
"Pony",
new_name="new_idx_name",
old_name="old_idx_name",
old_fields=("weight", "pink"),
)
msg = "RenameIndex requires one of old_name and old_fields arguments to be set."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex("Pony", new_name="new_idx_name")
    def test_rename_index_unnamed_index(self):
        """
        RenameIndex with old_fields renames the auto-named index_together
        index; reversal is a no-op, reapplying with matching names is a no-op,
        and deconstruction round-trips.
        """
        app_label = "test_rninui"
        operations = [
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("pink", models.IntegerField(default=3)),
                    ("weight", models.FloatField()),
                ],
                options={
                    "index_together": [("weight", "pink")],
                },
            ),
        ]
        project_state = self.apply_operations(app_label, ProjectState(), operations)
        table_name = app_label + "_pony"
        self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
        operation = migrations.RenameIndex(
            "Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
        )
        self.assertEqual(
            operation.describe(),
            "Rename unnamed index for ('weight', 'pink') on Pony to new_pony_test_idx",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "rename_pony_weight_pink_new_pony_test_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Rename index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Reverse is a no-op.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Reapply, RenameIndex operation is a noop when the old and new name
        # match.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RenameIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "new_name": "new_pony_test_idx",
                "old_fields": ("weight", "pink"),
            },
        )
def test_rename_index_unknown_unnamed_index(self):
app_label = "test_rninuui"
project_state = self.set_up_test_model(app_label)
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
msg = "Found wrong number (0) of indexes for test_rninuui_pony(weight, pink)."
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
operation.database_forwards(app_label, editor, project_state, new_state)
    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_rename_index_unnamed_index_with_unique_index(self):
        """
        RenameIndex(old_fields=...) resolves the unnamed index even when a
        unique constraint also covers the same fields.
        """
        app_label = "test_rninuniwui"
        project_state = self.set_up_test_model(
            app_label,
            multicol_index=True,
            unique_together=True,
        )
        table_name = app_label + "_pony"
        self.assertIndexNotExists(table_name, "new_pony_test_idx")
        operation = migrations.RenameIndex(
            "Pony", new_name="new_pony_test_idx", old_fields=["pink", "weight"]
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        # Rename index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameExists(table_name, "new_pony_test_idx")
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model("test_adinsf")
index = models.Index(fields=["pink"], name="test_adinsf_pony_pink_idx")
old_model = project_state.apps.get_model("test_adinsf", "Pony")
new_state = project_state.clone()
operation = migrations.AddIndex("Pony", index)
operation.state_forwards("test_adinsf", new_state)
new_model = new_state.apps.get_model("test_adinsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model("test_rminsf")
index = models.Index(fields=["pink"], name="test_rminsf_pony_pink_idx")
migrations.AddIndex("Pony", index).state_forwards("test_rminsf", project_state)
old_model = project_state.apps.get_model("test_rminsf", "Pony")
new_state = project_state.clone()
operation = migrations.RemoveIndex("Pony", "test_rminsf_pony_pink_idx")
operation.state_forwards("test_rminsf", new_state)
new_model = new_state.apps.get_model("test_rminsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_rename_index_state_forwards(self):
app_label = "test_rnidsf"
project_state = self.set_up_test_model(app_label, index=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_name="pony_pink_idx"
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
    def test_rename_index_state_forwards_unnamed_index(self):
        """
        RenameIndex(old_fields=...).state_forwards() re-renders the model and
        the renamed index appears in Meta.indexes under the new name.
        """
        app_label = "test_rnidsfui"
        operations = [
            migrations.CreateModel(
                "Pony",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("pink", models.IntegerField(default=3)),
                    ("weight", models.FloatField()),
                ],
                options={
                    "index_together": [("weight", "pink")],
                },
            ),
        ]
        project_state = self.apply_operations(app_label, ProjectState(), operations)
        old_model = project_state.apps.get_model(app_label, "Pony")
        new_state = project_state.clone()
        operation = migrations.RenameIndex(
            "Pony", new_name="new_pony_pink_idx", old_fields=("weight", "pink")
        )
        operation.state_forwards(app_label, new_state)
        new_model = new_state.apps.get_model(app_label, "Pony")
        self.assertIsNot(old_model, new_model)
        self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_add_func_index(self):
        """
        AddIndex supports functional (expression-based) indexes: the index is
        created, dropped on reversal, and the operation deconstructs.
        """
        app_label = "test_addfuncin"
        index_name = f"{app_label}_pony_abs_idx"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        index = models.Index(Abs("weight"), name=index_name)
        operation = migrations.AddIndex("Pony", index)
        self.assertEqual(
            operation.describe(),
            "Create index test_addfuncin_pony_abs_idx on Abs(F(weight)) on model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_addfuncin_pony_abs_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 1)
        self.assertIndexNameNotExists(table_name, index_name)
        # Add index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameExists(table_name, index_name)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameNotExists(table_name, index_name)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_remove_func_index(self):
        """
        RemoveIndex drops a functional (expression-based) index, recreates it
        on reversal, and the operation deconstructs.
        """
        app_label = "test_rmfuncin"
        index_name = f"{app_label}_pony_abs_idx"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            indexes=[
                models.Index(Abs("weight"), name=index_name),
            ],
        )
        self.assertTableExists(table_name)
        self.assertIndexNameExists(table_name, index_name)
        operation = migrations.RemoveIndex("Pony", index_name)
        self.assertEqual(
            operation.describe(),
            "Remove index test_rmfuncin_pony_abs_idx from Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_test_rmfuncin_pony_abs_idx",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 0)
        # Remove index.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameNotExists(table_name, index_name)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, index_name)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveIndex")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": index_name})
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_alter_field_with_func_index(self):
        """
        AlterField keeps a functional index on the altered column intact in
        both directions (relevant when SQLite remakes the table).
        """
        app_label = "test_alfuncin"
        index_name = f"{app_label}_pony_idx"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            indexes=[models.Index(Abs("pink"), name=index_name)],
        )
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(null=True)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameExists(table_name, index_name)
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameExists(table_name, index_name)
    def test_alter_field_with_index(self):
        """
        Test AlterField operation with an index to ensure indexes created via
        Meta.indexes don't get dropped with sqlite3 remake. The index must
        survive both the forward and the backward alteration.
        """
        project_state = self.set_up_test_model("test_alflin", index=True)
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(null=True)
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alflin", new_state)
        # Test the database alteration
        self.assertColumnNotNull("test_alflin_pony", "pink")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alflin", editor, project_state, new_state)
        # Index hasn't been dropped
        self.assertIndexExists("test_alflin_pony", ["pink"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alflin", editor, new_state, project_state
            )
        # Ensure the index is still there
        self.assertIndexExists("test_alflin_pony", ["pink"])
    def test_alter_index_together(self):
        """
        Tests the AlterIndexTogether operation: state alteration, database
        index creation and reversal, and deconstruction.
        """
        project_state = self.set_up_test_model("test_alinto")
        # Test the state alteration
        operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
        self.assertEqual(
            operation.describe(), "Alter index_together for Pony (1 constraint(s))"
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_pony_index_together",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alinto", new_state)
        self.assertEqual(
            len(
                project_state.models["test_alinto", "pony"].options.get(
                    "index_together", set()
                )
            ),
            0,
        )
        self.assertEqual(
            len(
                new_state.models["test_alinto", "pony"].options.get(
                    "index_together", set()
                )
            ),
            1,
        )
        # Make sure there's no matching index
        self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_alinto", editor, project_state, new_state)
        self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alinto", editor, new_state, project_state
            )
        self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterIndexTogether")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Pony", "index_together": {("pink", "weight")}}
        )
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter index_together for Pony (0 constraint(s))"
)
self.assertEqual(
operation.formatted_description(),
"~ Alter index_together for Pony (0 constraint(s))",
)
    @skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
    def test_alter_index_together_remove_with_unique_together(self):
        """
        Removing index_together leaves a unique constraint covering the same
        fields in place.
        """
        app_label = "test_alintoremove_wunto"
        table_name = "%s_pony" % app_label
        project_state = self.set_up_test_model(app_label, unique_together=True)
        self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
        # Add index together.
        new_state = project_state.clone()
        operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexExists(table_name, ["pink", "weight"])
        # Remove index together.
        project_state = new_state
        new_state = project_state.clone()
        operation = migrations.AlterIndexTogether("Pony", set())
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNotExists(table_name, ["pink", "weight"])
        # The unique constraint on the same columns must survive.
        self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
    def test_add_constraint(self):
        """
        Tests the AddConstraint operation with check constraints: state
        alteration, database enforcement (or silent no-op on backends without
        table check constraint support), reversal, and deconstruction.
        """
        project_state = self.set_up_test_model("test_addconstraint")
        gt_check = models.Q(pink__gt=2)
        gt_constraint = models.CheckConstraint(
            condition=gt_check, name="test_add_constraint_pony_pink_gt_2"
        )
        gt_operation = migrations.AddConstraint("Pony", gt_constraint)
        self.assertEqual(
            gt_operation.describe(),
            "Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
        )
        self.assertEqual(
            gt_operation.formatted_description(),
            "+ Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
        )
        self.assertEqual(
            gt_operation.migration_name_fragment,
            "pony_test_add_constraint_pony_pink_gt_2",
        )
        # Test the state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(
            len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
            1,
        )
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with (
            CaptureQueriesContext(connection) as ctx,
            connection.schema_editor() as editor,
        ):
            gt_operation.database_forwards(
                "test_addconstraint", editor, project_state, new_state
            )
        if connection.features.supports_table_check_constraints:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=1.0)
        else:
            # No CHECK clause was emitted; the insert must succeed.
            self.assertIs(
                any("CHECK" in query["sql"] for query in ctx.captured_queries), False
            )
            Pony.objects.create(pink=1, weight=1.0)
        # Add another one.
        lt_check = models.Q(pink__lt=100)
        lt_constraint = models.CheckConstraint(
            condition=lt_check, name="test_add_constraint_pony_pink_lt_100"
        )
        lt_operation = migrations.AddConstraint("Pony", lt_constraint)
        lt_operation.state_forwards("test_addconstraint", new_state)
        self.assertEqual(
            len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
            2,
        )
        Pony = new_state.apps.get_model("test_addconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 2)
        with (
            CaptureQueriesContext(connection) as ctx,
            connection.schema_editor() as editor,
        ):
            lt_operation.database_forwards(
                "test_addconstraint", editor, project_state, new_state
            )
        if connection.features.supports_table_check_constraints:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=100, weight=1.0)
        else:
            self.assertIs(
                any("CHECK" in query["sql"] for query in ctx.captured_queries), False
            )
            Pony.objects.create(pink=100, weight=1.0)
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_addconstraint", editor, new_state, project_state
            )
        Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"model_name": "Pony", "constraint": gt_constraint}
        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_create_model_constraint_percent_escaping(self):
        """
        "%" characters in CheckConstraint conditions (whether generated by
        pattern lookups or appearing literally) are escaped correctly when
        the constraint is created as part of a CreateModel operation.
        """
        app_label = "add_constraint_string_quoting"
        from_state = ProjectState()
        # Each entry: (constraint condition, kwargs that satisfy it, kwargs
        # that violate it).
        checks = [
            # "%" generated in startswith lookup should be escaped in a way
            # that is considered a leading wildcard.
            (
                models.Q(name__startswith="Albert"),
                {"name": "Alberta"},
                {"name": "Artur"},
            ),
            # Literal "%" should be escaped in a way that is not considered
            # a wildcard.
            (models.Q(rebate__endswith="%"), {"rebate": "10%"}, {"rebate": "10%$"}),
            # Right-hand-side baked "%" literals should not be used for
            # parameters interpolation.
            (
                ~models.Q(surname__startswith=models.F("name")),
                {"name": "Albert"},
                {"name": "Albert", "surname": "Alberto"},
            ),
            # Exact matches against "%" literals should also be supported.
            (
                models.Q(name="%"),
                {"name": "%"},
                {"name": "Albert"},
            ),
        ]
        for check, valid, invalid in checks:
            with self.subTest(condition=check, valid=valid, invalid=invalid):
                constraint = models.CheckConstraint(condition=check, name="constraint")
                operation = migrations.CreateModel(
                    "Author",
                    fields=[
                        ("id", models.AutoField(primary_key=True)),
                        ("name", models.CharField(max_length=100)),
                        ("surname", models.CharField(max_length=100, db_default="")),
                        ("rebate", models.CharField(max_length=100)),
                    ],
                    options={"constraints": [constraint]},
                )
                to_state = from_state.clone()
                operation.state_forwards(app_label, to_state)
                with connection.schema_editor() as editor:
                    operation.database_forwards(app_label, editor, from_state, to_state)
                Author = to_state.apps.get_model(app_label, "Author")
                try:
                    # A row satisfying the check is accepted; a violating
                    # row raises IntegrityError.
                    with transaction.atomic():
                        Author.objects.create(**valid).delete()
                    with self.assertRaises(IntegrityError), transaction.atomic():
                        Author.objects.create(**invalid)
                finally:
                    # Drop the table so the next subTest can recreate it.
                    with connection.schema_editor() as editor:
                        migrations.DeleteModel("Author").database_forwards(
                            app_label, editor, to_state, from_state
                        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_constraint_percent_escaping(self):
        """
        "%" characters in CheckConstraint conditions (whether generated by
        pattern lookups or appearing literally) are escaped correctly when
        the constraint is added with a standalone AddConstraint operation.
        """
        app_label = "add_constraint_string_quoting"
        operations = [
            migrations.CreateModel(
                "Author",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("name", models.CharField(max_length=100)),
                    ("surname", models.CharField(max_length=100, default="")),
                    ("rebate", models.CharField(max_length=100)),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        # Each entry: (constraint condition, kwargs that satisfy it, kwargs
        # that violate it).
        checks = [
            # "%" generated in startswith lookup should be escaped in a way
            # that is considered a leading wildcard.
            (
                models.Q(name__startswith="Albert"),
                {"name": "Alberta"},
                {"name": "Artur"},
            ),
            # Literal "%" should be escaped in a way that is not considered
            # a wildcard.
            (models.Q(rebate__endswith="%"), {"rebate": "10%"}, {"rebate": "10%$"}),
            # Right-hand-side baked "%" literals should not be used for
            # parameters interpolation.
            (
                ~models.Q(surname__startswith=models.F("name")),
                {"name": "Albert"},
                {"name": "Albert", "surname": "Alberto"},
            ),
            # Exact matches against "%" literals should also be supported.
            (
                models.Q(name="%"),
                {"name": "%"},
                {"name": "Albert"},
            ),
        ]
        for check, valid, invalid in checks:
            with self.subTest(condition=check, valid=valid, invalid=invalid):
                constraint = models.CheckConstraint(condition=check, name="constraint")
                operation = migrations.AddConstraint("Author", constraint)
                to_state = from_state.clone()
                operation.state_forwards(app_label, to_state)
                with connection.schema_editor() as editor:
                    operation.database_forwards(app_label, editor, from_state, to_state)
                Author = to_state.apps.get_model(app_label, "Author")
                try:
                    # A row satisfying the check is accepted; a violating
                    # row raises IntegrityError.
                    with transaction.atomic():
                        Author.objects.create(**valid).delete()
                    with self.assertRaises(IntegrityError), transaction.atomic():
                        Author.objects.create(**invalid)
                finally:
                    # Remove the constraint so the next subTest starts clean.
                    with connection.schema_editor() as editor:
                        operation.database_backwards(
                            app_label, editor, from_state, to_state
                        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_or_constraint(self):
        """
        AddConstraint works with a CheckConstraint whose condition combines
        clauses with OR (and AND within one clause).
        """
        app_label = "test_addorconstraint"
        constraint_name = "add_constraint_or"
        from_state = self.set_up_test_model(app_label)
        # Valid rows must have (pink > 2 AND weight > 2) OR weight < 0.
        check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
        constraint = models.CheckConstraint(condition=check, name=constraint_name)
        operation = migrations.AddConstraint("Pony", constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Pony = to_state.apps.get_model(app_label, "Pony")
        # Fails the AND half (pink not > 2) and weight >= 0.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=2, weight=3.0)
        # Fails the AND half (weight not > 2) and weight >= 0.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Pony.objects.create(pink=3, weight=1.0)
        # Each of these satisfies at least one side of the OR.
        Pony.objects.bulk_create(
            [
                Pony(pink=3, weight=-1.0),
                Pony(pink=1, weight=-1.0),
                Pony(pink=3, weight=3.0),
            ]
        )
    @skipUnlessDBFeature("supports_table_check_constraints")
    def test_add_constraint_combinable(self):
        """
        AddConstraint works with a CheckConstraint whose condition uses a
        combinable expression (an F() inside arithmetic) on the right-hand
        side of the comparison.
        """
        app_label = "test_addconstraint_combinable"
        operations = [
            migrations.CreateModel(
                "Book",
                fields=[
                    ("id", models.AutoField(primary_key=True)),
                    ("read", models.PositiveIntegerField()),
                    ("unread", models.PositiveIntegerField()),
                ],
            ),
        ]
        from_state = self.apply_operations(app_label, ProjectState(), operations)
        # Enforce read + unread == 100, expressed as read == 100 - unread.
        constraint = models.CheckConstraint(
            condition=models.Q(read=(100 - models.F("unread"))),
            name="test_addconstraint_combinable_sum_100",
        )
        operation = migrations.AddConstraint("Book", constraint)
        to_state = from_state.clone()
        operation.state_forwards(app_label, to_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        Book = to_state.apps.get_model(app_label, "Book")
        # 70 + 10 != 100 → rejected; 70 + 30 == 100 → accepted.
        with self.assertRaises(IntegrityError), transaction.atomic():
            Book.objects.create(read=70, unread=10)
        Book.objects.create(read=70, unread=30)
    def test_remove_constraint(self):
        """
        Tests the RemoveConstraint operation: description, state alteration,
        database alteration, reversal, and deconstruction.
        """
        project_state = self.set_up_test_model(
            "test_removeconstraint",
            constraints=[
                models.CheckConstraint(
                    condition=models.Q(pink__gt=2),
                    name="test_remove_constraint_pony_pink_gt_2",
                ),
                models.CheckConstraint(
                    condition=models.Q(pink__lt=100),
                    name="test_remove_constraint_pony_pink_lt_100",
                ),
            ],
        )
        gt_operation = migrations.RemoveConstraint(
            "Pony", "test_remove_constraint_pony_pink_gt_2"
        )
        self.assertEqual(
            gt_operation.describe(),
            "Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
        )
        self.assertEqual(
            gt_operation.formatted_description(),
            "- Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
        )
        self.assertEqual(
            gt_operation.migration_name_fragment,
            "remove_pony_test_remove_constraint_pony_pink_gt_2",
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removeconstraint", "pony"].options["constraints"]
            ),
            1,
        )
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards(
                "test_removeconstraint", editor, project_state, new_state
            )
        # pink=1 no longer violates the removed gt_2 check.
        Pony.objects.create(pink=1, weight=1.0).delete()
        if connection.features.supports_table_check_constraints:
            # The lt_100 check is still enforced.
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=100, weight=1.0)
        else:
            Pony.objects.create(pink=100, weight=1.0)
        # Remove the other one.
        lt_operation = migrations.RemoveConstraint(
            "Pony", "test_remove_constraint_pony_pink_lt_100"
        )
        lt_operation.state_forwards("test_removeconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removeconstraint", "pony"].options["constraints"]
            ),
            0,
        )
        Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with connection.schema_editor() as editor:
            lt_operation.database_forwards(
                "test_removeconstraint", editor, project_state, new_state
            )
        Pony.objects.create(pink=100, weight=1.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_removeconstraint", editor, new_state, project_state
            )
        if connection.features.supports_table_check_constraints:
            # The gt_2 check is re-created, so pink=1 violates it again.
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=1.0)
        else:
            Pony.objects.create(pink=1, weight=1.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "name": "test_remove_constraint_pony_pink_gt_2"},
        )
    def test_alter_constraint(self):
        """
        Tests the AlterConstraint operation: it changes constraint metadata
        (e.g. violation_error_message) in project state without issuing any
        database queries, in either direction.
        """
        constraint = models.UniqueConstraint(
            fields=["pink"], name="test_alter_constraint_pony_fields_uq"
        )
        project_state = self.set_up_test_model(
            "test_alterconstraint", constraints=[constraint]
        )
        new_state = project_state.clone()
        violation_error_message = "Pink isn't unique"
        # Same constraint, but with a custom violation_error_message.
        uq_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="test_alter_constraint_pony_fields_uq",
            violation_error_message=violation_error_message,
        )
        uq_operation = migrations.AlterConstraint(
            "Pony", "test_alter_constraint_pony_fields_uq", uq_constraint
        )
        self.assertEqual(
            uq_operation.describe(),
            "Alter constraint test_alter_constraint_pony_fields_uq on Pony",
        )
        self.assertEqual(
            uq_operation.formatted_description(),
            "~ Alter constraint test_alter_constraint_pony_fields_uq on Pony",
        )
        self.assertEqual(
            uq_operation.migration_name_fragment,
            "alter_pony_test_alter_constraint_pony_fields_uq",
        )
        uq_operation.state_forwards("test_alterconstraint", new_state)
        # Old state keeps the default message; new state has the custom one.
        self.assertEqual(
            project_state.models["test_alterconstraint", "pony"]
            .options["constraints"][0]
            .violation_error_message,
            "Constraint “%(name)s” is violated.",
        )
        self.assertEqual(
            new_state.models["test_alterconstraint", "pony"]
            .options["constraints"][0]
            .violation_error_message,
            violation_error_message,
        )
        # AlterConstraint is a state-only change: zero queries both ways.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            uq_operation.database_forwards(
                "test_alterconstraint", editor, project_state, new_state
            )
        self.assertConstraintExists(
            "test_alterconstraint_pony",
            "test_alter_constraint_pony_fields_uq",
            value=False,
        )
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            uq_operation.database_backwards(
                "test_alterconstraint", editor, project_state, new_state
            )
        self.assertConstraintExists(
            "test_alterconstraint_pony",
            "test_alter_constraint_pony_fields_uq",
            value=False,
        )
        definition = uq_operation.deconstruct()
        self.assertEqual(definition[0], "AlterConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "test_alter_constraint_pony_fields_uq",
                "constraint": uq_constraint,
            },
        )
    def test_add_partial_unique_constraint(self):
        """
        Tests AddConstraint with a partial (conditional) UniqueConstraint:
        uniqueness on "pink" is only enforced for rows with weight > 5.
        """
        project_state = self.set_up_test_model("test_addpartialuniqueconstraint")
        partial_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            condition=models.Q(weight__gt=5),
            name="test_constraint_pony_pink_for_weight_gt_5_uniq",
        )
        operation = migrations.AddConstraint("Pony", partial_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq "
            "on model Pony",
        )
        # Test the state alteration
        new_state = project_state.clone()
        operation.state_forwards("test_addpartialuniqueconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_addpartialuniqueconstraint", "pony"].options[
                    "constraints"
                ]
            ),
            1,
        )
        Pony = new_state.apps.get_model("test_addpartialuniqueconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_addpartialuniqueconstraint", editor, project_state, new_state
            )
        # Test constraint works
        # Duplicate pink is fine while weight <= 5 (outside the condition).
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        if connection.features.supports_partial_indexes:
            # Second row with weight > 5 and the same pink violates it.
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_addpartialuniqueconstraint", editor, new_state, project_state
            )
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": partial_unique_constraint},
        )
    def test_remove_partial_unique_constraint(self):
        """
        Tests RemoveConstraint with a partial (conditional) UniqueConstraint,
        including reversal re-creating the constraint.
        """
        project_state = self.set_up_test_model(
            "test_removepartialuniqueconstraint",
            constraints=[
                models.UniqueConstraint(
                    fields=["pink"],
                    condition=models.Q(weight__gt=5),
                    name="test_constraint_pony_pink_for_weight_gt_5_uniq",
                ),
            ],
        )
        gt_operation = migrations.RemoveConstraint(
            "Pony", "test_constraint_pony_pink_for_weight_gt_5_uniq"
        )
        self.assertEqual(
            gt_operation.describe(),
            "Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from "
            "model Pony",
        )
        # Test state alteration
        new_state = project_state.clone()
        gt_operation.state_forwards("test_removepartialuniqueconstraint", new_state)
        self.assertEqual(
            len(
                new_state.models["test_removepartialuniqueconstraint", "pony"].options[
                    "constraints"
                ]
            ),
            0,
        )
        Pony = new_state.apps.get_model("test_removepartialuniqueconstraint", "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        # Test database alteration
        with connection.schema_editor() as editor:
            gt_operation.database_forwards(
                "test_removepartialuniqueconstraint", editor, project_state, new_state
            )
        # Test constraint doesn't work
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=4.0)
        Pony.objects.create(pink=1, weight=6.0)
        # Deleted immediately so reversal below can re-add the constraint
        # without pre-existing weight>5 duplicates.
        Pony.objects.create(pink=1, weight=7.0).delete()
        # Test reversal
        with connection.schema_editor() as editor:
            gt_operation.database_backwards(
                "test_removepartialuniqueconstraint", editor, new_state, project_state
            )
        # Test constraint works
        if connection.features.supports_partial_indexes:
            with self.assertRaises(IntegrityError), transaction.atomic():
                Pony.objects.create(pink=1, weight=7.0)
        else:
            Pony.objects.create(pink=1, weight=7.0)
        # Test deconstruction
        definition = gt_operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "test_constraint_pony_pink_for_weight_gt_5_uniq",
            },
        )
    def test_add_deferred_unique_constraint(self):
        """
        Tests AddConstraint with a DEFERRED UniqueConstraint: the check is
        postponed to transaction commit, and can be made IMMEDIATE again via
        SET CONSTRAINTS.
        """
        app_label = "test_adddeferred_uc"
        project_state = self.set_up_test_model(app_label)
        deferred_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="deferred_pink_constraint_add",
            deferrable=models.Deferrable.DEFERRED,
        )
        operation = migrations.AddConstraint("Pony", deferred_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint deferred_pink_constraint_add on model Pony",
        )
        # Add constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=4.0)
        if connection.features.supports_deferrable_unique_constraints:
            # Unique constraint is deferred: a transient duplicate is allowed
            # as long as it's resolved before the transaction commits.
            with transaction.atomic():
                obj = Pony.objects.create(pink=1, weight=4.0)
                obj.pink = 2
                obj.save()
            # Constraint behavior can be changed with SET CONSTRAINTS.
            with self.assertRaises(IntegrityError):
                with transaction.atomic(), connection.cursor() as cursor:
                    quoted_name = connection.ops.quote_name(
                        deferred_unique_constraint.name
                    )
                    cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                    # Now the duplicate fails at INSERT time, before the fix-up.
                    obj = Pony.objects.create(pink=1, weight=4.0)
                    obj.pink = 3
                    obj.save()
        else:
            # Unsupported backends must emit no DDL at all.
            self.assertEqual(len(ctx), 0)
            Pony.objects.create(pink=1, weight=4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": deferred_unique_constraint},
        )
    def test_remove_deferred_unique_constraint(self):
        """
        Tests RemoveConstraint with a DEFERRED UniqueConstraint, including
        reversal restoring the deferred behavior.
        """
        app_label = "test_removedeferred_uc"
        deferred_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="deferred_pink_constraint_rm",
            deferrable=models.Deferrable.DEFERRED,
        )
        project_state = self.set_up_test_model(
            app_label, constraints=[deferred_unique_constraint]
        )
        operation = migrations.RemoveConstraint("Pony", deferred_unique_constraint.name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint deferred_pink_constraint_rm from model Pony",
        )
        # Remove constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Duplicate deleted immediately so reversal can re-add the constraint.
        Pony.objects.create(pink=1, weight=4.0).delete()
        if not connection.features.supports_deferrable_unique_constraints:
            # Unsupported backends must emit no DDL at all.
            self.assertEqual(len(ctx), 0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_deferrable_unique_constraints:
            # Unique constraint is deferred: a transient duplicate is allowed
            # as long as it's resolved before the transaction commits.
            with transaction.atomic():
                obj = Pony.objects.create(pink=1, weight=4.0)
                obj.pink = 2
                obj.save()
            # Constraint behavior can be changed with SET CONSTRAINTS.
            with self.assertRaises(IntegrityError):
                with transaction.atomic(), connection.cursor() as cursor:
                    quoted_name = connection.ops.quote_name(
                        deferred_unique_constraint.name
                    )
                    cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
                    # Now the duplicate fails at INSERT time, before the fix-up.
                    obj = Pony.objects.create(pink=1, weight=4.0)
                    obj.pink = 3
                    obj.save()
        else:
            Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "deferred_pink_constraint_rm",
            },
        )
    def test_add_covering_unique_constraint(self):
        """
        Tests AddConstraint with a covering UniqueConstraint (include=...):
        the constraint is only enforced on backends supporting covering
        indexes; others must emit no DDL.
        """
        app_label = "test_addcovering_uc"
        project_state = self.set_up_test_model(app_label)
        covering_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="covering_pink_constraint_add",
            include=["weight"],
        )
        operation = migrations.AddConstraint("Pony", covering_unique_constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint covering_pink_constraint_add on model Pony",
        )
        # Add constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 1)
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony.objects.create(pink=1, weight=4.0)
        if connection.features.supports_covering_indexes:
            # Duplicate pink violates the constraint.
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=4.0)
        else:
            # Unsupported backends must emit no DDL at all.
            self.assertEqual(len(ctx), 0)
            Pony.objects.create(pink=1, weight=4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": covering_unique_constraint},
        )
    def test_remove_covering_unique_constraint(self):
        """
        Tests RemoveConstraint with a covering UniqueConstraint (include=...),
        including reversal re-creating the constraint.
        """
        app_label = "test_removecovering_uc"
        covering_unique_constraint = models.UniqueConstraint(
            fields=["pink"],
            name="covering_pink_constraint_rm",
            include=["weight"],
        )
        project_state = self.set_up_test_model(
            app_label, constraints=[covering_unique_constraint]
        )
        operation = migrations.RemoveConstraint("Pony", covering_unique_constraint.name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint covering_pink_constraint_rm from model Pony",
        )
        # Remove constraint.
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # Duplicate deleted immediately so reversal can re-add the constraint.
        Pony.objects.create(pink=1, weight=4.0).delete()
        if not connection.features.supports_covering_indexes:
            # Unsupported backends must emit no DDL at all.
            self.assertEqual(len(ctx), 0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_covering_indexes:
            with self.assertRaises(IntegrityError):
                Pony.objects.create(pink=1, weight=4.0)
        else:
            Pony.objects.create(pink=1, weight=4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {
                "model_name": "Pony",
                "name": "covering_pink_constraint_rm",
            },
        )
    def test_alter_field_with_func_unique_constraint(self):
        """
        Altering a field covered by a UniqueConstraint keeps the backing
        index in place, both forwards and backwards.
        """
        app_label = "test_alfuncuc"
        constraint_name = f"{app_label}_pony_uq"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            constraints=[
                models.UniqueConstraint("pink", "weight", name=constraint_name)
            ],
        )
        operation = migrations.AlterField(
            "Pony", "pink", models.IntegerField(null=True)
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
    def test_add_func_unique_constraint(self):
        """
        Tests AddConstraint with a functional UniqueConstraint (an expression
        such as Abs("weight") instead of plain fields). The constraint is only
        created on backends that support expression indexes.
        """
        app_label = "test_adfuncuc"
        constraint_name = f"{app_label}_pony_abs_uq"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(app_label)
        constraint = models.UniqueConstraint(Abs("weight"), name=constraint_name)
        operation = migrations.AddConstraint("Pony", constraint)
        self.assertEqual(
            operation.describe(),
            "Create constraint test_adfuncuc_pony_abs_uq on model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "pony_test_adfuncuc_pony_abs_uq",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 1
        )
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Add constraint.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        Pony = new_state.apps.get_model(app_label, "Pony")
        Pony.objects.create(weight=4.0)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
            # abs(-4.0) == abs(4.0), so this duplicates the expression value.
            with self.assertRaises(IntegrityError):
                Pony.objects.create(weight=-4.0)
        else:
            self.assertIndexNameNotExists(table_name, constraint_name)
            Pony.objects.create(weight=-4.0)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Constraint doesn't work.
        Pony.objects.create(weight=-4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AddConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"model_name": "Pony", "constraint": constraint},
        )
    def test_remove_func_unique_constraint(self):
        """
        Tests RemoveConstraint with a functional UniqueConstraint, including
        reversal re-creating the expression index.
        """
        app_label = "test_rmfuncuc"
        constraint_name = f"{app_label}_pony_abs_uq"
        table_name = f"{app_label}_pony"
        project_state = self.set_up_test_model(
            app_label,
            constraints=[
                models.UniqueConstraint(Abs("weight"), name=constraint_name),
            ],
        )
        self.assertTableExists(table_name)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
        operation = migrations.RemoveConstraint("Pony", constraint_name)
        self.assertEqual(
            operation.describe(),
            "Remove constraint test_rmfuncuc_pony_abs_uq from model Pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "remove_pony_test_rmfuncuc_pony_abs_uq",
        )
        new_state = project_state.clone()
        operation.state_forwards(app_label, new_state)
        self.assertEqual(
            len(new_state.models[app_label, "pony"].options["constraints"]), 0
        )
        Pony = new_state.apps.get_model(app_label, "Pony")
        self.assertEqual(len(Pony._meta.constraints), 0)
        # Remove constraint.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertIndexNameNotExists(table_name, constraint_name)
        # Constraint doesn't work.
        Pony.objects.create(pink=1, weight=4.0)
        # abs(-4.0) duplicate deleted immediately so reversal can re-add it.
        Pony.objects.create(pink=1, weight=-4.0).delete()
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        if connection.features.supports_expression_indexes:
            self.assertIndexNameExists(table_name, constraint_name)
            with self.assertRaises(IntegrityError):
                Pony.objects.create(weight=-4.0)
        else:
            self.assertIndexNameNotExists(table_name, constraint_name)
            Pony.objects.create(weight=-4.0)
        # Deconstruction.
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RemoveConstraint")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"model_name": "Pony", "name": constraint_name})
    def test_alter_model_options(self):
        """
        Tests the AlterModelOptions operation: description, state alteration
        (no database changes are involved), and deconstruction.
        """
        project_state = self.set_up_test_model("test_almoop")
        # Test the state alteration (no DB alteration to test)
        operation = migrations.AlterModelOptions(
            "Pony", {"permissions": [("can_groom", "Can groom")]}
        )
        self.assertEqual(operation.describe(), "Change Meta options on Pony")
        self.assertEqual(
            operation.formatted_description(), "~ Change Meta options on Pony"
        )
        self.assertEqual(operation.migration_name_fragment, "alter_pony_options")
        new_state = project_state.clone()
        operation.state_forwards("test_almoop", new_state)
        # Old state untouched; new state carries the added permission.
        self.assertEqual(
            len(
                project_state.models["test_almoop", "pony"].options.get(
                    "permissions", []
                )
            ),
            0,
        )
        self.assertEqual(
            len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
            1,
        )
        self.assertEqual(
            new_state.models["test_almoop", "pony"].options["permissions"][0][0],
            "can_groom",
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterModelOptions")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2],
            {"name": "Pony", "options": {"permissions": [("can_groom", "Can groom")]}},
        )
    def test_alter_model_options_emptying(self):
        """
        The AlterModelOptions operation removes keys from the dict (#23121)
        """
        project_state = self.set_up_test_model("test_almoop", options=True)
        # Test the state alteration (no DB alteration to test)
        operation = migrations.AlterModelOptions("Pony", {})
        self.assertEqual(operation.describe(), "Change Meta options on Pony")
        new_state = project_state.clone()
        operation.state_forwards("test_almoop", new_state)
        # The existing permission is kept in the old state but dropped from
        # the new one — an empty options dict clears alterable options.
        self.assertEqual(
            len(
                project_state.models["test_almoop", "pony"].options.get(
                    "permissions", []
                )
            ),
            1,
        )
        self.assertEqual(
            len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
            0,
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterModelOptions")
        self.assertEqual(definition[1], [])
        self.assertEqual(definition[2], {"name": "Pony", "options": {}})
    def test_alter_order_with_respect_to(self):
        """
        Tests the AlterOrderWithRespectTo operation.
        """
        project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
        # Test the state alteration
        operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
        self.assertEqual(
            operation.describe(), "Set order_with_respect_to on Rider to pony"
        )
        self.assertEqual(
            operation.formatted_description(),
            "~ Set order_with_respect_to on Rider to pony",
        )
        self.assertEqual(
            operation.migration_name_fragment,
            "alter_rider_order_with_respect_to",
        )
        new_state = project_state.clone()
        operation.state_forwards("test_alorwrtto", new_state)
        self.assertIsNone(
            project_state.models["test_alorwrtto", "rider"].options.get(
                "order_with_respect_to", None
            )
        )
        self.assertEqual(
            new_state.models["test_alorwrtto", "rider"].options.get(
                "order_with_respect_to", None
            ),
            "pony",
        )
        # Make sure there's no matching index
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # Create some rows before alteration
        rendered_state = project_state.apps
        pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(
            weight=50
        )
        rider1 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
            pony=pony
        )
        # Self-referencing FK must be populated after creation.
        rider1.friend = rider1
        rider1.save()
        rider2 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
            pony=pony
        )
        rider2.friend = rider2
        rider2.save()
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_alorwrtto", editor, project_state, new_state
            )
        self.assertColumnExists("test_alorwrtto_rider", "_order")
        # Check for correct value in rows: pre-existing rows get _order=0.
        updated_riders = new_state.apps.get_model(
            "test_alorwrtto", "Rider"
        ).objects.all()
        self.assertEqual(updated_riders[0]._order, 0)
        self.assertEqual(updated_riders[1]._order, 0)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_alorwrtto", editor, new_state, project_state
            )
        self.assertColumnNotExists("test_alorwrtto_rider", "_order")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "AlterOrderWithRespectTo")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            definition[2], {"name": "Rider", "order_with_respect_to": "pony"}
        )
    def test_alter_model_managers(self):
        """
        The managers on a model are set.
        """
        project_state = self.set_up_test_model("test_almoma")
        # Test the state alteration
        operation = migrations.AlterModelManagers(
            "Pony",
            managers=[
                ("food_qs", FoodQuerySet.as_manager()),
                ("food_mgr", FoodManager("a", "b")),
                ("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
            ],
        )
        self.assertEqual(operation.describe(), "Change managers on Pony")
        self.assertEqual(operation.formatted_description(), "~ Change managers on Pony")
        self.assertEqual(operation.migration_name_fragment, "alter_pony_managers")
        managers = project_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers, [])
        new_state = project_state.clone()
        operation.state_forwards("test_almoma", new_state)
        self.assertIn(("test_almoma", "pony"), new_state.models)
        # Managers are preserved in declaration order with their
        # deconstructed constructor args (defaults filled in for food_mgr).
        managers = new_state.models["test_almoma", "pony"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
        # The managers are available on the rendered model class.
        rendered_state = new_state.apps
        model = rendered_state.get_model("test_almoma", "pony")
        self.assertIsInstance(model.food_qs, models.Manager)
        self.assertIsInstance(model.food_mgr, FoodManager)
        self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
    def test_alter_model_managers_emptying(self):
        """
        AlterModelManagers with an empty managers list removes every manager
        from the model state.
        """
        project_state = self.set_up_test_model("test_almomae", manager_model=True)
        # Test the state alteration
        operation = migrations.AlterModelManagers("Food", managers=[])
        self.assertEqual(operation.describe(), "Change managers on Food")
        self.assertIn(("test_almomae", "food"), project_state.models)
        # The manager-equipped test model starts with all three managers.
        managers = project_state.models["test_almomae", "food"].managers
        self.assertEqual(managers[0][0], "food_qs")
        self.assertIsInstance(managers[0][1], models.Manager)
        self.assertEqual(managers[1][0], "food_mgr")
        self.assertIsInstance(managers[1][1], FoodManager)
        # NOTE(review): FoodManager apparently fills in trailing default args
        # (1, 2) — confirm against its definition elsewhere in this file.
        self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
        self.assertEqual(managers[2][0], "food_mgr_kwargs")
        self.assertIsInstance(managers[2][1], FoodManager)
        self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
        new_state = project_state.clone()
        operation.state_forwards("test_almomae", new_state)
        # After the operation no managers remain on the state.
        managers = new_state.models["test_almomae", "food"].managers
        self.assertEqual(managers, [])
    def test_alter_fk(self):
        """
        Creating and then altering an FK works correctly
        and deals with the pending SQL (#23091)
        """
        project_state = self.set_up_test_model("test_alfk")
        # Test adding and then altering the FK in one go
        create_operation = migrations.CreateModel(
            name="Rider",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("pony", models.ForeignKey("Pony", models.CASCADE)),
            ],
        )
        create_state = project_state.clone()
        create_operation.state_forwards("test_alfk", create_state)
        # Alter only a non-schema attribute (editable): the column definition
        # is unchanged, but the AlterField code path still runs.
        alter_operation = migrations.AlterField(
            model_name="Rider",
            name="pony",
            field=models.ForeignKey("Pony", models.CASCADE, editable=False),
        )
        alter_state = create_state.clone()
        alter_operation.state_forwards("test_alfk", alter_state)
        # Both operations run inside a single schema editor, so CreateModel's
        # pending FK SQL must survive the subsequent AlterField (#23091).
        with connection.schema_editor() as editor:
            create_operation.database_forwards(
                "test_alfk", editor, project_state, create_state
            )
            alter_operation.database_forwards(
                "test_alfk", editor, create_state, alter_state
            )
    def test_alter_fk_non_fk(self):
        """
        Altering an FK to a non-FK works (#23244)
        """
        # Test the state alteration
        operation = migrations.AlterField(
            model_name="Rider",
            name="pony",
            field=models.FloatField(),
        )
        project_state, new_state = self.make_test_state(
            "test_afknfk", operation, related_model=True
        )
        # Test the database alteration
        # The FK starts as a "pony_id" column; replacing it with a plain
        # FloatField must rename the column to "pony" (and back on reversal).
        self.assertColumnExists("test_afknfk_rider", "pony_id")
        self.assertColumnNotExists("test_afknfk_rider", "pony")
        with connection.schema_editor() as editor:
            operation.database_forwards("test_afknfk", editor, project_state, new_state)
        self.assertColumnExists("test_afknfk_rider", "pony")
        self.assertColumnNotExists("test_afknfk_rider", "pony_id")
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_afknfk", editor, new_state, project_state
            )
        self.assertColumnExists("test_afknfk_rider", "pony_id")
        self.assertColumnNotExists("test_afknfk_rider", "pony")
    def test_run_sql(self):
        """
        Tests the RunSQL operation.
        """
        project_state = self.set_up_test_model("test_runsql")
        # Create the operation
        operation = migrations.RunSQL(
            # Use a multi-line string with a comment to test splitting on
            # SQLite and MySQL respectively.
            "CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
            "INSERT INTO i_love_ponies (id, special_thing) "
            "VALUES (1, 'i love ponies'); -- this is magic!\n"
            "INSERT INTO i_love_ponies (id, special_thing) "
            "VALUES (2, 'i love django');\n"
            "UPDATE i_love_ponies SET special_thing = 'Ponies' "
            "WHERE special_thing LIKE '%%ponies';"
            "UPDATE i_love_ponies SET special_thing = 'Django' "
            "WHERE special_thing LIKE '%django';",
            # Run delete queries to test for parameter substitution failure
            # reported in #23426
            "DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
            "DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
            "DROP TABLE i_love_ponies",
            state_operations=[
                migrations.CreateModel(
                    "SomethingElse", [("id", models.AutoField(primary_key=True))]
                )
            ],
        )
        self.assertEqual(operation.describe(), "Raw SQL operation")
        self.assertEqual(operation.formatted_description(), "s Raw SQL operation")
        # Test the state alteration
        # Only state_operations touch the project state; the raw SQL does not.
        new_state = project_state.clone()
        operation.state_forwards("test_runsql", new_state)
        self.assertEqual(
            len(new_state.models["test_runsql", "somethingelse"].fields), 1
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        # Test SQL collection
        # collect_sql gathers statements without executing them; the literal
        # '%%' sequences must survive collection unmangled (#23426).
        with connection.schema_editor(collect_sql=True) as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
            self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
            operation.database_backwards(
                "test_runsql", editor, project_state, new_state
            )
            self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
        self.assertTableExists("i_love_ponies")
        # Make sure all the SQL was processed
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 2)
            cursor.execute(
                "SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'"
            )
            self.assertEqual(cursor.fetchall()[0][0], 1)
            cursor.execute(
                "SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'"
            )
            self.assertEqual(cursor.fetchall()[0][0], 1)
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunSQL")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]
        )
        # And elidable reduction
        # A non-elidable RunSQL never reduces, not even with itself; an
        # elidable one is simply dropped in favour of the following operation.
        self.assertIs(False, operation.reduce(operation, []))
        elidable_operation = migrations.RunSQL("SELECT 1 FROM void;", elidable=True)
        self.assertEqual(elidable_operation.reduce(operation, []), [operation])
    def test_run_sql_params(self):
        """
        #23426 - RunSQL should accept parameters.
        """
        project_state = self.set_up_test_model("test_runsql")
        # Create the operation
        operation = migrations.RunSQL(
            ["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
            ["DROP TABLE i_love_ponies"],
        )
        # Exercise all accepted statement forms: a bare string, an
        # [sql, params] list, and an (sql, params) tuple.
        param_operation = migrations.RunSQL(
            # forwards
            (
                "INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
                [
                    "INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);",
                    ["Ponies"],
                ],
                (
                    "INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);",
                    (
                        3,
                        "Python",
                    ),
                ),
            ),
            # backwards
            [
                "DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
                ["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
                (
                    "DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;",
                    [3, "Python"],
                ),
            ],
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        new_state = project_state.clone()
        # Test the database alteration
        with connection.schema_editor() as editor:
            operation.database_forwards("test_runsql", editor, project_state, new_state)
        # Test parameter passing
        with connection.schema_editor() as editor:
            param_operation.database_forwards(
                "test_runsql", editor, project_state, new_state
            )
        # Make sure all the SQL was processed
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 3)
        # Reversing the parameterised inserts must delete all three rows.
        with connection.schema_editor() as editor:
            param_operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        with connection.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
            self.assertEqual(cursor.fetchall()[0][0], 0)
        # And test reversal
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runsql", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
    def test_run_sql_params_invalid(self):
        """
        #23426 - RunSQL should fail when a list of statements with an incorrect
        number of tuples is given.
        """
        project_state = self.set_up_test_model("test_runsql")
        new_state = project_state.clone()
        # Parameterised statements must be exactly (sql, params) pairs: a
        # one-element list and a three-element tuple are both rejected.
        operation = migrations.RunSQL(
            # forwards
            [["INSERT INTO foo (bar) VALUES ('buz');"]],
            # backwards
            (("DELETE FROM foo WHERE bar = 'buz';", "invalid", "parameter count"),),
        )
        with connection.schema_editor() as editor:
            with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
                operation.database_forwards(
                    "test_runsql", editor, project_state, new_state
                )
        with connection.schema_editor() as editor:
            with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
                operation.database_backwards(
                    "test_runsql", editor, new_state, project_state
                )
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
    def test_run_sql_add_missing_semicolon_on_collect_sql(self):
        """
        Collected SQL ends with exactly one trailing semicolon, whether or not
        the original statement already carried one.
        """
        project_state = self.set_up_test_model("test_runsql")
        new_state = project_state.clone()
        # One statement with a semicolon, one without.
        tests = [
            "INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1);\n",
            "INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1)\n",
        ]
        for sql in tests:
            with self.subTest(sql=sql):
                operation = migrations.RunSQL(sql, migrations.RunPython.noop)
                with connection.schema_editor(collect_sql=True) as editor:
                    operation.database_forwards(
                        "test_runsql", editor, project_state, new_state
                    )
                    collected_sql = "\n".join(editor.collected_sql)
                    self.assertEqual(collected_sql.count(";"), 1)
def test_run_sql_backward_reverse_sql_required(self):
operation = migrations.RunSQL(sql=migrations.RunSQL.noop)
msg = "You cannot reverse this operation"
with (
connection.schema_editor() as editor,
self.assertRaisesMessage(NotImplementedError, msg),
):
operation.database_backwards("test_runsql", editor, None, None)
    def test_run_python(self):
        """
        Tests the RunPython operation
        """
        project_state = self.set_up_test_model("test_runpython", mti_model=True)
        # Create the operation
        def inner_method(models, schema_editor):
            Pony = models.get_model("test_runpython", "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            Pony.objects.create(weight=5)
        def inner_method_reverse(models, schema_editor):
            Pony = models.get_model("test_runpython", "Pony")
            Pony.objects.filter(pink=1, weight=3.55).delete()
            Pony.objects.filter(weight=5).delete()
        operation = migrations.RunPython(
            inner_method, reverse_code=inner_method_reverse
        )
        self.assertEqual(operation.describe(), "Raw Python operation")
        self.assertEqual(operation.formatted_description(), "p Raw Python operation")
        # Test the state alteration does nothing
        new_state = project_state.clone()
        operation.state_forwards("test_runpython", new_state)
        self.assertEqual(new_state, project_state)
        # Test the database alteration
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
        )
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
        )
        # Now test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
        )
        # Now test we can't use a string
        with self.assertRaisesMessage(
            ValueError, "RunPython must be supplied with a callable"
        ):
            migrations.RunPython("print 'ahahaha'")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
        # Also test reversal fails, with an operation identical to above but
        # without reverse_code set.
        no_reverse_operation = migrations.RunPython(inner_method)
        self.assertFalse(no_reverse_operation.reversible)
        with connection.schema_editor() as editor:
            no_reverse_operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
            with self.assertRaises(NotImplementedError):
                no_reverse_operation.database_backwards(
                    "test_runpython", editor, new_state, project_state
                )
        # The un-reversible forwards above still ran, so two ponies remain.
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
        )
        def create_ponies(models, schema_editor):
            Pony = models.get_model("test_runpython", "Pony")
            pony1 = Pony.objects.create(pink=1, weight=3.55)
            self.assertIsNot(pony1.pk, None)
            pony2 = Pony.objects.create(weight=5)
            self.assertIsNot(pony2.pk, None)
            self.assertNotEqual(pony1.pk, pony2.pk)
        operation = migrations.RunPython(create_ponies)
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4
        )
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["code"])
        # MTI models created inside RunPython must also get usable PKs.
        def create_shetlandponies(models, schema_editor):
            ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
            pony1 = ShetlandPony.objects.create(weight=4.0)
            self.assertIsNot(pony1.pk, None)
            pony2 = ShetlandPony.objects.create(weight=5.0)
            self.assertIsNot(pony2.pk, None)
            self.assertNotEqual(pony1.pk, pony2.pk)
        operation = migrations.RunPython(create_shetlandponies)
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_runpython", editor, project_state, new_state
            )
        self.assertEqual(
            project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6
        )
        self.assertEqual(
            project_state.apps.get_model(
                "test_runpython", "ShetlandPony"
            ).objects.count(),
            2,
        )
        # And elidable reduction
        # A non-elidable RunPython never reduces; an elidable one is dropped.
        self.assertIs(False, operation.reduce(operation, []))
        elidable_operation = migrations.RunPython(inner_method, elidable=True)
        self.assertEqual(elidable_operation.reduce(operation, []), [operation])
def test_run_python_invalid_reverse_code(self):
msg = "RunPython must be supplied with callable arguments"
with self.assertRaisesMessage(ValueError, msg):
migrations.RunPython(code=migrations.RunPython.noop, reverse_code="invalid")
    def test_run_python_atomic(self):
        """
        Tests the RunPython operation correctly handles the "atomic" keyword:
        data written before the deliberate ValueError must be rolled back (or
        kept) depending on atomicity and transactional-DDL support.
        """
        project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
        # Writes one row, then always fails, so the row's survival reveals
        # whether the operation ran in a transaction.
        def inner_method(models, schema_editor):
            Pony = models.get_model("test_runpythonatomic", "Pony")
            Pony.objects.create(pink=1, weight=3.55)
            raise ValueError("Adrian hates ponies.")
        # Verify atomicity when applying.
        atomic_migration = Migration("test", "test_runpythonatomic")
        atomic_migration.operations = [
            migrations.RunPython(inner_method, reverse_code=inner_method)
        ]
        non_atomic_migration = Migration("test", "test_runpythonatomic")
        non_atomic_migration.operations = [
            migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)
        ]
        # If we're a fully-transactional database, both versions should
        # rollback
        if connection.features.can_rollback_ddl:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
        # Otherwise, the non-atomic operation should leave a row there
        else:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.apply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.apply(project_state, editor)
            # The row created before the ValueError survives (count == 1).
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                1,
            )
        # Reset object count to zero and verify atomicity when unapplying.
        project_state.apps.get_model(
            "test_runpythonatomic", "Pony"
        ).objects.all().delete()
        # On a fully-transactional database, both versions rollback.
        if connection.features.can_rollback_ddl:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
        # Otherwise, the non-atomic operation leaves a row there.
        else:
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                0,
            )
            with self.assertRaises(ValueError):
                with connection.schema_editor() as editor:
                    non_atomic_migration.unapply(project_state, editor)
            self.assertEqual(
                project_state.apps.get_model(
                    "test_runpythonatomic", "Pony"
                ).objects.count(),
                1,
            )
        # Verify deconstruction.
        definition = non_atomic_migration.operations[0].deconstruct()
        self.assertEqual(definition[0], "RunPython")
        self.assertEqual(definition[1], [])
        self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
    def test_run_python_related_assignment(self):
        """
        #24282 - Model changes to a FK reverse side update the model
        on the FK side as well.
        """
        def inner_method(models, schema_editor):
            Author = models.get_model("test_authors", "Author")
            Book = models.get_model("test_books", "Book")
            author = Author.objects.create(name="Hemingway")
            Book.objects.create(title="Old Man and The Sea", author=author)
        create_author = migrations.CreateModel(
            "Author",
            [
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_book = migrations.CreateModel(
            "Book",
            [
                ("id", models.AutoField(primary_key=True)),
                ("title", models.CharField(max_length=100)),
                ("author", models.ForeignKey("test_authors.Author", models.CASCADE)),
            ],
            options={},
        )
        add_hometown = migrations.AddField(
            "Author",
            "hometown",
            models.CharField(max_length=100),
        )
        create_old_man = migrations.RunPython(inner_method, inner_method)
        # Apply each operation in its own state-clone step, mirroring how the
        # executor threads state between operations.
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_author.state_forwards("test_authors", new_state)
            create_author.database_forwards(
                "test_authors", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_book.state_forwards("test_books", new_state)
            create_book.database_forwards(
                "test_books", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            add_hometown.state_forwards("test_authors", new_state)
            add_hometown.database_forwards(
                "test_authors", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        # The RunPython runs in the books app but assigns an Author (the FK
        # target, changed above); creating the Book must not fail (#24282).
        with connection.schema_editor() as editor:
            create_old_man.state_forwards("test_books", new_state)
            create_old_man.database_forwards(
                "test_books", editor, project_state, new_state
            )
    def test_model_with_bigautofield(self):
        """
        A model with BigAutoField can be created.
        """
        # 2**33 exceeds the 32-bit integer range, so these inserts (and the FK
        # assignments) only work if the columns are genuinely 64-bit.
        def create_data(models, schema_editor):
            Author = models.get_model("test_author", "Author")
            Book = models.get_model("test_book", "Book")
            author1 = Author.objects.create(name="Hemingway")
            Book.objects.create(title="Old Man and The Sea", author=author1)
            Book.objects.create(id=2**33, title="A farewell to arms", author=author1)
            author2 = Author.objects.create(id=2**33, name="Remarque")
            Book.objects.create(title="All quiet on the western front", author=author2)
            Book.objects.create(title="Arc de Triomphe", author=author2)
        create_author = migrations.CreateModel(
            "Author",
            [
                ("id", models.BigAutoField(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_book = migrations.CreateModel(
            "Book",
            [
                ("id", models.BigAutoField(primary_key=True)),
                ("title", models.CharField(max_length=100)),
                (
                    "author",
                    models.ForeignKey(
                        to="test_author.Author", on_delete=models.CASCADE
                    ),
                ),
            ],
            options={},
        )
        fill_data = migrations.RunPython(create_data)
        # Apply the operations one clone step at a time.
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_author.state_forwards("test_author", new_state)
            create_author.database_forwards(
                "test_author", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_book.state_forwards("test_book", new_state)
            create_book.database_forwards("test_book", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_data.state_forwards("fill_data", new_state)
            fill_data.database_forwards("fill_data", editor, project_state, new_state)
    def _test_autofield_foreignfield_growth(
        self, source_field, target_field, target_value
    ):
        """
        A field may be migrated in the following ways:
        - AutoField to BigAutoField
        - SmallAutoField to AutoField
        - SmallAutoField to BigAutoField

        ``target_value`` should be an id that overflows ``source_field`` but
        fits ``target_field``, proving both PK and FK columns really grew.
        """
        def create_initial_data(models, schema_editor):
            Article = models.get_model("test_article", "Article")
            Blog = models.get_model("test_blog", "Blog")
            blog = Blog.objects.create(name="web development done right")
            Article.objects.create(name="Frameworks", blog=blog)
            Article.objects.create(name="Programming Languages", blog=blog)
        def create_big_data(models, schema_editor):
            Article = models.get_model("test_article", "Article")
            Blog = models.get_model("test_blog", "Blog")
            blog2 = Blog.objects.create(name="Frameworks", id=target_value)
            Article.objects.create(name="Django", blog=blog2)
            Article.objects.create(id=target_value, name="Django2", blog=blog2)
        create_blog = migrations.CreateModel(
            "Blog",
            [
                ("id", source_field(primary_key=True)),
                ("name", models.CharField(max_length=100)),
            ],
            options={},
        )
        create_article = migrations.CreateModel(
            "Article",
            [
                ("id", source_field(primary_key=True)),
                (
                    "blog",
                    models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE),
                ),
                ("name", models.CharField(max_length=100)),
                ("data", models.TextField(default="")),
            ],
            options={},
        )
        fill_initial_data = migrations.RunPython(
            create_initial_data, create_initial_data
        )
        fill_big_data = migrations.RunPython(create_big_data, create_big_data)
        grow_article_id = migrations.AlterField(
            "Article", "id", target_field(primary_key=True)
        )
        grow_blog_id = migrations.AlterField(
            "Blog", "id", target_field(primary_key=True)
        )
        project_state = ProjectState()
        new_state = project_state.clone()
        with connection.schema_editor() as editor:
            create_blog.state_forwards("test_blog", new_state)
            create_blog.database_forwards("test_blog", editor, project_state, new_state)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            create_article.state_forwards("test_article", new_state)
            create_article.database_forwards(
                "test_article", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            fill_initial_data.state_forwards("fill_initial_data", new_state)
            fill_initial_data.database_forwards(
                "fill_initial_data", editor, project_state, new_state
            )
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_article_id.state_forwards("test_article", new_state)
            grow_article_id.database_forwards(
                "test_article", editor, project_state, new_state
            )
        # The rendered model must now use the target PK field type.
        state = new_state.clone()
        article = state.apps.get_model("test_article.Article")
        self.assertIsInstance(article._meta.pk, target_field)
        project_state = new_state
        new_state = new_state.clone()
        with connection.schema_editor() as editor:
            grow_blog_id.state_forwards("test_blog", new_state)
            grow_blog_id.database_forwards(
                "test_blog", editor, project_state, new_state
            )
        state = new_state.clone()
        blog = state.apps.get_model("test_blog.Blog")
        self.assertIsInstance(blog._meta.pk, target_field)
        project_state = new_state
        new_state = new_state.clone()
        # Inserting values above the source field's range must now succeed
        # for the PK and for the Article.blog FK column alike.
        with connection.schema_editor() as editor:
            fill_big_data.state_forwards("fill_big_data", new_state)
            fill_big_data.database_forwards(
                "fill_big_data", editor, project_state, new_state
            )
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
2**33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
2**22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
2**33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(
migrations.RunPython.noop, migrations.RunPython.noop
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
    def test_separate_database_and_state(self):
        """
        Tests the SeparateDatabaseAndState operation.
        """
        project_state = self.set_up_test_model("test_separatedatabaseandstate")
        # Create the operation
        database_operation = migrations.RunSQL(
            "CREATE TABLE i_love_ponies (id int, special_thing int);",
            "DROP TABLE i_love_ponies;",
        )
        state_operation = migrations.CreateModel(
            "SomethingElse", [("id", models.AutoField(primary_key=True))]
        )
        operation = migrations.SeparateDatabaseAndState(
            state_operations=[state_operation], database_operations=[database_operation]
        )
        self.assertEqual(
            operation.describe(), "Custom state/database change combination"
        )
        self.assertEqual(
            operation.formatted_description(),
            "? Custom state/database change combination",
        )
        # Test the state alteration
        # Only the state_operations affect the state: SomethingElse appears
        # there even though no matching table is ever created.
        new_state = project_state.clone()
        operation.state_forwards("test_separatedatabaseandstate", new_state)
        self.assertEqual(
            len(
                new_state.models[
                    "test_separatedatabaseandstate", "somethingelse"
                ].fields
            ),
            1,
        )
        # Make sure there's no table
        self.assertTableNotExists("i_love_ponies")
        # Test the database alteration
        # Conversely, only the database_operations touch the schema.
        with connection.schema_editor() as editor:
            operation.database_forwards(
                "test_separatedatabaseandstate", editor, project_state, new_state
            )
        self.assertTableExists("i_love_ponies")
        # And test reversal
        self.assertTrue(operation.reversible)
        with connection.schema_editor() as editor:
            operation.database_backwards(
                "test_separatedatabaseandstate", editor, new_state, project_state
            )
        self.assertTableNotExists("i_love_ponies")
        # And deconstruction
        definition = operation.deconstruct()
        self.assertEqual(definition[0], "SeparateDatabaseAndState")
        self.assertEqual(definition[1], [])
        self.assertEqual(
            sorted(definition[2]), ["database_operations", "state_operations"]
        )
def test_separate_database_and_state2(self):
"""
A complex SeparateDatabaseAndState operation: Multiple operations both
for state and database. Verify the state dependencies within each list
and that state ops don't affect the database.
"""
app_label = "test_separatedatabaseandstate2"
project_state = self.set_up_test_model(app_label)
# Create the operation
database_operations = [
migrations.CreateModel(
"ILovePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveponies"},
),
migrations.CreateModel(
"ILoveMorePonies",
# We use IntegerField and not AutoField because
# the model is going to be deleted immediately
# and with an AutoField this fails on Oracle
[("id", models.IntegerField(primary_key=True))],
options={"db_table": "ilovemoreponies"},
),
migrations.DeleteModel("ILoveMorePonies"),
migrations.CreateModel(
"ILoveEvenMorePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveevenmoreponies"},
),
]
state_operations = [
migrations.CreateModel(
"SomethingElse",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingelse"},
),
migrations.DeleteModel("SomethingElse"),
migrations.CreateModel(
"SomethingCompletelyDifferent",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingcompletelydifferent"},
),
]
operation = migrations.SeparateDatabaseAndState(
state_operations=state_operations,
database_operations=database_operations,
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
def assertModelsAndTables(after_db):
# Tables and models exist, or don't, as they should:
self.assertNotIn((app_label, "somethingelse"), new_state.models)
self.assertEqual(
len(new_state.models[app_label, "somethingcompletelydifferent"].fields),
1,
)
self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
self.assertTableNotExists("somethingelse")
self.assertTableNotExists("somethingcompletelydifferent")
self.assertTableNotExists("ilovemoreponies")
if after_db:
self.assertTableExists("iloveponies")
self.assertTableExists("iloveevenmoreponies")
else:
self.assertTableNotExists("iloveponies")
self.assertTableNotExists("iloveevenmoreponies")
assertModelsAndTables(after_db=False)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertModelsAndTables(after_db=True)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertModelsAndTables(after_db=False)
    def _test_invalid_generated_field_changes(self, db_persist):
        """
        AlterField to, from, or between GeneratedFields must raise, because
        modifying generated columns in place is unsupported.
        """
        regular = models.IntegerField(default=1)
        generated_1 = models.GeneratedField(
            expression=F("pink") + F("pink"),
            output_field=models.IntegerField(),
            db_persist=db_persist,
        )
        generated_2 = models.GeneratedField(
            expression=F("pink") + F("pink") + F("pink"),
            output_field=models.IntegerField(),
            db_persist=db_persist,
        )
        # (app_label, field added first, field altered to) — covering
        # regular→generated, generated→regular, and generated→generated.
        tests = [
            ("test_igfc_1", regular, generated_1),
            ("test_igfc_2", generated_1, regular),
            ("test_igfc_3", generated_1, generated_2),
        ]
        for app_label, add_field, alter_field in tests:
            project_state = self.set_up_test_model(app_label)
            operations = [
                migrations.AddField("Pony", "modified_pink", add_field),
                migrations.AlterField("Pony", "modified_pink", alter_field),
            ]
            msg = (
                "Modifying GeneratedFields is not supported - the field "
                f"{app_label}.Pony.modified_pink must be removed and re-added with the "
                "new definition."
            )
            with self.assertRaisesMessage(ValueError, msg):
                self.apply_operations(app_label, project_state, operations)
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_invalid_generated_field_changes_stored(self):
        """Altering a stored (persisted) GeneratedField raises ValueError."""
        self._test_invalid_generated_field_changes(db_persist=True)
    @skipUnlessDBFeature("supports_virtual_generated_columns")
    def test_invalid_generated_field_changes_virtual(self):
        """Altering a virtual (non-persisted) GeneratedField raises ValueError."""
        self._test_invalid_generated_field_changes(db_persist=False)
    def _test_invalid_generated_field_changes_on_rename(self, db_persist):
        """Altering a GeneratedField after renaming a referenced field fails."""
        app_label = "test_igfcor"
        operation = migrations.AddField(
            "Pony",
            "modified_pink",
            models.GeneratedField(
                expression=F("pink") + F("pink"),
                output_field=models.IntegerField(),
                db_persist=db_persist,
            ),
        )
        project_state, new_state = self.make_test_state(app_label, operation)
        # Add generated column.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        # Rename field used in the generated field.
        operations = [
            migrations.RenameField("Pony", "pink", "renamed_pink"),
            migrations.AlterField(
                "Pony",
                "modified_pink",
                models.GeneratedField(
                    expression=F("renamed_pink"),
                    output_field=models.IntegerField(),
                    db_persist=db_persist,
                ),
            ),
        ]
        msg = (
            "Modifying GeneratedFields is not supported - the field "
            f"{app_label}.Pony.modified_pink must be removed and re-added with the "
            "new definition."
        )
        with self.assertRaisesMessage(ValueError, msg):
            self.apply_operations(app_label, new_state, operations)
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_invalid_generated_field_changes_on_rename_stored(self):
        # Stored-column (db_persist=True) variant of the rename scenario.
        self._test_invalid_generated_field_changes_on_rename(db_persist=True)
    @skipUnlessDBFeature("supports_virtual_generated_columns")
    def test_invalid_generated_field_changes_on_rename_virtual(self):
        # Virtual-column (db_persist=False) variant of the rename scenario.
        self._test_invalid_generated_field_changes_on_rename(db_persist=False)
    @skipUnlessDBFeature(
        "supports_stored_generated_columns",
        "supports_virtual_generated_columns",
    )
    def test_invalid_generated_field_persistency_change(self):
        """Toggling db_persist on an otherwise-identical GeneratedField fails."""
        app_label = "test_igfpc"
        project_state = self.set_up_test_model(app_label)
        operations = [
            migrations.AddField(
                "Pony",
                "modified_pink",
                models.GeneratedField(
                    expression=F("pink"),
                    output_field=models.IntegerField(),
                    db_persist=True,
                ),
            ),
            migrations.AlterField(
                "Pony",
                "modified_pink",
                models.GeneratedField(
                    expression=F("pink"),
                    output_field=models.IntegerField(),
                    db_persist=False,
                ),
            ),
        ]
        msg = (
            "Modifying GeneratedFields is not supported - the field "
            f"{app_label}.Pony.modified_pink must be removed and re-added with the "
            "new definition."
        )
        with self.assertRaisesMessage(ValueError, msg):
            self.apply_operations(app_label, project_state, operations)
    def _test_add_generated_field(self, db_persist):
        """AddField with a GeneratedField creates a working computed column."""
        app_label = "test_agf"
        operation = migrations.AddField(
            "Pony",
            "modified_pink",
            models.GeneratedField(
                expression=F("pink") + F("pink"),
                output_field=models.IntegerField(),
                db_persist=db_persist,
            ),
        )
        project_state, new_state = self.make_test_state(app_label, operation)
        # The new field must be reflected in the in-memory state.
        self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
        # Add generated column.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnExists(f"{app_label}_pony", "modified_pink")
        Pony = new_state.apps.get_model(app_label, "Pony")
        obj = Pony.objects.create(pink=5, weight=3.23)
        # pink + pink == 10 for pink=5, computed by the database.
        self.assertEqual(obj.modified_pink, 10)
        # Reversal.
        with connection.schema_editor() as editor:
            operation.database_backwards(app_label, editor, new_state, project_state)
        self.assertColumnNotExists(f"{app_label}_pony", "modified_pink")
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_generated_field_changes_output_field(self):
        """Changing only the output_field of a GeneratedField must not raise."""
        app_label = "test_gfcof"
        operation = migrations.AddField(
            "Pony",
            "modified_pink",
            models.GeneratedField(
                expression=F("pink") + F("pink"),
                output_field=models.IntegerField(),
                db_persist=True,
            ),
        )
        from_state, to_state = self.make_test_state(app_label, operation)
        # Add generated column.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
        # Update output_field used in the generated field.
        operation = migrations.AlterField(
            "Pony",
            "modified_pink",
            models.GeneratedField(
                expression=F("pink") + F("pink"),
                output_field=models.DecimalField(decimal_places=2, max_digits=16),
                db_persist=True,
            ),
        )
        from_state = to_state.clone()
        to_state = self.apply_operations(app_label, from_state, [operation])
        # NOTE(review): apply_operations already applied the DB change; this
        # re-run presumably asserts the operation does not raise — confirm.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, from_state, to_state)
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_add_generated_field_stored(self):
        # Stored-column (db_persist=True) variant of the add scenario.
        self._test_add_generated_field(db_persist=True)
    @skipUnlessDBFeature("supports_virtual_generated_columns")
    def test_add_generated_field_virtual(self):
        # Virtual-column (db_persist=False) variant of the add scenario.
        self._test_add_generated_field(db_persist=False)
    def _test_remove_generated_field(self, db_persist):
        """RemoveField drops a previously added generated column."""
        app_label = "test_rgf"
        operation = migrations.AddField(
            "Pony",
            "modified_pink",
            models.GeneratedField(
                expression=F("pink") + F("pink"),
                output_field=models.IntegerField(),
                db_persist=db_persist,
            ),
        )
        project_state, new_state = self.make_test_state(app_label, operation)
        # The new field must be reflected in the in-memory state.
        self.assertEqual(len(new_state.models[app_label, "pony"].fields), 6)
        # Add generated column.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        project_state = new_state
        new_state = project_state.clone()
        operation = migrations.RemoveField("Pony", "modified_pink")
        operation.state_forwards(app_label, new_state)
        # Remove generated column.
        with connection.schema_editor() as editor:
            operation.database_forwards(app_label, editor, project_state, new_state)
        self.assertColumnNotExists(f"{app_label}_pony", "modified_pink")
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_remove_generated_field_stored(self):
        # Stored-column (db_persist=True) variant of the remove scenario.
        self._test_remove_generated_field(db_persist=True)
    @skipUnlessDBFeature("supports_virtual_generated_columns")
    def test_remove_generated_field_virtual(self):
        # Virtual-column (db_persist=False) variant of the remove scenario.
        self._test_remove_generated_field(db_persist=False)
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_add_field_after_generated_field(self):
        """Adding a plain field must not disturb existing generated values."""
        app_label = "test_adfagf"
        project_state = self.set_up_test_model(app_label)
        operation_1 = migrations.AddField(
            "Pony",
            "generated",
            models.GeneratedField(
                expression=Value(1),
                output_field=models.IntegerField(),
                db_persist=True,
            ),
        )
        operation_2 = migrations.AddField(
            "Pony",
            "static",
            models.IntegerField(default=2),
        )
        new_state = project_state.clone()
        operation_1.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_1.database_forwards(app_label, editor, project_state, new_state)
        project_state, new_state = new_state, new_state.clone()
        # Row created before the second AddField; its generated value must
        # survive the schema change below.
        pony_old = new_state.apps.get_model(app_label, "Pony").objects.create(weight=20)
        self.assertEqual(pony_old.generated, 1)
        operation_2.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_2.database_forwards(app_label, editor, project_state, new_state)
        Pony = new_state.apps.get_model(app_label, "Pony")
        pony_old = Pony.objects.get(pk=pony_old.pk)
        self.assertEqual(pony_old.generated, 1)
        self.assertEqual(pony_old.static, 2)
        # Rows created after the change get both values as well.
        pony_new = Pony.objects.create(weight=20)
        self.assertEqual(pony_new.generated, 1)
        self.assertEqual(pony_new.static, 2)
    def test_composite_pk_operations(self):
        """Add, alter around, and remove a CompositePrimaryKey end to end."""
        app_label = "test_d8d90af6"
        project_state = self.set_up_test_model(app_label)
        operation_0 = migrations.AlterField(
            "Pony", "id", models.IntegerField(primary_key=True)
        )
        operation_1 = migrations.AddField(
            "Pony", "pk", models.CompositePrimaryKey("id", "pink")
        )
        operation_2 = migrations.AlterField("Pony", "id", models.IntegerField())
        operation_3 = migrations.RemoveField("Pony", "pk")
        table_name = f"{app_label}_pony"
        # 1. Add field (pk).
        new_state = project_state.clone()
        new_state = self.apply_operations(
            app_label, new_state, [operation_0, operation_1]
        )
        # A composite pk is virtual: no physical column is created.
        self.assertColumnNotExists(table_name, "pk")
        Pony = new_state.apps.get_model(app_label, "pony")
        obj_1 = Pony.objects.create(id=1, weight=1)
        # Diagnostic context for the assertions below.
        msg = (
            f"obj_1={obj_1}, "
            f"obj_1.id={obj_1.id}, "
            f"obj_1.pink={obj_1.pink}, "
            f"obj_1.pk={obj_1.pk}, "
            f"Pony._meta.pk={repr(Pony._meta.pk)}, "
            f"Pony._meta.get_field('id')={repr(Pony._meta.get_field('id'))}"
        )
        self.assertEqual(obj_1.pink, 3, msg)
        self.assertEqual(obj_1.pk, (obj_1.id, obj_1.pink), msg)
        # 2. Alter field (id -> IntegerField()).
        project_state, new_state = new_state, new_state.clone()
        operation_2.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_2.database_forwards(app_label, editor, project_state, new_state)
        Pony = new_state.apps.get_model(app_label, "pony")
        obj_1 = Pony.objects.get(id=obj_1.id)
        self.assertEqual(obj_1.pink, 3)
        self.assertEqual(obj_1.pk, (obj_1.id, obj_1.pink))
        obj_2 = Pony.objects.create(id=2, weight=2)
        self.assertEqual(obj_2.id, 2)
        self.assertEqual(obj_2.pink, 3)
        self.assertEqual(obj_2.pk, (obj_2.id, obj_2.pink))
        # 3. Remove field (pk).
        project_state, new_state = new_state, new_state.clone()
        operation_3.state_forwards(app_label, new_state)
        with connection.schema_editor() as editor:
            operation_3.database_forwards(app_label, editor, project_state, new_state)
        Pony = new_state.apps.get_model(app_label, "pony")
        obj_1 = Pony.objects.get(id=obj_1.id)
        # pk falls back to the plain id after the composite key is removed.
        self.assertEqual(obj_1.pk, obj_1.id)
        obj_2 = Pony.objects.get(id=obj_2.id)
        self.assertEqual(obj_2.id, 2)
        self.assertEqual(obj_2.pk, obj_2.id)
|
OperationTests
|
python
|
joke2k__faker
|
faker/providers/company/de_CH/__init__.py
|
{
"start": 45,
"end": 472
}
|
class ____(CompanyProvider):
    """Swiss-German (de_CH) company name provider."""
    # Source: https://de.wikipedia.org/wiki/Firma#Schweizerisches_Recht
    # Name templates; the {{...}} placeholders are resolved by other providers.
    formats = (
        "{{last_name}} {{company_suffix}}",
        "{{last_name}} {{last_name}} {{company_suffix}}",
    )
    # Legal-form suffixes. "AG" and "GmbH" appear three times each so that
    # uniform random selection weights them more heavily.
    company_suffixes = (
        "AG",
        "AG",
        "AG",
        "GmbH",
        "GmbH",
        "GmbH",
        "& Co.",
        "& Partner",
        "& Cie.",
        "& Söhne",
    )
|
Provider
|
python
|
sympy__sympy
|
sympy/printing/numpy.py
|
{
"start": 1091,
"end": 14025
}
|
class ____(ArrayPrinter, PythonCodePrinter):
    """
    Numpy printer which handles vectorized piecewise functions,
    logical operators, etc.
    """
    # Name of the array module used to qualify emitted calls (e.g. numpy.sin).
    _module = 'numpy'
    # Known function/constant tables: SymPy name -> module-qualified name.
    _kf = _numpy_known_functions
    _kc = _numpy_known_constants
    def __init__(self, settings=None):
        """
        `settings` is passed to CodePrinter.__init__()
        `module` specifies the array module to use, currently 'NumPy', 'CuPy'
        or 'JAX'.
        """
        self.language = "Python with {}".format(self._module)
        self.printmethod = "_{}code".format(self._module)
        # The subclass table extends (and may override) the base printer's.
        self._kf = {**PythonCodePrinter._kf, **self._kf}
        super().__init__(settings=settings)
    def _print_seq(self, seq):
        "General sequence printer: converts to tuple"
        # Print tuples here instead of lists because numba supports
        # tuples in nopython mode.
        delimiter=', '
        return '({},)'.format(delimiter.join(self._print(item) for item in seq))
    def _print_NegativeInfinity(self, expr):
        return '-' + self._print(S.Infinity)
    def _print_MatMul(self, expr):
        "Matrix multiplication printer"
        # A scalar coefficient is appended as the last .dot() operand.
        if expr.as_coeff_matrices()[0] is not S.One:
            expr_list = expr.as_coeff_matrices()[1]+[(expr.as_coeff_matrices()[0])]
            return '({})'.format(').dot('.join(self._print(i) for i in expr_list))
        return '({})'.format(').dot('.join(self._print(i) for i in expr.args))
    def _print_MatPow(self, expr):
        "Matrix power printer"
        return '{}({}, {})'.format(self._module_format(self._module + '.linalg.matrix_power'),
            self._print(expr.args[0]), self._print(expr.args[1]))
    def _print_Inverse(self, expr):
        "Matrix inverse printer"
        return '{}({})'.format(self._module_format(self._module + '.linalg.inv'),
            self._print(expr.args[0]))
    def _print_DotProduct(self, expr):
        # DotProduct allows any shape order, but numpy.dot does matrix
        # multiplication, so we have to make sure it gets 1 x n by n x 1.
        arg1, arg2 = expr.args
        if arg1.shape[0] != 1:
            arg1 = arg1.T
        if arg2.shape[1] != 1:
            arg2 = arg2.T
        return "%s(%s, %s)" % (self._module_format(self._module + '.dot'),
                               self._print(arg1),
                               self._print(arg2))
    def _print_MatrixSolve(self, expr):
        return "%s(%s, %s)" % (self._module_format(self._module + '.linalg.solve'),
                               self._print(expr.matrix),
                               self._print(expr.vector))
    def _print_ZeroMatrix(self, expr):
        return '{}({})'.format(self._module_format(self._module + '.zeros'),
            self._print(expr.shape))
    def _print_OneMatrix(self, expr):
        return '{}({})'.format(self._module_format(self._module + '.ones'),
            self._print(expr.shape))
    def _print_FunctionMatrix(self, expr):
        from sympy.abc import i, j
        # Wrap a plain callable into a Lambda so its args can be printed.
        lamda = expr.lamda
        if not isinstance(lamda, Lambda):
            lamda = Lambda((i, j), lamda(i, j))
        return '{}(lambda {}: {}, {})'.format(self._module_format(self._module + '.fromfunction'),
            ', '.join(self._print(arg) for arg in lamda.args[0]),
            self._print(lamda.args[1]), self._print(expr.shape))
    def _print_HadamardProduct(self, expr):
        # Emitted as nested binary multiply(a, multiply(b, ...)) calls.
        func = self._module_format(self._module + '.multiply')
        return ''.join('{}({}, '.format(func, self._print(arg)) \
            for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
            ')' * (len(expr.args) - 1))
    def _print_KroneckerProduct(self, expr):
        # Emitted as nested binary kron(a, kron(b, ...)) calls.
        func = self._module_format(self._module + '.kron')
        return ''.join('{}({}, '.format(func, self._print(arg)) \
            for arg in expr.args[:-1]) + "{}{}".format(self._print(expr.args[-1]),
            ')' * (len(expr.args) - 1))
    def _print_Adjoint(self, expr):
        return '{}({}({}))'.format(
            self._module_format(self._module + '.conjugate'),
            self._module_format(self._module + '.transpose'),
            self._print(expr.args[0]))
    def _print_DiagonalOf(self, expr):
        # diag() extracts the diagonal; reshape to a column vector.
        vect = '{}({})'.format(
            self._module_format(self._module + '.diag'),
            self._print(expr.arg))
        return '{}({}, (-1, 1))'.format(
            self._module_format(self._module + '.reshape'), vect)
    def _print_DiagMatrix(self, expr):
        return '{}({})'.format(self._module_format(self._module + '.diagflat'),
            self._print(expr.args[0]))
    def _print_DiagonalMatrix(self, expr):
        # Mask with an identity matrix to zero the off-diagonal entries.
        return '{}({}, {}({}, {}))'.format(self._module_format(self._module + '.multiply'),
            self._print(expr.arg), self._module_format(self._module + '.eye'),
            self._print(expr.shape[0]), self._print(expr.shape[1]))
    def _print_Piecewise(self, expr):
        "Piecewise function printer"
        from sympy.logic.boolalg import ITE, simplify_logic
        def print_cond(cond):
            """ Problem having an ITE in the cond. """
            if cond.has(ITE):
                return self._print(simplify_logic(cond))
            else:
                return self._print(cond)
        exprs = '[{}]'.format(','.join(self._print(arg.expr) for arg in expr.args))
        conds = '[{}]'.format(','.join(print_cond(arg.cond) for arg in expr.args))
        # If [default_value, True] is a (expr, cond) sequence in a Piecewise object
        # it will behave the same as passing the 'default' kwarg to select()
        # *as long as* it is the last element in expr.args.
        # If this is not the case, it may be triggered prematurely.
        return '{}({}, {}, default={})'.format(
            self._module_format(self._module + '.select'), conds, exprs,
            self._print(S.NaN))
    def _print_Relational(self, expr):
        "Relational printer for Equality and Unequality"
        op = {
            '==' :'equal',
            '!=' :'not_equal',
            '<' :'less',
            '<=' :'less_equal',
            '>' :'greater',
            '>=' :'greater_equal',
        }
        if expr.rel_op in op:
            lhs = self._print(expr.lhs)
            rhs = self._print(expr.rhs)
            return '{op}({lhs}, {rhs})'.format(op=self._module_format(self._module + '.'+op[expr.rel_op]),
                lhs=lhs, rhs=rhs)
        return super()._print_Relational(expr)
    def _print_And(self, expr):
        "Logical And printer"
        # We have to override LambdaPrinter because it uses Python 'and' keyword.
        # If LambdaPrinter didn't define it, we could use StrPrinter's
        # version of the function and add 'logical_and' to NUMPY_TRANSLATIONS.
        return '{}.reduce(({}))'.format(self._module_format(self._module + '.logical_and'), ','.join(self._print(i) for i in expr.args))
    def _print_Or(self, expr):
        "Logical Or printer"
        # We have to override LambdaPrinter because it uses Python 'or' keyword.
        # If LambdaPrinter didn't define it, we could use StrPrinter's
        # version of the function and add 'logical_or' to NUMPY_TRANSLATIONS.
        return '{}.reduce(({}))'.format(self._module_format(self._module + '.logical_or'), ','.join(self._print(i) for i in expr.args))
    def _print_Not(self, expr):
        "Logical Not printer"
        # We have to override LambdaPrinter because it uses Python 'not' keyword.
        # If LambdaPrinter didn't define it, we would still have to define our
        # own because StrPrinter doesn't define it.
        return '{}({})'.format(self._module_format(self._module + '.logical_not'), ','.join(self._print(i) for i in expr.args))
    def _print_Pow(self, expr, rational=False):
        # XXX Workaround for negative integer power error
        if expr.exp.is_integer and expr.exp.is_negative:
            expr = Pow(expr.base, expr.exp.evalf(), evaluate=False)
        return self._hprint_Pow(expr, rational=rational, sqrt=self._module + '.sqrt')
    def _helper_minimum_maximum(self, op: str, *args):
        # Fold n-ary Min/Max into functools.reduce over the binary ufunc `op`.
        if len(args) == 0:
            raise NotImplementedError(f"Need at least one argument for {op}")
        elif len(args) == 1:
            return self._print(args[0])
        _reduce = self._module_format('functools.reduce')
        s_args = [self._print(arg) for arg in args]
        return f"{_reduce}({op}, [{', '.join(s_args)}])"
    def _print_Min(self, expr):
        return self._print_minimum(expr)
    def _print_amin(self, expr):
        return '{}({}, axis={})'.format(self._module_format(self._module + '.amin'), self._print(expr.array), self._print(expr.axis))
    def _print_minimum(self, expr):
        op = self._module_format(self._module + '.minimum')
        return self._helper_minimum_maximum(op, *expr.args)
    def _print_Max(self, expr):
        return self._print_maximum(expr)
    def _print_amax(self, expr):
        return '{}({}, axis={})'.format(self._module_format(self._module + '.amax'), self._print(expr.array), self._print(expr.axis))
    def _print_maximum(self, expr):
        op = self._module_format(self._module + '.maximum')
        return self._helper_minimum_maximum(op, *expr.args)
    def _print_arg(self, expr):
        return "%s(%s)" % (self._module_format(self._module + '.angle'), self._print(expr.args[0]))
    def _print_im(self, expr):
        return "%s(%s)" % (self._module_format(self._module + '.imag'), self._print(expr.args[0]))
    def _print_Mod(self, expr):
        return "%s(%s)" % (self._module_format(self._module + '.mod'), ', '.join(
            (self._print(arg) for arg in expr.args)))
    def _print_re(self, expr):
        return "%s(%s)" % (self._module_format(self._module + '.real'), self._print(expr.args[0]))
    def _print_sinc(self, expr):
        # SymPy's sinc is unnormalized; numpy.sinc(x) == sin(pi*x)/(pi*x),
        # hence the division of the argument by pi.
        return "%s(%s)" % (self._module_format(self._module + '.sinc'), self._print(expr.args[0]/S.Pi))
    def _print_MatrixBase(self, expr):
        # Empty matrices cannot round-trip through tolist(); emit zeros(shape).
        if 0 in expr.shape:
            func = self._module_format(f'{self._module}.{self._zeros}')
            return f"{func}({self._print(expr.shape)})"
        func = self.known_functions.get(expr.__class__.__name__, None)
        if func is None:
            func = self._module_format(f'{self._module}.array')
        return "%s(%s)" % (func, self._print(expr.tolist()))
    def _print_Identity(self, expr):
        shape = expr.shape
        if all(dim.is_Integer for dim in shape):
            return "%s(%s)" % (self._module_format(self._module + '.eye'), self._print(expr.shape[0]))
        else:
            raise NotImplementedError("Symbolic matrix dimensions are not yet supported for identity matrices")
    def _print_BlockMatrix(self, expr):
        return '{}({})'.format(self._module_format(self._module + '.block'),
            self._print(expr.args[0].tolist()))
    def _print_NDimArray(self, expr):
        # Rank-0 arrays are printed from their scalar entry.
        if expr.rank() == 0:
            func = self._module_format(f'{self._module}.array')
            return f"{func}({self._print(expr[()])})"
        # Empty arrays cannot round-trip through tolist(); emit zeros(shape).
        if 0 in expr.shape:
            func = self._module_format(f'{self._module}.{self._zeros}')
            return f"{func}({self._print(expr.shape)})"
        func = self._module_format(f'{self._module}.array')
        return f"{func}({self._print(expr.tolist())})"
    # Names of the array-module entry points; _zeros is used above, the rest
    # are presumably consumed by the ArrayPrinter base class — confirm.
    _add = "add"
    _einsum = "einsum"
    _transpose = "transpose"
    _ones = "ones"
    _zeros = "zeros"
    # No direct NumPy equivalents; defer to CodePrinter's unsupported handling.
    _print_lowergamma = CodePrinter._print_not_supported
    _print_uppergamma = CodePrinter._print_not_supported
    _print_fresnelc = CodePrinter._print_not_supported
    _print_fresnels = CodePrinter._print_not_supported
# Install a generic printer method for every known NumPy function/constant so
# they resolve without a hand-written _print_* method each.
for func in _numpy_known_functions:
    setattr(NumPyPrinter, f'_print_{func}', _print_known_func)
for const in _numpy_known_constants:
    setattr(NumPyPrinter, f'_print_{const}', _print_known_const)
# SymPy function name -> scipy.special function name.
_known_functions_scipy_special = {
    'Ei': 'expi',
    'erf': 'erf',
    'erfc': 'erfc',
    'besselj': 'jv',
    'bessely': 'yv',
    'besseli': 'iv',
    'besselk': 'kv',
    'cosm1': 'cosm1',
    'powm1': 'powm1',
    'factorial': 'factorial',
    'gamma': 'gamma',
    'loggamma': 'gammaln',
    'digamma': 'psi',
    'polygamma': 'polygamma',
    'RisingFactorial': 'poch',
    'jacobi': 'eval_jacobi',
    'gegenbauer': 'eval_gegenbauer',
    'chebyshevt': 'eval_chebyt',
    'chebyshevu': 'eval_chebyu',
    'legendre': 'eval_legendre',
    'hermite': 'eval_hermite',
    'laguerre': 'eval_laguerre',
    'assoc_laguerre': 'eval_genlaguerre',
    'beta': 'beta',
    'LambertW' : 'lambertw',
}
# SymPy constant name -> scipy.constants attribute name.
_known_constants_scipy_constants = {
    'GoldenRatio': 'golden_ratio',
    'Pi': 'pi',
}
# Fully-qualified lookup tables, keyed by SymPy name.
_scipy_known_functions = {k : "scipy.special." + v for k, v in _known_functions_scipy_special.items()}
_scipy_known_constants = {k : "scipy.constants." + v for k, v in _known_constants_scipy_constants.items()}
|
NumPyPrinter
|
python
|
google__pytype
|
pytype/tests/test_cmp2.py
|
{
"start": 383,
"end": 1137
}
|
class ____(test_base.BaseTest):
  """Tests the __contains__ -> __iter__ -> __getitem__ fallbacks."""

  def test_overload_contains(self):
    # An explicit __contains__ is type-checked: an argument that fails its
    # annotation is reported as unsupported-operands.
    self.CheckWithErrors("""
      class F:
        def __contains__(self, x: int):
          if not isinstance(x, int):
            raise TypeError("__contains__ only takes int")
          return True
      1 in F()
      "not int" in F()  # unsupported-operands
    """)

  def test_fallback_iter(self):
    # Without __contains__, membership falls back to __iter__ and any
    # operand type is accepted.
    self.Check("""
      class F:
        def __iter__(self):
          pass
      1 in F()
      "not int" in F()
    """)

  def test_fallback_getitem(self):
    # Without __contains__ and __iter__, membership falls back to
    # __getitem__ and any operand type is accepted.
    self.Check("""
      class F:
        def __getitem__(self, key):
          pass
      1 in F()
      "not int" in F()
    """)
|
ContainsFallbackTest
|
python
|
numba__numba
|
numba/core/types/functions.py
|
{
"start": 22054,
"end": 22170
}
|
class ____(Dispatcher):
    """Dispatcher subclass that enters an object-mode function.

    Marker subclass: adds no behavior beyond ``Dispatcher``.
    """
    pass
|
ObjModeDispatcher
|
python
|
kamyu104__LeetCode-Solutions
|
Python/largest-number.py
|
{
"start": 33,
"end": 314
}
|
class ____(object):
    # @param num, a list of integers
    # @return a string
    def largestNumber(self, num):
        """Arrange the integers to form the largest possible number string.

        Sorts the decimal string forms so that for any pair x, y the chosen
        order maximizes the concatenation (i.e. x precedes y when x+y > y+x).
        Fix: the original used the Python-2-only ``cmp`` builtin and the
        ``sort(cmp=...)`` keyword; ``functools.cmp_to_key`` preserves the
        exact ordering rule on both Python 2.4+ and Python 3.
        Time O(n log n * d), space O(n) for the string copies.
        """
        from functools import cmp_to_key

        def compare(x, y):
            # Negative -> x first; positive -> y first; 0 -> equal order.
            if y + x < x + y:
                return -1
            if y + x > x + y:
                return 1
            return 0

        num = [str(x) for x in num]
        num.sort(key=cmp_to_key(compare))
        largest = ''.join(num)
        # Collapse results like "00" (input all zeros) to a single "0".
        return largest.lstrip('0') or '0'
|
Solution
|
python
|
doocs__leetcode
|
solution/1600-1699/1673.Find the Most Competitive Subsequence/Solution.py
|
{
"start": 0,
"end": 329
}
|
class ____:
    def mostCompetitive(self, nums: List[int], k: int) -> List[int]:
        """Return the most competitive (lexicographically smallest)
        subsequence of ``nums`` of length ``k``.

        Monotonic-stack sweep: a larger tail element is discarded whenever
        enough elements remain to still reach length k. O(n) time, O(k) space.
        """
        chosen = []
        total = len(nums)
        for idx, value in enumerate(nums):
            # Pop bigger tail values while the remaining suffix can refill k.
            while chosen and chosen[-1] > value and len(chosen) + (total - idx) > k:
                chosen.pop()
            if len(chosen) < k:
                chosen.append(value)
        return chosen
|
Solution
|
python
|
gevent__gevent
|
src/gevent/greenlet.py
|
{
"start": 5525,
"end": 39313
}
|
class ____(greenlet):
"""
A light-weight cooperatively-scheduled execution unit.
"""
# pylint:disable=too-many-public-methods,too-many-instance-attributes
spawning_stack_limit = 10
# pylint:disable=keyword-arg-before-vararg,super-init-not-called
    def __init__(self, run=None, *args, **kwargs):
        """
        :param args: The arguments passed to the ``run`` function.
        :param kwargs: The keyword arguments passed to the ``run`` function.
        :keyword callable run: The callable object to run. If not given, this object's
            `_run` method will be invoked (typically defined by subclasses).

        .. versionchanged:: 1.1b1
            The ``run`` argument to the constructor is now verified to be a callable
            object. Previously, passing a non-callable object would fail after the greenlet
            was spawned.

        .. versionchanged:: 1.3b1
           The ``GEVENT_TRACK_GREENLET_TREE`` configuration value may be set to
           a false value to disable ``spawn_tree_locals``, ``spawning_greenlet``,
           and ``spawning_stack``. The first two will be None in that case, and the
           latter will be empty.

        .. versionchanged:: 1.5
           Greenlet objects are now more careful to verify that their ``parent`` is really
           a gevent hub, raising a ``TypeError`` earlier instead of an ``AttributeError`` later.

        .. versionchanged:: 20.12.1
           Greenlet objects now function as context managers. Exiting the ``with`` suite
           ensures that the greenlet has completed by :meth:`joining <join>`
           the greenlet (blocking, with
           no timeout). If the body of the suite raises an exception, the greenlet is
           :meth:`killed <kill>` with the default arguments and not joined in that case.
        """
        # The attributes are documented in the .rst file

        # greenlet.greenlet(run=None, parent=None)
        # Calling it with both positional arguments instead of a keyword
        # argument (parent=get_hub()) speeds up creation of this object ~30%:
        # python -m timeit -s 'import gevent' 'gevent.Greenlet()'
        # Python 3.5: 2.70usec with keywords vs 1.94usec with positional
        # Python 3.4: 2.32usec with keywords vs 1.74usec with positional
        # Python 3.3: 2.55usec with keywords vs 1.92usec with positional
        # Python 2.7: 1.73usec with keywords vs 1.40usec with positional

        # Timings taken Feb 21 2018 prior to integration of #755
        # python -m perf timeit -s 'import gevent' 'gevent.Greenlet()'
        # 3.6.4       : Mean +- std dev: 1.08 us +- 0.05 us
        # 2.7.14      : Mean +- std dev: 1.44 us +- 0.06 us
        # PyPy2 5.10.0: Mean +- std dev: 2.14 ns +- 0.08 ns

        # After the integration of spawning_stack, spawning_greenlet,
        # and spawn_tree_locals on that same date:
        # 3.6.4       : Mean +- std dev: 8.92 us +- 0.36 us -> 8.2x
        # 2.7.14      : Mean +- std dev: 14.8 us +- 0.5 us -> 10.2x
        # PyPy2 5.10.0: Mean +- std dev: 3.24 us +- 0.17 us -> 1.5x

        # Compiling with Cython gets us to these numbers:
        # 3.6.4        : Mean +- std dev: 3.63 us +- 0.14 us
        # 2.7.14       : Mean +- std dev: 3.37 us +- 0.20 us
        # PyPy2 5.10.0 : Mean +- std dev: 4.44 us +- 0.28 us

        # Switching to reified frames and some more tuning gets us here:
        # 3.7.2 : Mean +- std dev: 2.53 us +- 0.15 us
        # 2.7.16 : Mean +- std dev: 2.35 us +- 0.12 us
        # PyPy2 7.1 : Mean +- std dev: 11.6 us +- 0.4 us

        # Compared to the released 1.4 (tested at the same time):
        # 3.7.2 : Mean +- std dev: 3.21 us +- 0.32 us
        # 2.7.16 : Mean +- std dev: 3.11 us +- 0.19 us
        # PyPy2 7.1 : Mean +- std dev: 12.3 us +- 0.8 us

        _greenlet__init__(self, None, get_hub())

        if run is not None:
            self._run = run

        # If they didn't pass a callable at all, then they must
        # already have one. Note that subclassing to override the run() method
        # itself has never been documented or supported.
        if not callable(self._run):
            raise TypeError("The run argument or self._run must be callable")

        self.args = args
        self.kwargs = kwargs
        # Result of a successful run; None until the greenlet finishes.
        # NOTE(review): presumably assigned by the completion path elsewhere
        # in this class — confirm.
        self.value = None

        #: An event, such as a timer or a callback that fires. It is established in
        #: start() and start_later() as those two objects, respectively.
        #: Once this becomes non-None, the Greenlet cannot be started again. Conversely,
        #: kill() and throw() check for non-None to determine if this object has ever been
        #: scheduled for starting. A placeholder _cancelled_start_event is assigned by them to prevent
        #: the greenlet from being started in the future, if necessary.
        #: In the usual case, this transitions as follows: None -> event -> _start_completed_event.
        #: A value of None means we've never been started.
        self._start_event = None

        self._notifier = None
        self._formatted_info = None
        self._links = []
        self._ident = None

        # Initial state: None.
        # Completed successfully: (None, None, None)
        # Failed with exception: (t, v, dump_traceback(tb)))
        self._exc_info = None

        if GEVENT_CONFIG.track_greenlet_tree:
            spawner = getcurrent() # pylint:disable=undefined-variable
            self.spawning_greenlet = wref(spawner)
            try:
                self.spawn_tree_locals = spawner.spawn_tree_locals
            except AttributeError:
                self.spawn_tree_locals = {}
                if get_generic_parent(spawner) is not None: # pylint:disable=undefined-variable
                    # The main greenlet has no parent.
                    # Its children get separate locals.
                    spawner.spawn_tree_locals = self.spawn_tree_locals

            self.spawning_stack = _extract_stack(self.spawning_stack_limit)
            # Don't copy the spawning greenlet's
            # '_spawning_stack_frames' into ours. That's somewhat
            # confusing, and, if we're not careful, a deep spawn tree
            # can lead to excessive memory usage (an infinite spawning
            # tree could lead to unbounded memory usage without care
            # --- see https://github.com/gevent/gevent/issues/1371)
            # The _spawning_stack_frames may be cleared out later if we access spawning_stack
        else:
            # None is the default for all of these in Cython, but we
            # need to declare them for pure-Python mode.
            self.spawning_greenlet = None
            self.spawn_tree_locals = None
            self.spawning_stack = None
    def _get_minimal_ident(self):
        """Ask this greenlet's hub for a small sequential id (see minimal_ident)."""
        # Helper function for cython, to allow typing `reg` and making a
        # C call to get_ident.

        # If we're being accessed from a hub different than the one running
        # us, aka get_hub() is not self.parent, then calling hub.ident_registry.get_ident()
        # may be quietly broken: it's not thread safe.
        # If our parent is no longer the hub for whatever reason, this will raise a
        # AttributeError or TypeError.
        hub = get_my_hub(self) # pylint:disable=undefined-variable
        reg = hub.ident_registry
        return reg.get_ident(self)
    @property
    def minimal_ident(self):
        """
        A small, unique non-negative integer that identifies this object.

        This is similar to :attr:`threading.Thread.ident` (and `id`)
        in that as long as this object is alive, no other greenlet *in
        this hub* will have the same id, but it makes a stronger
        guarantee that the assigned values will be small and
        sequential. Sometime after this object has died, the value
        will be available for reuse.

        To get ids that are unique across all hubs, combine this with
        the hub's (``self.parent``) ``minimal_ident``.

        Accessing this property from threads other than the thread running
        this greenlet is not defined.

        .. versionadded:: 1.3a2
        """
        # Not @Lazy, implemented manually because _ident is in the structure
        # of the greenlet for fast access
        if self._ident is None:
            self._ident = self._get_minimal_ident()
        return self._ident
    @readproperty
    def name(self):
        """
        The greenlet name. By default, a unique name is constructed using
        the :attr:`minimal_ident`. You can assign a string to this
        value to change it. It is shown in the `repr` of this object if it
        has been assigned to or if the `minimal_ident` has already been generated.

        .. versionadded:: 1.3a2
        .. versionchanged:: 1.4
           Stop showing generated names in the `repr` when the ``minimal_ident``
           hasn't been requested. This reduces overhead and may be less confusing,
           since ``minimal_ident`` can get reused.
        """
        # @readproperty: assigning to .name replaces this computed default.
        return 'Greenlet-%d' % (self.minimal_ident,)
    def _raise_exception(self):
        # Re-raise (with traceback) the exception that terminated this greenlet.
        reraise(*self.exc_info)
    @property
    def loop(self):
        """The event loop of this greenlet's hub."""
        # needed by killall
        hub = get_my_hub(self) # pylint:disable=undefined-variable
        return hub.loop
    def __bool__(self):
        # True only while active: a start has been scheduled/completed
        # (non-None start event) and no result has been recorded yet
        # (_exc_info is set on both success and failure).
        return self._start_event is not None and self._exc_info is None
    ### Lifecycle

    if _PYPY:
        # oops - pypy's .dead relies on __nonzero__ which we overridden above
        @property
        def dead(self):
            "Boolean indicating that the greenlet is dead and will not run again."
            # pylint:disable=no-member
            if self._greenlet__main:
                return False
            if self.__start_cancelled_by_kill() or self.__started_but_aborted():
                return True

            return self._greenlet__started and not _continulet.is_pending(self)
    else:
        @property
        def dead(self):
            """
            Boolean indicating that the greenlet is dead and will not run again.

            This is true if:

            1. We were never started, but were :meth:`killed <kill>`
               immediately after creation (not possible with :meth:`spawn`); OR
            2. We were started, but were killed before running; OR
            3. We have run and terminated (by raising an exception out of the
               started function or by reaching the end of the started function).
            """
            return (
                self.__start_cancelled_by_kill()
                or self.__started_but_aborted()
                or greenlet.dead.__get__(self)
            )
    def __never_started_or_killed(self):
        # None means start()/start_later() never ran and kill()/throw()
        # never installed the cancelled placeholder.
        return self._start_event is None
    def __start_pending(self):
        # A scheduled start exists and has not fired yet ('pending' for
        # callbacks; timers presumably expose 'active' instead — hence getattr).
        return (
            self._start_event is not None
            and (self._start_event.pending or getattr(self._start_event, 'active', False))
        )
    def __start_cancelled_by_kill(self):
        # kill()/throw() replace the start event with this placeholder.
        return self._start_event is _cancelled_start_event
    def __start_completed(self):
        # The scheduled start event fired and we actually began running.
        return self._start_event is _start_completed_event
    def __started_but_aborted(self):
        return (
            not self.__never_started_or_killed() # we have been started or killed
            and not self.__start_cancelled_by_kill() # we weren't killed, so we must have been started
            and not self.__start_completed() # the start never completed
            and not self.__start_pending() # and we're not pending, so we must have been aborted
        )
    def __cancel_start(self):
        """Stop any scheduled start and forbid starting in the future."""
        if self._start_event is None:
            # prevent self from ever being started in the future
            self._start_event = _cancelled_start_event
        # cancel any pending start event
        # (stop()/close() on the placeholder objects are harmless no-ops)
        # NOTE: If this was a real pending start event, this will leave a
        # "dangling" callback/timer object in the hub.loop.callbacks list;
        # depending on where we are in the event loop, it may even be in a local
        # variable copy of that list (in _run_callbacks). This isn't a problem,
        # except for the leak-tests.
        self._start_event.stop()
        self._start_event.close()
def __handle_death_before_start(self, args):
# args is (t, v, tb) or simply t or v.
# The last two cases are transformed into (t, v, None);
# if the single argument is an exception type, a new instance
# is created; if the single argument is not an exception type and also
# not an exception, it is wrapped in a BaseException (this is not
# documented, but should result in better behaviour in the event of a
# user error---instead of silently printing something to stderr, we still
# kill the greenlet).
if self._exc_info is None and self.dead:
# the greenlet was never switched to before and it will
# never be; _report_error was not called, the result was
# not set, and the links weren't notified. Let's do it
# here.
#
# checking that self.dead is true is essential, because
# throw() does not necessarily kill the greenlet (if the
# exception raised by throw() is caught somewhere inside
# the greenlet).
if len(args) == 1:
arg = args[0]
if isinstance(arg, type) and issubclass(arg, BaseException):
args = (arg, arg(), None)
else:
args = (type(arg), arg, None)
elif not args:
args = (GreenletExit, GreenletExit(), None)
if not issubclass(args[0], BaseException):
# Random non-type, non-exception arguments.
args = (BaseException, BaseException(args), None)
assert issubclass(args[0], BaseException)
self.__report_error(args)
@property
def started(self):
    # DEPRECATED.  Equivalent to ``bool(self)``; kept for backwards
    # compatibility with older callers.
    return bool(self)
def ready(self):
    """
    Return a true value if and only if the greenlet has finished
    execution (either normally or because it is dead).

    .. versionchanged:: 1.1
       This function is only guaranteed to return true or false *values*,
       not necessarily the literal constants ``True`` or ``False``.
    """
    # A dead greenlet is always "ready"; otherwise the presence of a
    # recorded (type, value, tb) triple marks completion.
    if self.dead:
        return True
    return self._exc_info is not None
def successful(self):
    """
    Return a true value if and only if the greenlet has finished execution
    successfully, that is, without raising an error.

    .. tip:: A greenlet that has been killed with the default
       :class:`GreenletExit` exception is considered successful.
       That is, ``GreenletExit`` is not considered an error.

    .. note:: This function is only guaranteed to return true or false *values*,
       not necessarily the literal constants ``True`` or ``False``.
    """
    # Not finished yet: cannot have succeeded.
    outcome = self._exc_info
    if outcome is None:
        return False
    # Success is recorded as a (None, None, None)-style triple; an error
    # stores the exception instance in slot 1.
    return outcome[1] is None
def __repr__(self):
classname = self.__class__.__name__
# If no name has been assigned, don't generate one, including a minimal_ident,
# if not necessary. This reduces the use of weak references and associated
# overhead.
if 'name' not in self.__dict__ and self._ident is None:
name = ' '
else:
name = ' "%s" ' % (self.name,)
result = '<%s%sat %s' % (classname, name, hex(id(self)))
formatted = self._formatinfo()
if formatted:
result += ': ' + formatted
return result + '>'
def _formatinfo(self):
info = self._formatted_info
if info is not None:
return info
# Are we running an arbitrary function provided to the constructor,
# or did a subclass override _run?
func = self._run
im_self = getattr(func, '__self__', None)
if im_self is self:
funcname = '_run'
elif im_self is not None:
funcname = repr(func)
else:
funcname = getattr(func, '__name__', '') or repr(func)
result = funcname
args = []
if self.args:
args = [repr(x)[:50] for x in self.args]
if self.kwargs:
args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self.kwargs.items()])
if args:
result += '(' + ', '.join(args) + ')'
# it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
self._formatted_info = result
return result
@property
def exception(self):
    """
    Holds the exception instance raised by the function if the
    greenlet has finished with an error. Otherwise ``None``.
    """
    info = self._exc_info
    if info is None:
        # Never finished (or result not yet reported).
        return None
    # Slot 1 is the exception instance, or None on success.
    return info[1]
@property
def exc_info(self):
    """
    Holds the exc_info three-tuple raised by the function if the
    greenlet finished with an error. Otherwise a false value.

    .. note:: This is a provisional API and may change.

    .. versionadded:: 1.1
    """
    # _exc_info is None until the greenlet finishes; slot 0 is None when
    # it finished successfully, so only a real error reaches the body.
    ei = self._exc_info
    if ei is not None and ei[0] is not None:
        return (
            ei[0],
            ei[1],
            # The pickled traceback may be None if we couldn't pickle it.
            load_traceback(ei[2]) if ei[2] else None
        )
    # Implicitly returns None here: that is the documented "false value"
    # for the no-error case.
def throw(self, *args):
"""Immediately switch into the greenlet and raise an exception in it.
Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
To raise an exception in a safe manner from any greenlet, use :meth:`kill`.
If a greenlet was started but never switched to yet, then also
a) cancel the event that will start it
b) fire the notifications as if an exception was raised in a greenlet
"""
self.__cancel_start()
try:
if not self.dead:
# Prevent switching into a greenlet *at all* if we had never
# started it. Usually this is the same thing that happens by throwing,
# but if this is done from the hub with nothing else running, prevents a
# LoopExit.
greenlet.throw(self, *args)
finally:
self.__handle_death_before_start(args)
def start(self):
"""Schedule the greenlet to run in this loop iteration"""
if self._start_event is None:
_call_spawn_callbacks(self)
hub = get_my_hub(self) # pylint:disable=undefined-variable
self._start_event = hub.loop.run_callback(self.switch)
def start_later(self, seconds):
"""
start_later(seconds) -> None
Schedule the greenlet to run in the future loop iteration
*seconds* later
"""
if self._start_event is None:
_call_spawn_callbacks(self)
hub = get_my_hub(self) # pylint:disable=undefined-variable
self._start_event = hub.loop.timer(seconds)
self._start_event.start(self.switch)
@staticmethod
def add_spawn_callback(callback):
"""
add_spawn_callback(callback) -> None
Set up a *callback* to be invoked when :class:`Greenlet` objects
are started.
The invocation order of spawn callbacks is unspecified. Adding the
same callback more than one time will not cause it to be called more
than once.
.. versionadded:: 1.4.0
"""
global _spawn_callbacks
if _spawn_callbacks is None: # pylint:disable=used-before-assignment
_spawn_callbacks = set()
_spawn_callbacks.add(callback)
@staticmethod
def remove_spawn_callback(callback):
"""
remove_spawn_callback(callback) -> None
Remove *callback* function added with :meth:`Greenlet.add_spawn_callback`.
This function will not fail if *callback* has been already removed or
if *callback* was never added.
.. versionadded:: 1.4.0
"""
global _spawn_callbacks
if _spawn_callbacks is not None:
_spawn_callbacks.discard(callback)
if not _spawn_callbacks:
_spawn_callbacks = None
@classmethod
def spawn(cls, *args, **kwargs):
"""
spawn(function, *args, **kwargs) -> Greenlet
Create a new :class:`Greenlet` object and schedule it to run ``function(*args, **kwargs)``.
This can be used as ``gevent.spawn`` or ``Greenlet.spawn``.
The arguments are passed to :meth:`Greenlet.__init__`.
.. versionchanged:: 1.1b1
If a *function* is given that is not callable, immediately raise a :exc:`TypeError`
instead of spawning a greenlet that will raise an uncaught TypeError.
"""
g = cls(*args, **kwargs)
g.start()
return g
@classmethod
def spawn_later(cls, seconds, *args, **kwargs):
    """
    spawn_later(seconds, function, *args, **kwargs) -> Greenlet

    Create and return a new `Greenlet` object scheduled to run ``function(*args, **kwargs)``
    in a future loop iteration *seconds* later. This can be used as ``Greenlet.spawn_later``
    or ``gevent.spawn_later``.

    The arguments are passed to :meth:`Greenlet.__init__`.

    .. versionchanged:: 1.1b1
       If an argument that's meant to be a function (the first argument in *args*, or the ``run`` keyword )
       is given to this classmethod (and not a classmethod of a subclass),
       it is verified to be callable. Previously, the spawned greenlet would have failed
       when it started running.
    """
    # Only validate on the base class: a subclass may provide its own
    # ``_run`` implementation and legitimately take no function argument.
    if cls is Greenlet and not args and 'run' not in kwargs:
        # Fix: previously raised ``TypeError("")`` with an empty,
        # unhelpful message.
        raise TypeError(
            "spawn_later() requires a function to run: pass it as the "
            "first positional argument or as the 'run' keyword")
    g = cls(*args, **kwargs)
    g.start_later(seconds)
    return g
def _maybe_kill_before_start(self, exception):
# Helper for Greenlet.kill(), and also for killall()
self.__cancel_start()
self.__free()
dead = self.dead
if dead:
if isinstance(exception, tuple) and len(exception) == 3:
args = exception
else:
args = (exception,)
self.__handle_death_before_start(args)
return dead
def kill(self, exception=GreenletExit, block=True, timeout=None):
    """
    Raise the ``exception`` in the greenlet.

    If ``block`` is ``True`` (the default), wait until the greenlet
    dies or the optional timeout expires; this may require switching
    greenlets.
    If block is ``False``, the current greenlet is not unscheduled.

    This function always returns ``None`` and never raises an error. It
    may be called multiple times on the same greenlet object, and may be
    called on an unstarted or dead greenlet.

    .. note::

        Depending on what this greenlet is executing and the state
        of the event loop, the exception may or may not be raised
        immediately when this greenlet resumes execution. It may
        be raised on a subsequent green call, or, if this greenlet
        exits before making such a call, it may not be raised at
        all. As of 1.1, an example where the exception is raised
        later is if this greenlet had called :func:`sleep(0)
        <gevent.sleep>`; an example where the exception is raised
        immediately is if this greenlet had called
        :func:`sleep(0.1) <gevent.sleep>`.

    .. caution::

        Use care when killing greenlets. If the code executing is not
        exception safe (e.g., makes proper use of ``finally``) then an
        unexpected exception could result in corrupted state. Using
        a :meth:`link` or :meth:`rawlink` (cheaper) may be a safer way to
        clean up resources.

    See also :func:`gevent.kill` and :func:`gevent.killall`.

    :keyword type exception: The type of exception to raise in the greenlet. The default
        is :class:`GreenletExit`, which indicates a :meth:`successful` completion
        of the greenlet.

    .. versionchanged:: 0.13.0
        *block* is now ``True`` by default.
    .. versionchanged:: 1.1a2
        If this greenlet had never been switched to, killing it will
        prevent it from *ever* being switched to. Links (:meth:`rawlink`)
        will still be executed, though.
    .. versionchanged:: 20.12.1
        If this greenlet is :meth:`ready`, immediately return instead of
        requiring a trip around the event loop.
    """
    # If the greenlet was never started, _maybe_kill_before_start() both
    # prevents it from ever starting and fires its links; nothing more
    # to do in that case.
    if not self._maybe_kill_before_start(exception):
        if self.ready():
            # Already finished: no trip around the event loop needed.
            return

        waiter = Waiter() if block else None # pylint:disable=undefined-variable
        hub = get_my_hub(self) # pylint:disable=undefined-variable
        # The actual throw happens later, in the hub, via _kill; this
        # call only schedules it.
        hub.loop.run_callback(_kill, self, exception, waiter)
        if waiter is not None:
            # Wait for _kill to run, then for the greenlet to die (or
            # the timeout to expire).
            waiter.get()
            self.join(timeout)
def get(self, block=True, timeout=None):
"""
get(block=True, timeout=None) -> object
Return the result the greenlet has returned or re-raise the
exception it has raised.
If block is ``False``, raise :class:`gevent.Timeout` if the
greenlet is still alive. If block is ``True``, unschedule the
current greenlet until the result is available or the timeout
expires. In the latter case, :class:`gevent.Timeout` is
raised.
"""
if self.ready():
if self.successful():
return self.value
self._raise_exception()
if not block:
raise Timeout()
switch = getcurrent().switch # pylint:disable=undefined-variable
self.rawlink(switch)
try:
t = Timeout._start_new_or_dummy(timeout)
try:
result = get_my_hub(self).switch() # pylint:disable=undefined-variable
if result is not self:
raise InvalidSwitchError('Invalid switch into Greenlet.get(): %r' % (result, ))
finally:
t.cancel()
except:
# unlinking in 'except' instead of finally is an optimization:
# if switch occurred normally then link was already removed in _notify_links
# and there's no need to touch the links set.
# Note, however, that if "Invalid switch" assert was removed and invalid switch
# did happen, the link would remain, causing another invalid switch later in this greenlet.
self.unlink(switch)
raise
if self.ready():
if self.successful():
return self.value
self._raise_exception()
def join(self, timeout=None):
"""
join(timeout=None) -> None
Wait until the greenlet finishes or *timeout* expires. Return
``None`` regardless.
"""
if self.ready():
return
switch = getcurrent().switch # pylint:disable=undefined-variable
self.rawlink(switch)
try:
t = Timeout._start_new_or_dummy(timeout)
try:
result = get_my_hub(self).switch() # pylint:disable=undefined-variable
if result is not self:
raise InvalidSwitchError('Invalid switch into Greenlet.join(): %r' % (result, ))
finally:
t.cancel()
except Timeout as ex:
self.unlink(switch)
if ex is not t:
raise
except:
self.unlink(switch)
raise
def __enter__(self):
return self
def __exit__(self, t, v, tb):
if t is None:
try:
self.join()
finally:
self.kill()
else:
self.kill((t, v, tb))
def __report_result(self, result):
self._exc_info = (None, None, None)
self.value = result
if self._links and not self._notifier:
hub = get_my_hub(self) # pylint:disable=undefined-variable
self._notifier = hub.loop.run_callback(self._notify_links)
def __report_error(self, exc_info):
if isinstance(exc_info[1], GreenletExit):
self.__report_result(exc_info[1])
return
# Depending on the error, we may not be able to pickle it.
# In particular, RecursionError can be a problem.
try:
tb = dump_traceback(exc_info[2])
except: # pylint:disable=bare-except
tb = None
self._exc_info = exc_info[0], exc_info[1], tb
hub = get_my_hub(self) # pylint:disable=undefined-variable
if self._links and not self._notifier:
self._notifier = hub.loop.run_callback(self._notify_links)
try:
hub.handle_error(self, *exc_info)
finally:
del exc_info
def run(self):
try:
self.__cancel_start()
self._start_event = _start_completed_event
try:
result = self._run(*self.args, **self.kwargs)
except: # pylint:disable=bare-except
self.__report_error(sys_exc_info())
else:
self.__report_result(result)
finally:
self.__free()
def __free(self):
try:
# It seems that Cython 0.29.13 sometimes miscompiles
# self.__dict__.pop('_run', None) ? When we moved this out of the
# inline finally: block in run(), we started getting strange
# exceptions from places that subclassed Greenlet.
del self._run
except AttributeError:
pass
self.args = ()
self.kwargs.clear()
def _run(self):
"""
Subclasses may override this method to take any number of
arguments and keyword arguments.
.. versionadded:: 1.1a3
Previously, if no callable object was
passed to the constructor, the spawned greenlet would later
fail with an AttributeError.
"""
# We usually override this in __init__
# pylint: disable=method-hidden
return
def has_links(self):
    """Return the number of registered links (truthy when any exist)."""
    links = self._links
    return len(links)
def rawlink(self, callback):
"""
Register a callable to be executed when the greenlet finishes
execution.
The *callback* will be called with this instance as an
argument.
The *callback* will be called even if linked after the greenlet
is already ready().
.. caution::
The *callback* will be called in the hub and
**MUST NOT** raise an exception.
"""
if not callable(callback):
raise TypeError('Expected callable: %r' % (callback, ))
self._links.append(callback) # pylint:disable=no-member
if self.ready() and self._links and not self._notifier:
hub = get_my_hub(self) # pylint:disable=undefined-variable
self._notifier = hub.loop.run_callback(self._notify_links)
def link(self, callback, SpawnedLink=SpawnedLink):
"""
Link greenlet's completion to a callable.
The *callback* will be called with this instance as an
argument once this greenlet is dead. A callable is called in
its own :class:`greenlet.greenlet` (*not* a
:class:`Greenlet`).
The *callback* will be called even if linked after the greenlet
is already ready().
"""
# XXX: Is the redefinition of SpawnedLink supposed to just be an
# optimization, or do people use it? It's not documented
# pylint:disable=redefined-outer-name
self.rawlink(SpawnedLink(callback))
def unlink(self, callback):
    """Remove the callback set by :meth:`link` or :meth:`rawlink`"""
    # Removing an absent callback is a silent no-op.
    links = self._links
    if callback in links:
        links.remove(callback) # pylint:disable=no-member
def unlink_all(self):
"""
Remove all the callbacks.
.. versionadded:: 1.3a2
"""
del self._links[:]
def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
"""
Like :meth:`link` but *callback* is only notified when the greenlet
has completed successfully.
"""
# pylint:disable=redefined-outer-name
self.link(callback, SpawnedLink=SpawnedLink)
def link_exception(self, callback, SpawnedLink=FailureSpawnedLink):
"""
Like :meth:`link` but *callback* is only notified when the
greenlet dies because of an unhandled exception.
"""
# pylint:disable=redefined-outer-name
self.link(callback, SpawnedLink=SpawnedLink)
def _notify_links(self):
while self._links:
# Early links are allowed to remove later links
# before we get to them, and they're also allowed to
# add new links, so we have to be careful about iterating.
# We don't expect this list to be very large, so the time spent
# manipulating it should be small. a deque is probably not justified.
# Cython has optimizations to transform this into a memmove anyway.
link = self._links.pop(0)
try:
link(self)
except: # pylint:disable=bare-except, undefined-variable
get_my_hub(self).handle_error((link, self), *sys_exc_info())
|
Greenlet
|
python
|
walkccc__LeetCode
|
solutions/2469. Convert the Temperature/2469.py
|
{
"start": 0,
"end": 129
}
|
class ____:
def convertTemperature(self, celsius: float) -> list[float]:
return [celsius + 273.15, celsius * 1.8 + 32]
|
Solution
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/triggers/file.py
|
{
"start": 3210,
"end": 4877
}
|
class ____(BaseEventTrigger):
    """
    Trigger that waits for one specific file to appear, deletes it, and then
    fires exactly one event.

    Unlike ``FileTrigger``, this trigger can only watch a single concrete
    path, and it removes the file once found.

    :param filepath: File (relative to the base path set within the connection).
    :param poke_interval: Time that the job should wait in between each try
    """

    def __init__(self, filepath: str, poke_interval: float = 5.0, **kwargs):
        super().__init__()
        self.filepath = filepath
        self.poke_interval = poke_interval

    def serialize(self) -> tuple[str, dict[str, Any]]:
        """Serialize FileDeleteTrigger arguments and classpath."""
        classpath = "airflow.providers.standard.triggers.file.FileDeleteTrigger"
        init_kwargs = {
            "filepath": self.filepath,
            "poke_interval": self.poke_interval,
        }
        return classpath, init_kwargs

    async def run(self) -> AsyncIterator[TriggerEvent]:
        """Sleep in ``poke_interval`` steps until the file exists, then delete it and fire."""
        while not os.path.isfile(self.filepath):
            await asyncio.sleep(self.poke_interval)
        # Log the file's mtime before deleting it, for traceability.
        stamp = os.path.getmtime(self.filepath)
        readable = datetime.datetime.fromtimestamp(stamp).strftime("%Y%m%d%H%M%S")
        self.log.info("Found file %s last modified: %s", self.filepath, readable)
        os.remove(self.filepath)
        self.log.info("File %s has been deleted", self.filepath)
        yield TriggerEvent(True)
|
FileDeleteTrigger
|
python
|
scrapy__scrapy
|
tests/test_http_request.py
|
{
"start": 449,
"end": 16007
}
|
class ____:
request_class = Request
default_method = "GET"
default_headers: dict[bytes, list[bytes]] = {}
default_meta: dict[str, Any] = {}
def test_init(self):
    """Constructor contract: required url, type checks, defaults, copying."""
    # Request requires url in the __init__ method
    with pytest.raises(TypeError):
        self.request_class()

    # url argument must be basestring
    with pytest.raises(TypeError):
        self.request_class(123)

    # Fix: the original constructed the identical request twice in a row;
    # one construction is enough.
    r = self.request_class("http://www.example.com")
    assert isinstance(r.url, str)
    assert r.url == "http://www.example.com"
    assert r.method == self.default_method
    assert isinstance(r.headers, Headers)
    assert r.headers == self.default_headers
    assert r.meta == self.default_meta

    # meta and headers are copied into the request, not shared.
    meta = {"lala": "lolo"}
    headers = {b"caca": b"coco"}
    r = self.request_class(
        "http://www.example.com", meta=meta, headers=headers, body="a body"
    )
    assert r.meta is not meta
    assert r.meta == meta
    assert r.headers is not headers
    assert r.headers[b"caca"] == b"coco"
def test_url_scheme(self):
# This test passes by not raising any (ValueError) exception
self.request_class("http://example.org")
self.request_class("https://example.org")
self.request_class("s3://example.org")
self.request_class("ftp://example.org")
self.request_class("about:config")
self.request_class("data:,Hello%2C%20World!")
def test_url_no_scheme(self):
msg = "Missing scheme in request url:"
with pytest.raises(ValueError, match=msg):
self.request_class("foo")
with pytest.raises(ValueError, match=msg):
self.request_class("/foo/")
with pytest.raises(ValueError, match=msg):
self.request_class("/foo:bar")
def test_headers(self):
# Different ways of setting headers attribute
url = "http://www.scrapy.org"
headers = {b"Accept": "gzip", b"Custom-Header": "nothing to tell you"}
r = self.request_class(url=url, headers=headers)
p = self.request_class(url=url, headers=r.headers)
assert r.headers == p.headers
assert r.headers is not headers
assert p.headers is not r.headers
# headers must not be unicode
h = Headers({"key1": "val1", "key2": "val2"})
h["newkey"] = "newval"
for k, v in h.items():
assert isinstance(k, bytes)
for s in v:
assert isinstance(s, bytes)
def test_eq(self):
url = "http://www.scrapy.org"
r1 = self.request_class(url=url)
r2 = self.request_class(url=url)
assert r1 != r2
set_ = set()
set_.add(r1)
set_.add(r2)
assert len(set_) == 2
def test_url(self):
r = self.request_class(url="http://www.scrapy.org/path")
assert r.url == "http://www.scrapy.org/path"
def test_url_quoting(self):
r = self.request_class(url="http://www.scrapy.org/blank%20space")
assert r.url == "http://www.scrapy.org/blank%20space"
r = self.request_class(url="http://www.scrapy.org/blank space")
assert r.url == "http://www.scrapy.org/blank%20space"
def test_url_encoding(self):
r = self.request_class(url="http://www.scrapy.org/price/£")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
def test_url_encoding_other(self):
# encoding affects only query part of URI, not path
# path part should always be UTF-8 encoded before percent-escaping
r = self.request_class(url="http://www.scrapy.org/price/£", encoding="utf-8")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
r = self.request_class(url="http://www.scrapy.org/price/£", encoding="latin1")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
def test_url_encoding_query(self):
r1 = self.request_class(url="http://www.scrapy.org/price/£?unit=µ")
assert r1.url == "http://www.scrapy.org/price/%C2%A3?unit=%C2%B5"
# should be same as above
r2 = self.request_class(
url="http://www.scrapy.org/price/£?unit=µ", encoding="utf-8"
)
assert r2.url == "http://www.scrapy.org/price/%C2%A3?unit=%C2%B5"
def test_url_encoding_query_latin1(self):
# encoding is used for encoding query-string before percent-escaping;
# path is still UTF-8 encoded before percent-escaping
r3 = self.request_class(
url="http://www.scrapy.org/price/µ?currency=£", encoding="latin1"
)
assert r3.url == "http://www.scrapy.org/price/%C2%B5?currency=%A3"
def test_url_encoding_nonutf8_untouched(self):
# percent-escaping sequences that do not match valid UTF-8 sequences
# should be kept untouched (just upper-cased perhaps)
#
# See https://datatracker.ietf.org/doc/html/rfc3987#section-3.2
#
# "Conversions from URIs to IRIs MUST NOT use any character encoding
# other than UTF-8 in steps 3 and 4, even if it might be possible to
# guess from the context that another character encoding than UTF-8 was
# used in the URI. For example, the URI
# "http://www.example.org/r%E9sum%E9.html" might with some guessing be
# interpreted to contain two e-acute characters encoded as iso-8859-1.
# It must not be converted to an IRI containing these e-acute
# characters. Otherwise, in the future the IRI will be mapped to
# "http://www.example.org/r%C3%A9sum%C3%A9.html", which is a different
# URI from "http://www.example.org/r%E9sum%E9.html".
r1 = self.request_class(url="http://www.scrapy.org/price/%a3")
assert r1.url == "http://www.scrapy.org/price/%a3"
r2 = self.request_class(url="http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3")
assert r2.url == "http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3"
r3 = self.request_class(url="http://www.scrapy.org/résumé/%a3")
assert r3.url == "http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3"
r4 = self.request_class(url="http://www.example.org/r%E9sum%E9.html")
assert r4.url == "http://www.example.org/r%E9sum%E9.html"
def test_body(self):
r1 = self.request_class(url="http://www.example.com/")
assert r1.body == b""
r2 = self.request_class(url="http://www.example.com/", body=b"")
assert isinstance(r2.body, bytes)
assert r2.encoding == "utf-8" # default encoding
r3 = self.request_class(
url="http://www.example.com/", body="Price: \xa3100", encoding="utf-8"
)
assert isinstance(r3.body, bytes)
assert r3.body == b"Price: \xc2\xa3100"
r4 = self.request_class(
url="http://www.example.com/", body="Price: \xa3100", encoding="latin1"
)
assert isinstance(r4.body, bytes)
assert r4.body == b"Price: \xa3100"
def test_copy(self):
    """Test Request copy: callbacks preserved, containers shallow-copied."""

    def somecallback():
        pass

    r1 = self.request_class(
        "http://www.example.com",
        flags=["f1", "f2"],
        callback=somecallback,
        errback=somecallback,
    )
    r1.meta["foo"] = "bar"
    r1.cb_kwargs["key"] = "value"
    r2 = r1.copy()

    # copy() preserves callback and errback (the original comment claimed
    # the opposite of what the assertions check).
    assert r1.callback is somecallback
    assert r1.errback is somecallback
    assert r2.callback is r1.callback
    # Fix: this previously compared r2.errback with itself (a tautology).
    assert r2.errback is r1.errback

    # make sure flags list is shallow copied
    assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical"
    assert r1.flags == r2.flags

    # make sure cb_kwargs dict is shallow copied
    assert r1.cb_kwargs is not r2.cb_kwargs, (
        "cb_kwargs must be a shallow copy, not identical"
    )
    assert r1.cb_kwargs == r2.cb_kwargs

    # make sure meta dict is shallow copied
    assert r1.meta is not r2.meta, "meta must be a shallow copy, not identical"
    assert r1.meta == r2.meta

    # make sure headers attribute is shallow copied
    assert r1.headers is not r2.headers, (
        "headers must be a shallow copy, not identical"
    )
    assert r1.headers == r2.headers
    assert r1.encoding == r2.encoding
    assert r1.dont_filter == r2.dont_filter

    # Request.body can be identical since it's an immutable object (str)
def test_copy_inherited_classes(self):
"""Test Request children copies preserve their class"""
class CustomRequest(self.request_class):
pass
r1 = CustomRequest("http://www.example.com")
r2 = r1.copy()
assert isinstance(r2, CustomRequest)
def test_replace(self):
"""Test Request.replace() method"""
r1 = self.request_class("http://www.example.com", method="GET")
hdrs = Headers(r1.headers)
hdrs[b"key"] = b"value"
r2 = r1.replace(method="POST", body="New body", headers=hdrs)
assert r1.url == r2.url
assert (r1.method, r2.method) == ("GET", "POST")
assert (r1.body, r2.body) == (b"", b"New body")
assert (r1.headers, r2.headers) == (self.default_headers, hdrs)
# Empty attributes (which may fail if not compared properly)
r3 = self.request_class(
"http://www.example.com", meta={"a": 1}, dont_filter=True
)
r4 = r3.replace(
url="http://www.example.com/2", body=b"", meta={}, dont_filter=False
)
assert r4.url == "http://www.example.com/2"
assert r4.body == b""
assert r4.meta == {}
assert r4.dont_filter is False
def test_method_always_str(self):
r = self.request_class("http://www.example.com", method="POST")
assert isinstance(r.method, str)
def test_immutable_attributes(self):
r = self.request_class("http://example.com")
with pytest.raises(AttributeError):
r.url = "http://example2.com"
with pytest.raises(AttributeError):
r.body = "xxx"
def test_callback_and_errback(self):
def a_function():
pass
r1 = self.request_class("http://example.com")
assert r1.callback is None
assert r1.errback is None
r2 = self.request_class("http://example.com", callback=a_function)
assert r2.callback is a_function
assert r2.errback is None
r3 = self.request_class("http://example.com", errback=a_function)
assert r3.callback is None
assert r3.errback is a_function
r4 = self.request_class(
url="http://example.com",
callback=a_function,
errback=a_function,
)
assert r4.callback is a_function
assert r4.errback is a_function
r5 = self.request_class(
url="http://example.com",
callback=NO_CALLBACK,
errback=NO_CALLBACK,
)
assert r5.callback is NO_CALLBACK
assert r5.errback is NO_CALLBACK
def test_callback_and_errback_type(self):
with pytest.raises(TypeError):
self.request_class("http://example.com", callback="a_function")
with pytest.raises(TypeError):
self.request_class("http://example.com", errback="a_function")
with pytest.raises(TypeError):
self.request_class(
url="http://example.com",
callback="a_function",
errback="a_function",
)
def test_no_callback(self):
with pytest.raises(RuntimeError):
NO_CALLBACK()
def test_from_curl(self):
# Note: more curated tests regarding curl conversion are in
# `test_utils_curl.py`
curl_command = (
"curl 'http://httpbin.org/post' -X POST -H 'Cookie: _gauges_unique"
"_year=1; _gauges_unique=1; _gauges_unique_month=1; _gauges_unique"
"_hour=1; _gauges_unique_day=1' -H 'Origin: http://httpbin.org' -H"
" 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: en-US,en;q"
"=0.9,ru;q=0.8,es;q=0.7' -H 'Upgrade-Insecure-Requests: 1' -H 'Use"
"r-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTM"
"L, like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.3202.75 S"
"afari/537.36' -H 'Content-Type: application /x-www-form-urlencode"
"d' -H 'Accept: text/html,application/xhtml+xml,application/xml;q="
"0.9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: max-age=0"
"' -H 'Referer: http://httpbin.org/forms/post' -H 'Connection: kee"
"p-alive' --data 'custname=John+Smith&custtel=500&custemail=jsmith"
"%40example.org&size=small&topping=cheese&topping=onion&delivery=1"
"2%3A15&comments=' --compressed"
)
r = self.request_class.from_curl(curl_command)
assert r.method == "POST"
assert r.url == "http://httpbin.org/post"
assert (
r.body == b"custname=John+Smith&custtel=500&custemail=jsmith%40"
b"example.org&size=small&topping=cheese&topping=onion"
b"&delivery=12%3A15&comments="
)
assert r.cookies == {
"_gauges_unique_year": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
}
assert r.headers == {
b"Origin": [b"http://httpbin.org"],
b"Accept-Encoding": [b"gzip, deflate"],
b"Accept-Language": [b"en-US,en;q=0.9,ru;q=0.8,es;q=0.7"],
b"Upgrade-Insecure-Requests": [b"1"],
b"User-Agent": [
b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537."
b"36 (KHTML, like Gecko) Ubuntu Chromium/62.0.3202"
b".75 Chrome/62.0.3202.75 Safari/537.36"
],
b"Content-Type": [b"application /x-www-form-urlencoded"],
b"Accept": [
b"text/html,application/xhtml+xml,application/xml;q=0."
b"9,image/webp,image/apng,*/*;q=0.8"
],
b"Cache-Control": [b"max-age=0"],
b"Referer": [b"http://httpbin.org/forms/post"],
b"Connection": [b"keep-alive"],
}
def test_from_curl_with_kwargs(self):
r = self.request_class.from_curl(
'curl -X PATCH "http://example.org"', method="POST", meta={"key": "value"}
)
assert r.method == "POST"
assert r.meta == {"key": "value"}
def test_from_curl_ignore_unknown_options(self):
# By default: it works and ignores the unknown options: --foo and -z
with warnings.catch_warnings(): # avoid warning when executing tests
warnings.simplefilter("ignore")
r = self.request_class.from_curl(
'curl -X DELETE "http://example.org" --foo -z',
)
assert r.method == "DELETE"
# If `ignore_unknown_options` is set to `False` it raises an error with
# the unknown options: --foo and -z
with pytest.raises(ValueError, match="Unrecognized options:"):
self.request_class.from_curl(
'curl -X PATCH "http://example.org" --foo -z',
ignore_unknown_options=False,
)
|
TestRequest
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/key_binding/key_bindings.py
|
{
"start": 4516,
"end": 5987
}
|
class ____(metaclass=ABCMeta):
    """
    Interface for a KeyBindings.

    Concrete key-binding collections implement this so that they can be
    merged and cached interchangeably.
    """

    @property
    @abstractmethod
    def _version(self) -> Hashable:
        """
        For cache invalidation. - This should increase every time that
        something changes.
        """
        # Inert default; subclasses are expected to override.
        return 0

    @abstractmethod
    def get_bindings_for_keys(self, keys: KeysTuple) -> list[Binding]:
        """
        Return a list of key bindings that can handle these keys.
        (This return also inactive bindings, so the `filter` still has to be
        called, for checking it.)

        :param keys: tuple of keys.
        """
        # Inert default; subclasses are expected to override.
        return []

    @abstractmethod
    def get_bindings_starting_with_keys(self, keys: KeysTuple) -> list[Binding]:
        """
        Return a list of key bindings that handle a key sequence starting with
        `keys`. (It does only return bindings for which the sequences are
        longer than `keys`. And like `get_bindings_for_keys`, it also includes
        inactive bindings.)

        :param keys: tuple of keys.
        """
        # Inert default; subclasses are expected to override.
        return []

    @property
    @abstractmethod
    def bindings(self) -> list[Binding]:
        """
        List of `Binding` objects.
        (These need to be exposed, so that `KeyBindings` objects can be merged
        together.)
        """
        # Inert default; subclasses are expected to override.
        return []

    # `add` and `remove` don't have to be part of this interface.
T = TypeVar("T", bound=Union[KeyHandlerCallable, Binding])
|
KeyBindingsBase
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_audio.py
|
{
"start": 26795,
"end": 32762
}
|
class ____(Data2VecAudioPreTrainedModel):
def __init__(self, config: Data2VecAudioConfig):
super().__init__(config)
self.config = config
self.feature_extractor = Data2VecAudioFeatureEncoder(config)
self.feature_projection = Data2VecAudioFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = Data2VecAudioEncoder(config)
self.adapter = Data2VecAudioAdapter(config) if config.add_adapter else None
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.feature_extractor._freeze_parameters()
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Data2VecAudioBaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Data2VecAudioBaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
_HIDDEN_STATES_START_POSITION = 2
@auto_docstring(
custom_intro="""
Data2VecAudio Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
"""
)
|
Data2VecAudioModel
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_daemon/daemon.py
|
{
"start": 14703,
"end": 15134
}
|
class ____(IntervalDaemon):
@classmethod
def daemon_type(cls) -> str:
return "MONITORING"
def run_iteration(
self,
workspace_process_context: IWorkspaceProcessContext,
) -> DaemonIterator:
yield from execute_run_monitoring_iteration(workspace_process_context, self._logger)
yield from execute_concurrency_slots_iteration(workspace_process_context, self._logger)
|
MonitoringDaemon
|
python
|
doocs__leetcode
|
solution/1900-1999/1992.Find All Groups of Farmland/Solution.py
|
{
"start": 0,
"end": 671
}
|
class ____:
def findFarmland(self, land: List[List[int]]) -> List[List[int]]:
m, n = len(land), len(land[0])
ans = []
for i in range(m):
for j in range(n):
if (
land[i][j] == 0
or (j > 0 and land[i][j - 1] == 1)
or (i > 0 and land[i - 1][j] == 1)
):
continue
x, y = i, j
while x + 1 < m and land[x + 1][j] == 1:
x += 1
while y + 1 < n and land[x][y + 1] == 1:
y += 1
ans.append([i, j, x, y])
return ans
|
Solution
|
python
|
python__mypy
|
mypy/patterns.py
|
{
"start": 2857,
"end": 3339
}
|
class ____(Pattern):
keys: list[Expression]
values: list[Pattern]
rest: NameExpr | None
def __init__(
self, keys: list[Expression], values: list[Pattern], rest: NameExpr | None
) -> None:
super().__init__()
assert len(keys) == len(values)
self.keys = keys
self.values = values
self.rest = rest
def accept(self, visitor: PatternVisitor[T]) -> T:
return visitor.visit_mapping_pattern(self)
|
MappingPattern
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 7342,
"end": 7427
}
|
class ____(BookSigningConfig, generic.YearArchiveView):
pass
|
BookSigningYearArchive
|
python
|
pytorch__pytorch
|
test/dynamo/test_bytecode_utils.py
|
{
"start": 21015,
"end": 21659
}
|
class ____(torch._dynamo.test_case.TestCase):
def test_bytecode_hook(self):
def fn(a, b):
return a - b * 10
def hook(code, out_code):
print(code)
print(out_code)
return code
torch._dynamo.reset()
handle = torch._dynamo.convert_frame.register_bytecode_hook(hook)
try:
opt_fn = torch.compile(fn)
for i in range(2, 12):
opt_fn(torch.randn(i), torch.randn(i))
finally:
handle.remove()
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
BytecodeHookTests
|
python
|
encode__django-rest-framework
|
tests/test_throttling.py
|
{
"start": 1479,
"end": 1630
}
|
class ____(APIView):
throttle_classes = (User3MinRateThrottle,)
def get(self, request):
return Response('foo')
|
MockView_MinuteThrottling
|
python
|
scipy__scipy
|
scipy/special/tests/test_basic.py
|
{
"start": 172350,
"end": 172676
}
|
class ____:
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_allclose(obl, array([-0.348602,
1.393206,
5.486800,
11.492120]),
atol=1.5e-5, rtol=0)
|
TestOblCvSeq
|
python
|
bokeh__bokeh
|
src/bokeh/models/dom.py
|
{
"start": 3635,
"end": 3821
}
|
class ____(Model, Qualified):
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
|
Action
|
python
|
doocs__leetcode
|
solution/2600-2699/2697.Lexicographically Smallest Palindrome/Solution.py
|
{
"start": 0,
"end": 245
}
|
class ____:
def makeSmallestPalindrome(self, s: str) -> str:
cs = list(s)
i, j = 0, len(s) - 1
while i < j:
cs[i] = cs[j] = min(cs[i], cs[j])
i, j = i + 1, j - 1
return "".join(cs)
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/assertsql.py
|
{
"start": 12801,
"end": 13652
}
|
class ____(AssertRule):
def __init__(self, *rules):
self.rules = list(rules)
def process_statement(self, execute_observed):
if not self.rules:
self.is_consumed = True
self.consume_statement = False
while self.rules:
rule = self.rules[0]
rule.process_statement(execute_observed)
if rule.is_consumed:
self.rules.pop(0)
elif rule.errormessage:
self.errormessage = rule.errormessage
if rule.consume_statement:
break
if not self.rules:
self.is_consumed = True
def no_more_statements(self):
if self.rules and not self.rules[0].is_consumed:
self.rules[0].no_more_statements()
elif self.rules:
super().no_more_statements()
|
EachOf
|
python
|
ansible__ansible
|
lib/ansible/modules/hostname.py
|
{
"start": 23102,
"end": 23243
}
|
class ____(Hostname):
platform = 'Linux'
distribution = 'Cloudlinuxserver'
strategy_class = RedHatStrategy
|
CloudlinuxserverHostname
|
python
|
django-extensions__django-extensions
|
tests/management/commands/shell_plus_tests/test_import_subclasses.py
|
{
"start": 614,
"end": 5748
}
|
class ____(AutomaticShellPlusImportsTestCase):
def test_imports_no_subclasses(self):
self.assert_imports()
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[],
)
def test_imports_empty_list(self):
self.assert_imports()
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass],
)
def test_imports_one_base_class(self):
self.assert_imports(
first={"FirstDerivedClass"},
second={"SecondDerivedClass"},
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[
"tests.testapp.classes_to_include.BaseIncludedClass"
],
)
def test_imports_one_base_class_as_string(self):
self.assert_imports(
first={"FirstDerivedClass"},
second={"SecondDerivedClass"},
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[IncludedMixin],
)
def test_imports_one_base_mixin(self):
self.assert_imports(
first={"FirstDerivedClass"},
third={"ThirdDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
)
def test_imports_two_base_classes(self):
self.assert_imports(
first={"FirstDerivedClass"},
second={"SecondDerivedClass"},
third={"ThirdDerivedClass"},
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST=settings.SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST
+ [
"tests.testapp",
],
)
def test_imports_two_base_classes_exclude_testapp(self):
self.assert_imports(
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST=settings.SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST
+ [
"tests.testapp.derived_classes_for_testing",
],
)
def test_imports_two_base_classes_exclude_derived_class_for_testing(self):
self.assert_imports(
first={"FirstDerivedClass"},
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST=settings.SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST
+ [
"tests.testapp.derived_classes_for_testing.test_module",
],
)
def test_imports_two_base_classes_exclude_test_module(self):
self.assert_imports(
first={"FirstDerivedClass"},
second={"SecondDerivedClass"},
fourth={"FourthDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST=settings.SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST
+ [
"tests.test_module_in_project_dir",
],
)
def test_imports_two_base_classes_exclude_classes_in_project_dir(self):
self.assert_imports(
first={"FirstDerivedClass"},
second={"SecondDerivedClass"},
third={"ThirdDerivedClass"},
)
@override_settings(
SHELL_PLUS_SUBCLASSES_IMPORT=[BaseIncludedClass, IncludedMixin],
SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST=settings.SHELL_PLUS_SUBCLASSES_IMPORT_MODULES_BLACKLIST
+ [
"tests.testapp.classes_to_include",
],
)
def test_imports_two_base_classes_exclude_classes_in_classes_to_include(self):
self.assert_imports(
second={"SecondDerivedClass"},
third={"ThirdDerivedClass"},
fourth={"FourthDerivedClass"},
)
def assert_imports(self, first=None, second=None, third=None, fourth=None):
"""
Auxiliary assertion which checks are classes imported under names.
:param first: set of expected names under which FirstDerivedClass should be available
:param second: set of expected names under which SecondDerivedClass should be available
:param third: set of expected names under which ThirdDerivedClass should be available
:param fourth: set of expected names under which FourthDerivedClass should be available
"""
# type: (Optional[Set[str]], Optional[Set[str]], Optional[Set[str]], Optional[Set[str]]) -> ()
self.run_shell_plus()
self.assert_imported_under_names(FirstDerivedClass, first or set())
self.assert_imported_under_names(SecondDerivedClass, second or set())
self.assert_imported_under_names(ThirdDerivedClass, third or set())
self.assert_imported_under_names(FourthDerivedClass, fourth or set())
self.assert_imported_under_names(ClassWhichShouldNotBeImported, set())
|
ImportSubclassesTestCase
|
python
|
docker__docker-py
|
docker/api/service.py
|
{
"start": 5046,
"end": 19215
}
|
class ____:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None, rollback_config=None
):
"""
Create a service.
Args:
task_template (TaskTemplate): Specification of the task to start as
part of the new service.
name (string): User-defined name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
Returns:
A dictionary containing an ``ID`` key for the newly created
service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is None:
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
if mode and not isinstance(mode, dict):
mode = ServiceMode(mode)
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if utils.version_lt(self._version, '1.25'):
networks = networks or task_template.pop('Networks', None)
data = {
'Name': name,
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
'Networks': utils.convert_service_networks(networks),
'EndpointSpec': endpoint_spec
}
if update_config is not None:
data['UpdateConfig'] = update_config
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def inspect_service(self, service, insert_defaults=None):
"""
Return information about a service.
Args:
service (str): Service name or ID.
insert_defaults (boolean): If true, default values will be merged
into the service inspect output.
Returns:
(dict): A dictionary of the server-side representation of the
service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
params = {}
if insert_defaults is not None:
if utils.version_lt(self._version, '1.29'):
raise errors.InvalidVersion(
'insert_defaults is not supported in API version < 1.29'
)
params['insertDefaults'] = insert_defaults
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('task')
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def remove_service(self, service):
"""
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True
@utils.minimum_version('1.24')
def services(self, filters=None, status=None):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``.
status (bool): Include the service task count of running and
desired tasks. Default: ``None``.
Returns:
A list of dictionaries containing data about each service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
if status is not None:
if utils.version_lt(self._version, '1.41'):
raise errors.InvalidVersion(
'status is not supported in API version < 1.41'
)
params['status'] = status
url = self._url('/services')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.25')
@utils.check_resource('service')
def service_logs(self, service, details=False, follow=False, stdout=False,
stderr=False, since=0, timestamps=False, tail='all',
is_tty=None):
"""
Get log stream for a service.
Note: This endpoint works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
service (str): ID or name of the service
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
since (int): UNIX timestamp for the logs staring point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
is_tty (bool): Whether the service's :py:class:`ContainerSpec`
enables the TTY option. If omitted, the method will query
the Engine for the information, causing an additional
roundtrip.
Returns (generator): Logs for the service.
"""
params = {
'details': details,
'follow': follow,
'stdout': stdout,
'stderr': stderr,
'since': since,
'timestamps': timestamps,
'tail': tail
}
url = self._url('/services/{0}/logs', service)
res = self._get(url, params=params, stream=True)
if is_tty is None:
is_tty = self.inspect_service(
service
)['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
return self._get_result_tty(True, res, is_tty)
@utils.minimum_version('1.24')
def tasks(self, filters=None):
"""
Retrieve a list of tasks.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``service``, ``node``,
``label`` and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/tasks')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
endpoint_spec=None, fetch_current_spec=False,
rollback_config=None):
"""
Update a service.
Args:
service (string): A service identifier (either its name or service
ID).
version (int): The version number of the service object being
updated. This is required to avoid conflicting writes.
task_template (TaskTemplate): Specification of the updated task to
start as part of the service.
name (string): New name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
if fetch_current_spec:
inspect_defaults = True
if utils.version_lt(self._version, '1.29'):
inspect_defaults = None
current = self.inspect_service(
service, insert_defaults=inspect_defaults
)['Spec']
else:
current = {}
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
data['Name'] = current.get('Name') if name is None else name
data['Labels'] = current.get('Labels') if labels is None else labels
if mode is not None:
if not isinstance(mode, dict):
mode = ServiceMode(mode)
data['Mode'] = mode
else:
data['Mode'] = current.get('Mode')
data['TaskTemplate'] = _merge_task_template(
current.get('TaskTemplate', {}), task_template
)
container_spec = data['TaskTemplate'].get('ContainerSpec', {})
image = container_spec.get('Image', None)
if image is not None:
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if update_config is not None:
data['UpdateConfig'] = update_config
else:
data['UpdateConfig'] = current.get('UpdateConfig')
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
else:
data['RollbackConfig'] = current.get('RollbackConfig')
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
data['Networks'] = converted_networks
else:
data['TaskTemplate']['Networks'] = converted_networks
elif utils.version_lt(self._version, '1.25'):
data['Networks'] = current.get('Networks')
elif data['TaskTemplate'].get('Networks') is None:
current_task_template = current.get('TaskTemplate', {})
current_networks = current_task_template.get('Networks')
if current_networks is None:
current_networks = current.get('Networks')
if current_networks is not None:
data['TaskTemplate']['Networks'] = current_networks
if endpoint_spec is not None:
data['EndpointSpec'] = endpoint_spec
else:
data['EndpointSpec'] = current.get('EndpointSpec')
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
return self._result(resp, json=True)
|
ServiceApiMixin
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/bigtable.py
|
{
"start": 1660,
"end": 1855
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Bigtable Tables link."""
name = "Bigtable Tables"
key = "tables_key"
format_str = BIGTABLE_TABLES_LINK
|
BigtableTablesLink
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/feature_preprocessing/random_trees_embedding.py
|
{
"start": 534,
"end": 4586
}
|
class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(
self,
n_estimators,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
bootstrap,
sparse_output=True,
n_jobs=1,
random_state=None,
):
self.n_estimators = n_estimators
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_leaf_nodes = max_leaf_nodes
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.bootstrap = bootstrap
self.sparse_output = sparse_output
self.n_jobs = n_jobs
self.random_state = random_state
def _fit(self, X, Y=None):
import sklearn.ensemble
self.n_estimators = int(self.n_estimators)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
self.min_samples_split = int(self.min_samples_split)
self.min_samples_leaf = int(self.min_samples_leaf)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.bootstrap = check_for_bool(self.bootstrap)
self.preprocessor = sklearn.ensemble.RandomTreesEmbedding(
n_estimators=self.n_estimators,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
max_leaf_nodes=self.max_leaf_nodes,
sparse_output=self.sparse_output,
n_jobs=self.n_jobs,
random_state=self.random_state,
)
self.preprocessor.fit(X, Y)
return self
def fit(self, X, y):
self._fit(X)
return self
def fit_transform(self, X, y=None):
return self._fit(X)
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "RandomTreesEmbedding",
"name": "Random Trees Embedding",
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (SPARSE, SIGNED_DATA),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
n_estimators = UniformIntegerHyperparameter(
name="n_estimators", lower=10, upper=100, default_value=10
)
max_depth = UniformIntegerHyperparameter(
name="max_depth", lower=2, upper=10, default_value=5
)
min_samples_split = UniformIntegerHyperparameter(
name="min_samples_split", lower=2, upper=20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
name="min_samples_leaf", lower=1, upper=20, default_value=1
)
min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 1.0)
max_leaf_nodes = UnParametrizedHyperparameter(
name="max_leaf_nodes", value="None"
)
bootstrap = CategoricalHyperparameter("bootstrap", ["True", "False"])
cs = ConfigurationSpace()
cs.add_hyperparameters(
[
n_estimators,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
bootstrap,
]
)
return cs
|
RandomTreesEmbedding
|
python
|
pola-rs__polars
|
py-polars/src/polars/_typing.py
|
{
"start": 1374,
"end": 1588
}
|
class ____(Protocol):
"""Type protocol for Arrow C Stream Interface via Arrow PyCapsule Interface."""
def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: ...
|
ArrowStreamExportable
|
python
|
kamyu104__LeetCode-Solutions
|
Python/h-index-ii.py
|
{
"start": 32,
"end": 440
}
|
class ____(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
n = len(citations)
left, right = 0, n - 1
while left <= right:
mid = (left + right) / 2
if citations[mid] >= n - mid:
right = mid - 1
else:
left = mid + 1
return n - left
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/testing/suite/test_dialect.py
|
{
"start": 16110,
"end": 19453
}
|
class ____(fixtures.TestBase):
__backend__ = True
tough_parameters = testing.combinations(
("boring",),
("per cent",),
("per % cent",),
("%percent",),
("par(ens)",),
("percent%(ens)yah",),
("col:ons",),
("_starts_with_underscore",),
("dot.s",),
("more :: %colons%",),
("_name",),
("___name",),
("[BracketsAndCase]",),
("42numbers",),
("percent%signs",),
("has spaces",),
("/slashes/",),
("more/slashes",),
("q?marks",),
("1param",),
("1col:on",),
argnames="paramname",
)
@tough_parameters
@config.requirements.unusual_column_name_characters
def test_round_trip_same_named_column(
self, paramname, connection, metadata
):
name = paramname
t = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column(name, String(50), nullable=False),
)
# table is created
t.create(connection)
# automatic param generated by insert
connection.execute(t.insert().values({"id": 1, name: "some name"}))
# automatic param generated by criteria, plus selecting the column
stmt = select(t.c[name]).where(t.c[name] == "some name")
eq_(connection.scalar(stmt), "some name")
# use the name in a param explicitly
stmt = select(t.c[name]).where(t.c[name] == bindparam(name))
row = connection.execute(stmt, {name: "some name"}).first()
# name works as the key from cursor.description
eq_(row._mapping[name], "some name")
# use expanding IN
stmt = select(t.c[name]).where(
t.c[name].in_(["some name", "some other_name"])
)
connection.execute(stmt).first()
@testing.fixture
def multirow_fixture(self, metadata, connection):
mytable = Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(50)),
Column("desc", String(50)),
)
mytable.create(connection)
connection.execute(
mytable.insert(),
[
{"myid": 1, "name": "a", "desc": "a_desc"},
{"myid": 2, "name": "b", "desc": "b_desc"},
{"myid": 3, "name": "c", "desc": "c_desc"},
{"myid": 4, "name": "d", "desc": "d_desc"},
],
)
yield mytable
@tough_parameters
def test_standalone_bindparam_escape(
self, paramname, connection, multirow_fixture
):
tbl1 = multirow_fixture
stmt = select(tbl1.c.myid).where(
tbl1.c.name == bindparam(paramname, value="x")
)
res = connection.scalar(stmt, {paramname: "c"})
eq_(res, 3)
@tough_parameters
def test_standalone_bindparam_escape_expanding(
self, paramname, connection, multirow_fixture
):
tbl1 = multirow_fixture
stmt = (
select(tbl1.c.myid)
.where(tbl1.c.name.in_(bindparam(paramname, value=["a", "b"])))
.order_by(tbl1.c.myid)
)
res = connection.scalars(stmt, {paramname: ["d", "a"]}).all()
eq_(res, [1, 4])
|
DifficultParametersTest
|
python
|
pytorch__pytorch
|
torch/_inductor/mock_cache.py
|
{
"start": 3690,
"end": 4663
}
|
class ____(RemoteCacheBackend[Any]):
def __init__(self, name: str) -> None:
self._name = name
@staticmethod
def with_name(name: str) -> Callable[[], MockBackend]:
def wrapper() -> MockBackend:
return MockBackend(name)
return wrapper
@override
def _get(self, key: str) -> Optional[Any]:
stat = global_stats.get_stat(self._name)
if key in stat.cache:
stat += Stats(num_get_hit=1)
return stat.cache.get(key)
else:
stat += Stats(num_get_miss=1)
return None
@override
def _put(self, key: str, data: Any) -> None:
stat = global_stats.get_stat(self._name)
stat += Stats(num_put=1)
stat.cache[key] = data
# List of configs for each cache
_CACHE_CONFIG_EN = (
"fx_graph_cache",
"fx_graph_remote_cache",
"autotune_local_cache",
"autotune_remote_cache",
"bundled_autotune_remote_cache",
)
|
MockBackend
|
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/cli/debugger_cli_common_test.py
|
{
"start": 40671,
"end": 41508
}
|
class ____(test_util.TensorFlowTestCase):
def testCommandTypeConstructorSucceeds(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertEqual("water flower", menu_node.caption)
self.assertEqual("water_flower", menu_node.content)
def testDisableWorks(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertTrue(menu_node.is_enabled())
menu_node.disable()
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
def testConstructAsDisabledWorks(self):
menu_node = debugger_cli_common.MenuItem(
"water flower", "water_flower", enabled=False)
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
|
MenuNodeTest
|
python
|
ray-project__ray
|
python/ray/_private/event/event_logger.py
|
{
"start": 469,
"end": 6184
}
|
class ____:
def __init__(self, source: Event.SourceType, logger: logging.Logger):
"""Adapter for the Python logger that's used to emit events.
When events are emitted, they are aggregated and available via
state API and dashboard.
This class is thread-safe.
"""
self.logger = logger
# Aligned with `event.proto`'s `message Event``
self.source = source
self.source_hostname = socket.gethostname()
self.source_pid = os.getpid()
# The below fields must be protected by this lock.
self.lock = threading.Lock()
# {str -> str} typed dict
self.global_context = {}
def set_global_context(self, global_context: Dict[str, str] = None):
"""Set the global metadata.
This method overwrites the global metadata if it is called more than once.
"""
with self.lock:
self.global_context = {} if not global_context else global_context
def trace(self, message: str, **kwargs):
self._emit(Event.Severity.TRACE, message, **kwargs)
def debug(self, message: str, **kwargs):
self._emit(Event.Severity.DEBUG, message, **kwargs)
def info(self, message: str, **kwargs):
self._emit(Event.Severity.INFO, message, **kwargs)
def warning(self, message: str, **kwargs):
self._emit(Event.Severity.WARNING, message, **kwargs)
def error(self, message: str, **kwargs):
self._emit(Event.Severity.ERROR, message, **kwargs)
def fatal(self, message: str, **kwargs):
self._emit(Event.Severity.FATAL, message, **kwargs)
def _emit(self, severity: Event.Severity, message: str, **kwargs):
# NOTE: Python logger is thread-safe,
# so we don't need to protect it using locks.
event = Event()
event.event_id = get_event_id()
event.timestamp = int(datetime.now().timestamp())
event.message = message
event.severity = severity
# TODO(sang): Support event type & schema.
event.label = ""
event.source_type = self.source
event.source_hostname = self.source_hostname
event.source_pid = self.source_pid
custom_fields = event.custom_fields
with self.lock:
for k, v in self.global_context.items():
if v is not None and k is not None:
custom_fields[k] = v
for k, v in kwargs.items():
if v is not None and k is not None:
custom_fields[k] = v
self.logger.info(
json.dumps(
message_to_dict(
event,
always_print_fields_with_no_presence=True,
preserving_proto_field_name=True,
use_integers_for_enums=False,
)
)
)
# Force flush so that we won't lose events
self.logger.handlers[0].flush()
def _build_event_file_logger(source: Event.SourceType, sink_dir: str):
logger = logging.getLogger("_ray_event_logger")
logger.setLevel(logging.INFO)
dir_path = pathlib.Path(sink_dir) / "events"
filepath = dir_path / f"event_{source}.log"
dir_path.mkdir(exist_ok=True)
filepath.touch(exist_ok=True)
# Configure the logger.
handler = logging.FileHandler(filepath)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
return logger
# This lock must be used when accessing or updating global event logger dict.
_event_logger_lock = threading.Lock()
_event_logger = {}
def get_event_logger(source: Event.SourceType, sink_dir: str):
"""Get the event logger of the current process.
There's only 1 event logger per (process, source).
TODO(sang): Support more impl than file-based logging.
Currently, the interface also ties to the
file-based logging impl.
Args:
source: The source of the event.
sink_dir: The directory to sink event logs.
"""
with _event_logger_lock:
global _event_logger
source_name = Event.SourceType.Name(source)
if source_name not in _event_logger:
logger = _build_event_file_logger(source_name, sink_dir)
_event_logger[source_name] = EventLoggerAdapter(source, logger)
return _event_logger[source_name]
def parse_event(event_str: str) -> Optional[Event]:
"""Parse an event from a string.
Args:
event_str: The string to parse. Expect to be a JSON serialized
Event protobuf.
Returns:
The parsed event if parsable, else None
"""
try:
return Parse(event_str, Event())
except Exception:
global_logger.exception(f"Failed to parse event: {event_str}")
return None
def filter_event_by_level(event: Event, filter_event_level: str) -> bool:
"""Filter an event based on event level.
Args:
event: The event to filter.
filter_event_level: The event level string to filter by. Any events
that are lower than this level will be filtered.
Returns:
True if the event should be filtered, else False.
"""
event_levels = {
Event.Severity.TRACE: 0,
Event.Severity.DEBUG: 1,
Event.Severity.INFO: 2,
Event.Severity.WARNING: 3,
Event.Severity.ERROR: 4,
Event.Severity.FATAL: 5,
}
filter_event_level = filter_event_level.upper()
filter_event_level = Event.Severity.Value(filter_event_level)
if event_levels[event.severity] < event_levels[filter_event_level]:
return True
return False
|
EventLoggerAdapter
|
python
|
jina-ai__jina
|
tests/integration/docarray_v2/test_issues.py
|
{
"start": 2966,
"end": 5672
}
|
class ____(Executor):
@requests(on="/stream")
async def stream(
self,
doc: InputWithComplexFields,
parameters: Optional[Dict] = None,
**kwargs,
) -> InputWithComplexFields:
for i in range(4):
yield InputWithComplexFields(text=f"hello world {doc.text} {i}")
@requests(on="/stream-simple")
async def stream_simple(
self,
doc: SimpleInput,
parameters: Optional[Dict] = None,
**kwargs,
) -> SimpleInput:
for i in range(4):
yield SimpleInput(text=f"hello world {doc.text} {i}")
@pytest.fixture(scope="module")
def streaming_deployment():
protocol = "http"
with Deployment(uses=MyExecutor, protocol=protocol) as dep:
yield dep
@pytest.mark.asyncio
async def test_issue_6090(streaming_deployment):
"""Tests if streaming works with pydantic models with complex fields which are not
str, int, or float.
"""
docs = []
protocol = "http"
client = Client(port=streaming_deployment.port, protocol=protocol, asyncio=True)
example_doc = InputWithComplexFields(text="my input text")
async for doc in client.stream_doc(
on="/stream",
inputs=example_doc,
input_type=InputWithComplexFields,
return_type=InputWithComplexFields,
):
docs.append(doc)
assert [d.text for d in docs] == [
'hello world my input text 0',
'hello world my input text 1',
'hello world my input text 2',
'hello world my input text 3',
]
assert docs[0].nested_field.name == "test_name"
@pytest.mark.asyncio
async def test_issue_6090_get_params(streaming_deployment):
"""Tests if streaming works with pydantic models with complex fields which are not
str, int, or float.
"""
docs = []
url = (
f"http://localhost:{streaming_deployment.port}/stream-simple?text=my_input_text"
)
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
async for chunk in resp.content.iter_any():
events = chunk.split(b'event: ')[1:]
for event in events:
if event.startswith(b'update'):
parsed = event[HTTPClientlet.UPDATE_EVENT_PREFIX :].decode()
parsed = SimpleInput.parse_raw(parsed)
docs.append(parsed)
elif event.startswith(b'end'):
pass
assert [d.text for d in docs] == [
'hello world my_input_text 0',
'hello world my_input_text 1',
'hello world my_input_text 2',
'hello world my_input_text 3',
]
|
MyExecutor
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/inputs.py
|
{
"start": 14391,
"end": 15406
}
|
class ____(InputWidget):
''' Single-select widget.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
options = Either(Options, OptionsGroups, help="""
Available selection options.
Options may be provided either as a list of possible string values, which
also act as options' labels, or as a list of tuples, each of the form
``(value, label)``, where ``value`` can be of any type, not necessarily
a string. In the latter case, the visible widget text for each value will
be corresponding given label.
Option groupings can be provided by supplying a dictionary object whose
values are in the aforementioned list format.
""").accepts(List(Either(Null, String)), lambda v: [ NotSelected if item is None else item for item in v ])
value = AnyRef(default=NotSelected, help="""
Initial or selected value.
""").accepts(Null, lambda _: NotSelected)
|
Select
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/strategies/test_model_parallel_integration.py
|
{
"start": 1069,
"end": 2954
}
|
class ____(nn.Module):
def __init__(self):
super().__init__()
self.w1 = nn.Linear(32, 64)
self.w2 = nn.Linear(32, 64)
self.w3 = nn.Linear(64, 32)
def forward(self, x):
return self.w3(F.silu(self.w1(x)) * self.w2(x))
def _parallelize_feed_forward_tp(model, device_mesh):
from torch.distributed.tensor.parallel import ColwiseParallel, RowwiseParallel, parallelize_module
tp_mesh = device_mesh["tensor_parallel"]
tp_plan = {
"w1": ColwiseParallel(),
"w2": ColwiseParallel(),
"w3": RowwiseParallel(),
}
parallelize_module(model, tp_mesh, tp_plan)
return model
def _parallelize_feed_forward_fsdp2(model, device_mesh):
from torch.distributed._composable.fsdp.fully_shard import fully_shard
dp_mesh = device_mesh["data_parallel"]
assert dp_mesh.ndim == 1 # Hybrid-sharding not supported
# Fully-shard each layer
fully_shard(model.w1, mesh=dp_mesh)
fully_shard(model.w2, mesh=dp_mesh)
fully_shard(model.w3, mesh=dp_mesh)
# TODO: Re-enable activation checkpointing
# Currently, state dict keys get prefixed with '_checkpoint_wrapper' in the keys
# which leads to mismatches when loading weights into a checkpoint-wrapped module.
# PyTorch should handle this automatically.
# model = checkpoint_wrapper(model)
return model
def _parallelize_feed_forward_fsdp2_tp(model, device_mesh):
model = _parallelize_feed_forward_tp(model, device_mesh)
return _parallelize_feed_forward_fsdp2(model, device_mesh)
def _parallelize_with_compile(parallelize):
def fn(model, device_mesh):
model = parallelize(model, device_mesh)
return torch.compile(model)
return fn
@pytest.fixture
def distributed():
yield
if torch.distributed.is_initialized():
torch.distributed.destroy_process_group()
|
FeedForward
|
python
|
MongoEngine__mongoengine
|
mongoengine/context_managers.py
|
{
"start": 1513,
"end": 2806
}
|
class ____:
"""switch_db alias context manager.
Example ::
# Register connections
register_connection('default', 'mongoenginetest')
register_connection('testdb-1', 'mongoenginetest2')
class Group(Document):
name = StringField()
Group(name='test').save() # Saves in the default db
with switch_db(Group, 'testdb-1') as Group:
Group(name='hello testdb!').save() # Saves in testdb-1
"""
def __init__(self, cls, db_alias):
"""Construct the switch_db context manager
:param cls: the class to change the registered db
:param db_alias: the name of the specific database to use
"""
self.cls = cls
self.collection = cls._get_collection()
self.db_alias = db_alias
self.ori_db_alias = cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME)
def __enter__(self):
"""Change the db_alias and clear the cached collection."""
self.cls._meta["db_alias"] = self.db_alias
self.cls._collection = None
return self.cls
def __exit__(self, t, value, traceback):
"""Reset the db_alias and collection."""
self.cls._meta["db_alias"] = self.ori_db_alias
self.cls._collection = self.collection
|
switch_db
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/asyncpg.py
|
{
"start": 680,
"end": 6521
}
|
class ____(Integration):
identifier = "asyncpg"
origin = f"auto.db.{identifier}"
_record_params = False
def __init__(self, *, record_params: bool = False):
AsyncPGIntegration._record_params = record_params
@staticmethod
def setup_once() -> None:
# asyncpg.__version__ is a string containing the semantic version in the form of "<major>.<minor>.<patch>"
asyncpg_version = parse_version(asyncpg.__version__)
_check_minimum_version(AsyncPGIntegration, asyncpg_version)
asyncpg.Connection.execute = _wrap_execute(
asyncpg.Connection.execute,
)
asyncpg.Connection._execute = _wrap_connection_method(
asyncpg.Connection._execute
)
asyncpg.Connection._executemany = _wrap_connection_method(
asyncpg.Connection._executemany, executemany=True
)
asyncpg.Connection.cursor = _wrap_cursor_creation(asyncpg.Connection.cursor)
asyncpg.Connection.prepare = _wrap_connection_method(asyncpg.Connection.prepare)
asyncpg.connect_utils._connect_addr = _wrap_connect_addr(
asyncpg.connect_utils._connect_addr
)
T = TypeVar("T")
def _wrap_execute(f: Callable[..., Awaitable[T]]) -> Callable[..., Awaitable[T]]:
async def _inner(*args: Any, **kwargs: Any) -> T:
if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
return await f(*args, **kwargs)
# Avoid recording calls to _execute twice.
# Calls to Connection.execute with args also call
# Connection._execute, which is recorded separately
# args[0] = the connection object, args[1] is the query
if len(args) > 2:
return await f(*args, **kwargs)
query = args[1]
with record_sql_queries(
cursor=None,
query=query,
params_list=None,
paramstyle=None,
executemany=False,
span_origin=AsyncPGIntegration.origin,
) as span:
res = await f(*args, **kwargs)
with capture_internal_exceptions():
add_query_source(span)
return res
return _inner
SubCursor = TypeVar("SubCursor", bound=BaseCursor)
@contextlib.contextmanager
def _record(
cursor: SubCursor | None,
query: str,
params_list: tuple[Any, ...] | None,
*,
executemany: bool = False,
) -> Iterator[Span]:
integration = sentry_sdk.get_client().get_integration(AsyncPGIntegration)
if integration is not None and not integration._record_params:
params_list = None
param_style = "pyformat" if params_list else None
with record_sql_queries(
cursor=cursor,
query=query,
params_list=params_list,
paramstyle=param_style,
executemany=executemany,
record_cursor_repr=cursor is not None,
span_origin=AsyncPGIntegration.origin,
) as span:
yield span
def _wrap_connection_method(
f: Callable[..., Awaitable[T]], *, executemany: bool = False
) -> Callable[..., Awaitable[T]]:
async def _inner(*args: Any, **kwargs: Any) -> T:
if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
return await f(*args, **kwargs)
query = args[1]
params_list = args[2] if len(args) > 2 else None
with _record(None, query, params_list, executemany=executemany) as span:
_set_db_data(span, args[0])
res = await f(*args, **kwargs)
return res
return _inner
def _wrap_cursor_creation(f: Callable[..., T]) -> Callable[..., T]:
@ensure_integration_enabled(AsyncPGIntegration, f)
def _inner(*args: Any, **kwargs: Any) -> T: # noqa: N807
query = args[1]
params_list = args[2] if len(args) > 2 else None
with _record(
None,
query,
params_list,
executemany=False,
) as span:
_set_db_data(span, args[0])
res = f(*args, **kwargs)
span.set_data("db.cursor", res)
return res
return _inner
def _wrap_connect_addr(f: Callable[..., Awaitable[T]]) -> Callable[..., Awaitable[T]]:
async def _inner(*args: Any, **kwargs: Any) -> T:
if sentry_sdk.get_client().get_integration(AsyncPGIntegration) is None:
return await f(*args, **kwargs)
user = kwargs["params"].user
database = kwargs["params"].database
with sentry_sdk.start_span(
op=OP.DB,
name="connect",
origin=AsyncPGIntegration.origin,
) as span:
span.set_data(SPANDATA.DB_SYSTEM, "postgresql")
addr = kwargs.get("addr")
if addr:
try:
span.set_data(SPANDATA.SERVER_ADDRESS, addr[0])
span.set_data(SPANDATA.SERVER_PORT, addr[1])
except IndexError:
pass
span.set_data(SPANDATA.DB_NAME, database)
span.set_data(SPANDATA.DB_USER, user)
with capture_internal_exceptions():
sentry_sdk.add_breadcrumb(
message="connect", category="query", data=span._data
)
res = await f(*args, **kwargs)
return res
return _inner
def _set_db_data(span: Span, conn: Any) -> None:
span.set_data(SPANDATA.DB_SYSTEM, "postgresql")
addr = conn._addr
if addr:
try:
span.set_data(SPANDATA.SERVER_ADDRESS, addr[0])
span.set_data(SPANDATA.SERVER_PORT, addr[1])
except IndexError:
pass
database = conn._params.database
if database:
span.set_data(SPANDATA.DB_NAME, database)
user = conn._params.user
if user:
span.set_data(SPANDATA.DB_USER, user)
|
AsyncPGIntegration
|
python
|
PyCQA__pylint
|
tests/functional/u/used/used_before_assignment.py
|
{
"start": 402,
"end": 3677
}
|
class ____: # pylint: disable=too-few-public-methods
"""This test depends on earlier and later defined module-level functions."""
prop = property(redefine_time_import) # [used-before-assignment]
prop_defined_earlier = property(outer)
calculate(1.01, 2) # [used-before-assignment]
def calculate(value1: int, value2: float) -> int:
return value1 + value2
# pylint: disable=unused-import, wrong-import-position, import-outside-toplevel, reimported, redefined-outer-name, global-statement
import time
def redefine_time_import():
print(time.time()) # [used-before-assignment]
import time
def redefine_time_import_with_global():
global time # pylint: disable=invalid-name
print(time.time())
import time
# Control flow cases
FALSE = False
if FALSE:
VAR2 = True
if VAR2: # [used-before-assignment]
pass
if FALSE: # pylint: disable=simplifiable-if-statement
VAR3 = True
elif VAR2:
VAR3 = True
else:
VAR3 = False
if VAR3:
pass
if FALSE:
VAR4 = True
elif VAR2:
pass
else:
VAR4 = False
if VAR4: # [possibly-used-before-assignment]
pass
if FALSE:
VAR5 = True
elif VAR2:
if FALSE: # pylint: disable=simplifiable-if-statement
VAR5 = True
else:
VAR5 = True
if VAR5: # [possibly-used-before-assignment]
pass
if FALSE:
VAR6 = False
if VAR6: # [used-before-assignment]
pass
# Nested try
if FALSE:
try:
VAR7 = True
except ValueError:
pass
else:
VAR7 = False
if VAR7:
pass
if FALSE:
try:
VAR8 = True
except ValueError as ve:
print(ve)
raise
else:
VAR8 = False
if VAR8:
pass
if FALSE:
for i in range(5):
VAR9 = i
break
print(VAR9)
if FALSE:
with open(__name__, encoding='utf-8') as f:
VAR10 = __name__
print(VAR10) # [used-before-assignment]
for num in [0, 1]:
VAR11 = num
if VAR11:
VAR12 = False
print(VAR12) # [possibly-used-before-assignment]
if input("This tests terminating functions: "):
sys.exit()
else:
VAR13 = 1
print(VAR13)
def turn_on2(**kwargs):
"""https://github.com/pylint-dev/pylint/issues/7873"""
if "brightness" in kwargs:
brightness = kwargs["brightness"]
var, *args = (1, "set_dimmer_state", brightness)
else:
var, *args = (1, "restore_dimmer_state")
print(var, *args)
# Variables guarded by the same test when used.
# Always false
if 1 in []:
PERCENT = 20
SALE = True
if 1 in []:
print(PERCENT)
# Always true
if always_true := True:
ONE = 1
print(ONE if always_true else 2)
# Different test
if 1 in [1]:
print(SALE) # [used-before-assignment]
# Ambiguous, but same test
if not datetime.date.today():
WAS_TODAY = True
if not datetime.date.today():
print(WAS_TODAY)
# Different tests but same inferred values
# Need falsy values here
def give_me_zero():
return 0
def give_me_nothing():
return 0
if give_me_zero():
WE_HAVE_ZERO = True
ALL_DONE = True
if give_me_nothing():
print(WE_HAVE_ZERO)
# Different tests, different values
def give_me_none():
return None
if give_me_none():
print(ALL_DONE) # [used-before-assignment]
attr = 'test' # pylint: disable=invalid-name
|
ClassWithProperty
|
python
|
pytest-dev__pytest
|
src/_pytest/mark/expression.py
|
{
"start": 1289,
"end": 1578
}
|
class ____(enum.Enum):
LPAREN = "left parenthesis"
RPAREN = "right parenthesis"
OR = "or"
AND = "and"
NOT = "not"
IDENT = "identifier"
EOF = "end of input"
EQUAL = "="
STRING = "string literal"
COMMA = ","
@dataclasses.dataclass(frozen=True)
|
TokenType
|
python
|
pola-rs__polars
|
py-polars/src/polars/_utils/udfs.py
|
{
"start": 956,
"end": 1483
}
|
class ____(NamedTuple):
operator: str
operator_arity: int
left_operand: str
right_operand: str
from_module: str | None = None
MapTarget: TypeAlias = Literal["expr", "frame", "series"]
StackEntry: TypeAlias = Union[str, StackValue]
_MIN_PY311 = sys.version_info >= (3, 11)
_MIN_PY312 = _MIN_PY311 and sys.version_info >= (3, 12)
_MIN_PY314 = _MIN_PY312 and sys.version_info >= (3, 14)
_BYTECODE_PARSER_CACHE_: MutableMapping[
tuple[Callable[[Any], Any], str], BytecodeParser
] = LRUCache(32)
|
StackValue
|
python
|
celery__celery
|
t/unit/concurrency/test_eventlet.py
|
{
"start": 728,
"end": 1524
}
|
class ____(EventletCase):
def test_aaa_is_patched(self):
with patch('eventlet.monkey_patch', create=True) as monkey_patch:
from celery import maybe_patch_concurrency
maybe_patch_concurrency(['x', '-P', 'eventlet'])
monkey_patch.assert_called_with()
@patch('eventlet.debug.hub_blocking_detection', create=True)
@patch('eventlet.monkey_patch', create=True)
def test_aaa_blockdetecet(
self, monkey_patch, hub_blocking_detection, patching):
patching.setenv('EVENTLET_NOBLOCK', '10.3')
from celery import maybe_patch_concurrency
maybe_patch_concurrency(['x', '-P', 'eventlet'])
monkey_patch.assert_called_with()
hub_blocking_detection.assert_called_with(10.3, 10.3)
|
test_aaa_eventlet_patch
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/django/toystore/test_given_models.py
|
{
"start": 5766,
"end": 6586
}
|
class ____(TestCase):
@given(from_model(RestrictedFields))
def test_constructs_valid_instance(self, instance):
self.assertIsInstance(instance, RestrictedFields)
instance.full_clean()
self.assertLessEqual(len(instance.text_field_4), 4)
self.assertLessEqual(len(instance.char_field_4), 4)
self.assertIn(instance.choice_field_text, ("foo", "bar"))
self.assertIn(instance.choice_field_int, (1, 2))
self.assertIn(instance.null_choice_field_int, (1, 2, None))
self.assertEqual(
instance.choice_field_grouped, instance.choice_field_grouped.lower()
)
self.assertEqual(instance.even_number_field % 2, 0)
self.assertTrue(instance.non_blank_text_field)
@skipIf(User is None, "contrib.auth not installed")
|
TestRestrictedFields
|
python
|
OmkarPathak__pygorithm
|
tests/test_geometry.py
|
{
"start": 6913,
"end": 16747
}
|
class ____(unittest.TestCase):
def setUp(self):
random.seed()
self.vec_origin = vector2.Vector2(0, 0)
self.vec_1_1 = vector2.Vector2(1, 1)
self.vec_2_1 = vector2.Vector2(2, 1)
self.vec_1_2 = vector2.Vector2(1, 2)
self.vec_3_4 = vector2.Vector2(3, 4)
self.vec_neg_1_neg_1 = vector2.Vector2(-1, -1)
self.line_origin_1_1 = line2.Line2(self.vec_origin, self.vec_1_1)
self.line_1_1_3_4 = line2.Line2(self.vec_1_1, self.vec_3_4)
self.line_1_1_2_1 = line2.Line2(self.vec_1_1, self.vec_2_1)
self.line_1_1_1_2 = line2.Line2(self.vec_1_1, self.vec_1_2)
def test_constructor(self):
_line = self.line_origin_1_1
self.assertIsNotNone(_line.start)
self.assertIsNotNone(_line.end)
self.assertEqual(0, _line.start.x)
self.assertEqual(0, _line.start.y)
self.assertEqual(1, _line.end.x)
self.assertEqual(1, _line.end.y)
with self.assertRaises(ValueError):
_line2 = line2.Line2(self.vec_origin, self.vec_origin)
def test_delta(self):
self.assertEqual(1, self.line_origin_1_1.delta.x)
self.assertEqual(1, self.line_origin_1_1.delta.y)
self.assertEqual(2, self.line_1_1_3_4.delta.x)
self.assertEqual(3, self.line_1_1_3_4.delta.y)
def test_axis(self):
self.assertAlmostEqual(0.70710678118, self.line_origin_1_1.axis.x)
self.assertAlmostEqual(0.70710678118, self.line_origin_1_1.axis.y)
self.assertAlmostEqual(0.55470019622, self.line_1_1_3_4.axis.x)
self.assertAlmostEqual(0.83205029433, self.line_1_1_3_4.axis.y)
self.assertEqual(1, self.line_1_1_2_1.axis.x)
self.assertEqual(0, self.line_1_1_2_1.axis.y)
self.assertEqual(0, self.line_1_1_1_2.axis.x)
self.assertEqual(1, self.line_1_1_1_2.axis.y)
def test_normal(self):
self.assertAlmostEqual(-0.70710678118, self.line_origin_1_1.normal.x)
self.assertAlmostEqual(0.70710678118, self.line_origin_1_1.normal.y)
self.assertAlmostEqual(-0.83205029433, self.line_1_1_3_4.normal.x)
self.assertAlmostEqual(0.55470019622, self.line_1_1_3_4.normal.y)
self.assertEqual(0, self.line_1_1_2_1.normal.x)
self.assertEqual(1, self.line_1_1_2_1.normal.y)
self.assertEqual(-1, self.line_1_1_1_2.normal.x)
self.assertEqual(0, self.line_1_1_1_2.normal.y)
def test_magnitude_squared(self):
self.assertAlmostEqual(2, self.line_origin_1_1.magnitude_squared)
self.assertAlmostEqual(13, self.line_1_1_3_4.magnitude_squared)
self.assertEqual(1, self.line_1_1_2_1.magnitude_squared)
self.assertEqual(1, self.line_1_1_1_2.magnitude_squared)
def test_magnitude(self):
self.assertAlmostEqual(1.41421356237, self.line_origin_1_1.magnitude)
self.assertAlmostEqual(3.60555127546, self.line_1_1_3_4.magnitude)
self.assertEqual(1, self.line_1_1_2_1.magnitude)
self.assertEqual(1, self.line_1_1_1_2.magnitude)
def test_line_boundaries_x(self): # min_x, min_y, max_x, max_y
_line = line2.Line2(vector2.Vector2(-2, 3), vector2.Vector2(1, -1))
self.assertEqual(-2, _line.min_x)
self.assertEqual(1, _line.max_x)
self.assertEqual(-1, _line.min_y)
self.assertEqual(3, _line.max_y)
def test_slope(self):
self.assertEqual(1, self.line_origin_1_1.slope)
self.assertAlmostEqual(1.5, self.line_1_1_3_4.slope)
self.assertEqual(float('+inf'), self.line_1_1_1_2.slope)
self.assertEqual(0, self.line_1_1_2_1.slope)
def test_y_intercept(self):
self.assertEqual(0, self.line_origin_1_1.y_intercept)
self.assertAlmostEqual(-0.5, self.line_1_1_3_4.y_intercept)
self.assertIsNone(self.line_1_1_1_2.y_intercept)
self.assertEqual(1, self.line_1_1_2_1.y_intercept)
def test_horizontal(self):
self.assertFalse(self.line_origin_1_1.horizontal)
self.assertFalse(self.line_1_1_3_4.horizontal)
self.assertFalse(self.line_1_1_1_2.horizontal)
self.assertTrue(self.line_1_1_2_1.horizontal)
def test_vertical(self):
self.assertFalse(self.line_origin_1_1.vertical)
self.assertFalse(self.line_1_1_3_4.vertical)
self.assertTrue(self.line_1_1_1_2.vertical)
self.assertFalse(self.line_1_1_2_1.vertical)
def test_repr(self):
self.assertEqual('line2(start=vector2(x=1, y=1), end=vector2(x=3, y=4))', repr(self.line_1_1_3_4))
def test_str(self):
self.assertEqual('<1, 1> -> <3, 4>', str(self.line_1_1_3_4))
def test_calculate_y_intercept(self):
self.assertAlmostEqual(-1, self.line_1_1_3_4.calculate_y_intercept(self.vec_1_1))
def test_are_parallel(self):
self.assertFalse(line2.Line2.are_parallel(self.line_origin_1_1, self.line_1_1_3_4))
_line = line2.Line2(vector2.Vector2(5, 4), vector2.Vector2(3, 1))
self.assertTrue(line2.Line2.are_parallel(self.line_1_1_3_4, _line))
def test_contains_point(self):
self.assertFalse(line2.Line2.contains_point(self.line_origin_1_1, self.vec_1_1, self.vec_1_2))
self.assertTrue(line2.Line2.contains_point(self.line_origin_1_1, self.vec_1_1))
self.assertTrue(line2.Line2.contains_point(self.line_1_1_3_4, vector2.Vector2(2, 2.5)))
self.assertFalse(line2.Line2.contains_point(self.line_1_1_3_4, vector2.Vector2(2, 2.5), vector2.Vector2(1, 0)))
self.assertTrue(line2.Line2.contains_point(line2.Line2(vector2.Vector2(-3, -3), vector2.Vector2(6, 3)), vector2.Vector2(3, 1)))
def _find_intr_fuzzer(self, v1, v2, v3, v4, exp_touching, exp_overlap, exp_intr, number_fuzzes = 3):
for i in range(number_fuzzes):
offset1 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
offset2 = vector2.Vector2(random.uniform(-1000, 1000), random.uniform(-1000, 1000))
_line1 = line2.Line2(v1 - offset1, v2 - offset1)
_line2 = line2.Line2(v3 - offset2, v4 - offset2)
help_msg = 'v1={}, v2={}, offset1={}\n_line1={}\nv3={}, v4={}, offset2={}\n_line2={}'.format(repr(v1), \
repr(v2), repr(offset1), repr(_line1), repr(v3), repr(v4), repr(offset2), repr(_line2))
touching, overlap, intr = line2.Line2.find_intersection(_line1, _line2, offset1, offset2)
self.assertEqual(exp_touching, touching, help_msg)
self.assertEqual(exp_overlap, overlap, help_msg)
if exp_intr is None:
self.assertIsNone(intr, help_msg)
else:
self.assertIsNotNone(intr, help_msg)
if isinstance(exp_intr, vector2.Vector2):
self.assertIsInstance(intr, vector2.Vector2, help_msg)
self.assertAlmostEqual(exp_intr.x, intr.x)
self.assertAlmostEqual(exp_intr.y, intr.y)
else:
self.assertIsInstance(exp_intr, line2.Line2, help_msg)
self.assertIsInstance(intr, line2.Line2, help_msg)
self.assertAlmostEqual(exp_intr.start.x, intr.start.x)
self.assertAlmostEqual(exp_intr.start.y, intr.start.y)
self.assertAlmostEqual(exp_intr.end.x, intr.end.x)
self.assertAlmostEqual(exp_intr.end.y, intr.end.y)
def test_find_intersection_non_parallel_no_intersection(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(5, 4), vector2.Vector2(7, 3),
False, False, None)
def test_find_intersection_parallel_no_intersection(self):
self._find_intr_fuzzer(vector2.Vector2(1, 1), vector2.Vector2(3, 3),
vector2.Vector2(2, 1), vector2.Vector2(4, 3),
False, False, None)
def test_find_intersection_non_parallel_intersect_at_edge(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(1, 6), vector2.Vector2(5, 2),
True, False, vector2.Vector2(3, 4))
def test_find_intersection_non_parallel_intersect_not_edge(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(3.5, 7), vector2.Vector2(4.5, 4),
False, True, vector2.Vector2(4.125, 5.125))
def test_find_intersection_parallel_intersect_at_edge(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(5, 6), vector2.Vector2(7, 8),
True, False, vector2.Vector2(5, 6))
def test_find_intersection_parallel_intersect_overlap(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(4, 5), vector2.Vector2(7, 8),
False, True, line2.Line2(vector2.Vector2(4, 5), vector2.Vector2(5, 6)))
def test_find_intersection_parallel_overlap_compeletely(self):
self._find_intr_fuzzer(vector2.Vector2(3, 4), vector2.Vector2(5, 6),
vector2.Vector2(2, 3), vector2.Vector2(7, 8),
False, True, line2.Line2(vector2.Vector2(3, 4), vector2.Vector2(5, 6)))
|
TestLine2
|
python
|
pytorch__pytorch
|
test/profiler/test_memory_profiler.py
|
{
"start": 2350,
"end": 3399
}
|
class ____(torch.utils._python_dispatch.TorchDispatchMode):
def __init__(self) -> None:
self.results = []
def mark_region(self, name: str):
self.results.append((name, (), ()))
@staticmethod
def flat_ids(args):
flat_args = pytree.tree_leaves(args)
return tuple(
(t._cdata, t.storage().data_ptr())
for t in flat_args
if isinstance(t, torch.Tensor) and t.storage()
)
def __torch_dispatch__(self, func, types, args=..., kwargs=None):
args = args or []
kwargs = kwargs or {}
flat_inputs = self.flat_ids(args) + self.flat_ids(kwargs)
out = func(*args, **kwargs)
flat_outputs = self.flat_ids(out)
if (
flat_inputs or flat_outputs
) and "_record_function_enter" not in func.name():
self.results.append((func.name(), flat_inputs, flat_outputs))
return out
@skipIfTorchDynamo("TorchDynamo changes Python calls that memory profiling relies on.")
|
RecordInputOutputDispatchMode
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess1.py
|
{
"start": 1787,
"end": 1839
}
|
class ____(type):
y = MetaDescriptorE()
|
MetaclassE
|
python
|
sphinx-doc__sphinx
|
doc/development/tutorials/examples/autodoc_intenum.py
|
{
"start": 361,
"end": 1920
}
|
class ____(ClassDocumenter):
objtype = 'intenum'
directivetype = ClassDocumenter.objtype
priority = 10 + ClassDocumenter.priority
option_spec = dict(ClassDocumenter.option_spec)
option_spec['hex'] = bool_option
@classmethod
def can_document_member(
cls, member: Any, membername: str, isattr: bool, parent: Documenter
) -> bool:
try:
return issubclass(member, IntEnum)
except TypeError:
return False
def add_directive_header(self, sig: str) -> None:
super().add_directive_header(sig)
self.add_line(' :final:', self.get_sourcename())
def add_content(
self,
more_content: StringList | None,
) -> None:
super().add_content(more_content)
source_name = self.get_sourcename()
enum_object: IntEnum = self.object
use_hex = self.options.hex
self.add_line('', source_name)
for the_member_name, enum_member in enum_object.__members__.items(): # type: ignore[attr-defined]
the_member_value = enum_member.value
if use_hex:
the_member_value = hex(the_member_value)
self.add_line(f'**{the_member_name}**: {the_member_value}', source_name)
self.add_line('', source_name)
def setup(app: Sphinx) -> ExtensionMetadata:
app.setup_extension('sphinx.ext.autodoc') # Require autodoc extension
app.add_autodocumenter(IntEnumDocumenter)
return {
'version': '1',
'parallel_read_safe': True,
}
|
IntEnumDocumenter
|
python
|
walkccc__LeetCode
|
solutions/964. Least Operators to Express Number/964.py
|
{
"start": 0,
"end": 535
}
|
class ____:
def leastOpsExpressTarget(self, x: int, target: int) -> int:
@functools.lru_cache(None)
def dfs(target):
if x > target:
return min(2 * target - 1, 2 * (x - target))
if x == target:
return 0
prod = x
n = 0
while prod < target:
prod *= x
n += 1
if prod == target:
return n
ans = dfs(target - prod // x) + n
if prod < 2 * target:
ans = min(ans, dfs(prod - target) + n + 1)
return ans
return dfs(target)
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/notifications/platform/slack/provider.py
|
{
"start": 1031,
"end": 1105
}
|
class ____(TypedDict):
blocks: list[Block]
text: str
|
SlackRenderable
|
python
|
pandas-dev__pandas
|
pandas/core/interchange/utils.py
|
{
"start": 1914,
"end": 5051
}
|
class ____:
"""Enum indicating the byte-order of a data-type."""
LITTLE = "<"
BIG = ">"
NATIVE = "="
NA = "|"
def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
"""
Represent pandas `dtype` as a format string in Apache Arrow C notation.
Parameters
----------
dtype : np.dtype
Datatype of pandas DataFrame to represent.
Returns
-------
str
Format string in Apache Arrow C notation of the given `dtype`.
"""
if isinstance(dtype, CategoricalDtype):
return ArrowCTypes.INT64
elif dtype == np.dtype("O"):
return ArrowCTypes.STRING
elif isinstance(dtype, ArrowDtype):
import pyarrow as pa
pa_type = dtype.pyarrow_dtype
if pa.types.is_decimal(pa_type):
return f"d:{pa_type.precision},{pa_type.scale}"
elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:
return f"ts{pa_type.unit[0]}:{pa_type.tz}"
format_str = PYARROW_CTYPES.get(str(pa_type), None)
if format_str is not None:
return format_str
format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
if format_str is not None:
return format_str
if isinstance(dtype, pd.StringDtype):
# TODO(infer_string) this should be LARGE_STRING for pyarrow storage,
# but current tests don't cover this distinction
return ArrowCTypes.STRING
elif lib.is_np_dtype(dtype, "M"):
# Selecting the first char of resolution string:
# dtype.str -> '<M8[ns]' -> 'n'
resolution = np.datetime_data(dtype)[0][0]
return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
elif isinstance(dtype, DatetimeTZDtype):
return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
elif isinstance(dtype, pd.BooleanDtype):
return ArrowCTypes.BOOL
raise NotImplementedError(
f"Conversion of {dtype} to Arrow C format string is not implemented."
)
def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:
"""
Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary.
- Returns `None` if the input series is not backed by a multi-chunk pyarrow array
(and so doesn't need rechunking)
- Returns a single-chunk-backed-Series if the input is backed by a multi-chunk
pyarrow array and `allow_copy` is `True`.
- Raises a `RuntimeError` if `allow_copy` is `False` and input is a
based by a multi-chunk pyarrow array.
"""
if not isinstance(series.dtype, pd.ArrowDtype):
return None
chunked_array = series.array._pa_array # type: ignore[attr-defined]
if len(chunked_array.chunks) == 1:
return None
if not allow_copy:
raise RuntimeError(
"Found multi-chunk pyarrow array, but `allow_copy` is False. "
"Please rechunk the array before calling this function, or set "
"`allow_copy=True`."
)
arr = chunked_array.combine_chunks()
return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)
|
Endianness
|
python
|
openai__openai-python
|
src/openai/types/static_file_chunking_strategy_param.py
|
{
"start": 222,
"end": 692
}
|
class ____(TypedDict, total=False):
chunk_overlap_tokens: Required[int]
"""The number of tokens that overlap between chunks. The default value is `400`.
Note that the overlap must not exceed half of `max_chunk_size_tokens`.
"""
max_chunk_size_tokens: Required[int]
"""The maximum number of tokens in each chunk.
The default value is `800`. The minimum value is `100` and the maximum value is
`4096`.
"""
|
StaticFileChunkingStrategyParam
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.