language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_urls.py | {
"start": 2289,
"end": 8454
} | class ____(ColumnMapExpectation):
"""Expect the column to be a valid url. Maps row values to regex to check if value is a valid url."""
examples = [
{
"data": {
"mostly_urls": [
"http://www.caseycaruso.com",
"http://www.bvp.com",
"http://www.tlccollective.space",
"kittens",
"wwww.googlecom",
],
"valid_urls": [
"http://www.facebook.com",
"http://www.twitter.com",
"http://www.github.com",
"http://www.stackoverflow.com",
"http://www.google.com",
],
},
"tests": [
{
"title": "mostly_valid_urls",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "mostly_urls", "mostly": 0.1},
"out": {
"success": True,
"unexpected_index_list": [3, 4],
"unexpected_list": ["kittens", "wwww.googlecom"],
},
},
{
"title": "valid_urls",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_urls", "mostly": 1},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon"],
"contributors": [ # Github handles for all contributors to this Expectation
"@ckathleen"
],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.to_be_valid_urls"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesToBeValidUrls().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidUrls |
python | huggingface__transformers | tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py | {
"start": 1548,
"end": 13880
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return XLMRobertaXLConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = XLMRobertaXLModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = XLMRobertaXLForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = XLMRobertaXLForCausalLM(config=config).to(torch_device).eval()
# make sure that ids don't start with pad token
mask = input_ids.ne(config.pad_token_id).long()
input_ids = input_ids * mask
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
# make sure that ids don't start with pad token
mask = next_tokens.ne(config.pad_token_id).long()
next_tokens = next_tokens * mask
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = XLMRobertaXLForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = XLMRobertaXLForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = XLMRobertaXLForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| XLMRobertaXLModelTester |
python | pypa__pipenv | pipenv/patched/pip/_vendor/typing_extensions.py | {
"start": 75938,
"end": 90191
} | class ____: ...
# 3.8-3.10
def _create_concatenate_alias(origin, parameters):
if parameters[-1] is ... and sys.version_info < (3, 9, 2):
# Hack: Arguments must be types, replace it with one.
parameters = (*parameters[:-1], _EllipsisDummy)
if sys.version_info >= (3, 10, 3):
concatenate = _ConcatenateGenericAlias(origin, parameters,
_typevar_types=(TypeVar, ParamSpec),
_paramspec_tvars=True)
else:
concatenate = _ConcatenateGenericAlias(origin, parameters)
if parameters[-1] is not _EllipsisDummy:
return concatenate
# Remove dummy again
concatenate.__args__ = tuple(p if p is not _EllipsisDummy else ...
for p in concatenate.__args__)
if sys.version_info < (3, 10):
# backport needs __args__ adjustment only
return concatenate
concatenate.__parameters__ = tuple(p for p in concatenate.__parameters__
if p is not _EllipsisDummy)
return concatenate
# 3.8-3.10
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not (parameters[-1] is ... or isinstance(parameters[-1], ParamSpec)):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable or ellipsis.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = (*(typing._type_check(p, msg) for p in parameters[:-1]),
parameters[-1])
return _create_concatenate_alias(self, parameters)
# 3.11+; Concatenate does not accept ellipsis in 3.10
if sys.version_info >= (3, 11):
Concatenate = typing.Concatenate
# 3.9-3.10
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def Concatenate(self, parameters):
"""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
"""
return _concatenate_getitem(self, parameters)
# 3.8
else:
class _ConcatenateForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
return _concatenate_getitem(self, parameters)
Concatenate = _ConcatenateForm(
'Concatenate',
doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeGuard(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeGuard = _TypeGuardForm(
'TypeGuard',
doc="""Special typing form used to annotate the return type of a user-defined
type guard function. ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results. The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant. The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables. For more information, see
PEP 647 (User-Defined Type Guards).
""")
# 3.13+
if hasattr(typing, 'TypeIs'):
TypeIs = typing.TypeIs
# 3.9
elif sys.version_info[:2] >= (3, 9):
@_ExtensionsSpecialForm
def TypeIs(self, parameters):
"""Special typing form used to annotate the return type of a user-defined
type narrower function. ``TypeIs`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeIs[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeIs`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the intersection of the type inside ``TypeIs`` and the argument's
previously known type.
For example::
def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
return hasattr(val, '__await__')
def f(val: Union[int, Awaitable[int]]) -> int:
if is_awaitable(val):
assert_type(val, Awaitable[int])
else:
assert_type(val, int)
``TypeIs`` also works with type variables. For more information, see
PEP 742 (Narrowing types with TypeIs).
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeIsForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
TypeIs = _TypeIsForm(
'TypeIs',
doc="""Special typing form used to annotate the return type of a user-defined
type narrower function. ``TypeIs`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow. Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code. The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard. Such a function should use ``TypeIs[...]`` as its
return type to alert static type checkers to this intention.
Using ``-> TypeIs`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the intersection of the type inside ``TypeIs`` and the argument's
previously known type.
For example::
def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
return hasattr(val, '__await__')
def f(val: Union[int, Awaitable[int]]) -> int:
if is_awaitable(val):
assert_type(val, Awaitable[int])
else:
assert_type(val, int)
``TypeIs`` also works with type variables. For more information, see
PEP 742 (Narrowing types with TypeIs).
""")
# 3.14+?
if hasattr(typing, 'TypeForm'):
TypeForm = typing.TypeForm
# 3.9
elif sys.version_info[:2] >= (3, 9):
class _TypeFormForm(_ExtensionsSpecialForm, _root=True):
# TypeForm(X) is equivalent to X but indicates to the type checker
# that the object is a TypeForm.
def __call__(self, obj, /):
return obj
@_TypeFormForm
def TypeForm(self, parameters):
"""A special form representing the value that results from the evaluation
of a type expression. This value encodes the information supplied in the
type expression, and it represents the type described by that type expression.
When used in a type expression, TypeForm describes a set of type form objects.
It accepts a single type argument, which must be a valid type expression.
``TypeForm[T]`` describes the set of all type form objects that represent
the type T or types that are assignable to T.
Usage:
def cast[T](typ: TypeForm[T], value: Any) -> T: ...
reveal_type(cast(int, "x")) # int
See PEP 747 for more information.
"""
item = typing._type_check(parameters, f'{self} accepts only a single type.')
return typing._GenericAlias(self, (item,))
# 3.8
else:
class _TypeFormForm(_ExtensionsSpecialForm, _root=True):
def __getitem__(self, parameters):
item = typing._type_check(parameters,
f'{self._name} accepts only a single type')
return typing._GenericAlias(self, (item,))
def __call__(self, obj, /):
return obj
TypeForm = _TypeFormForm(
'TypeForm',
doc="""A special form representing the value that results from the evaluation
of a type expression. This value encodes the information supplied in the
type expression, and it represents the type described by that type expression.
When used in a type expression, TypeForm describes a set of type form objects.
It accepts a single type argument, which must be a valid type expression.
``TypeForm[T]`` describes the set of all type form objects that represent
the type T or types that are assignable to T.
Usage:
def cast[T](typ: TypeForm[T], value: Any) -> T: ...
reveal_type(cast(int, "x")) # int
See PEP 747 for more information.
""")
# Vendored from cpython typing._SpecialFrom
| _EllipsisDummy |
python | django__django | tests/empty/tests.py | {
"start": 62,
"end": 365
} | class ____(TestCase):
def test_empty(self):
m = Empty()
self.assertIsNone(m.id)
m.save()
Empty.objects.create()
self.assertEqual(len(Empty.objects.all()), 2)
self.assertIsNotNone(m.id)
existing = Empty(m.id)
existing.save()
| EmptyModelTests |
python | spyder-ide__spyder | spyder/plugins/editor/panels/edgeline.py | {
"start": 362,
"end": 1941
} | class ____(Panel):
"""Source code editor's edge line (default: 79 columns, PEP8)"""
# --- Qt Overrides
# -----------------------------------------------------------------
def __init__(self):
Panel.__init__(self)
self.columns = (79,)
self.color = Qt.darkGray
def paintEvent(self, event):
"""Override Qt method"""
painter = QPainter(self)
size = self.size()
color = QColor(self.color)
color.setAlphaF(.5)
painter.setPen(color)
for column in self.columns:
# draw edge line at column n + 3 to account for line number margin
x = self.editor.fontMetrics().width(column * '9') + 3
painter.drawLine(x, 0, x, size.height())
def sizeHint(self):
"""Override Qt method."""
return self.size()
# --- Other methods
# -----------------------------------------------------------------
def set_enabled(self, state):
"""Toggle edge line visibility"""
self._enabled = state
self.setVisible(state)
def set_columns(self, columns):
"""Set edge line columns values."""
if isinstance(columns, tuple):
self.columns = columns
elif columns:
columns = str(columns)
self.columns = tuple(int(e) for e in columns.split(','))
self.update()
def update_color(self):
"""
Set edgeline color using syntax highlighter color for comments
"""
self.color = self.editor.highlighter.get_color_name('comment')
| EdgeLine |
python | huggingface__transformers | src/transformers/models/rwkv/modeling_rwkv.py | {
"start": 11589,
"end": 13138
} | class ____(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.config = config
self.layer_id = layer_id
hidden_size = config.hidden_size
intermediate_size = (
config.intermediate_size if config.intermediate_size is not None else 4 * config.hidden_size
)
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))
self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))
self.key = nn.Linear(hidden_size, intermediate_size, bias=False)
self.receptance = nn.Linear(hidden_size, hidden_size, bias=False)
self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
def forward(self, hidden, state=None):
if hidden.size(1) == 1 and state is not None:
shifted = state[0][:, :, self.layer_id]
else:
shifted = self.time_shift(hidden)
if state is not None:
shifted[:, 0] = state[0][:, :, self.layer_id]
key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)
receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)
key = torch.square(torch.relu(self.key(key)))
value = self.value(key)
receptance = torch.sigmoid(self.receptance(receptance))
if state is not None:
state[0][:, :, self.layer_id] = hidden[:, -1]
return receptance * value, state
| RwkvFeedForward |
python | sqlalchemy__sqlalchemy | examples/adjacency_list/adjacency_list.py | {
"start": 530,
"end": 570
} | class ____(DeclarativeBase):
pass
| Base |
python | gevent__gevent | src/gevent/_ffi/callback.py | {
"start": 216,
"end": 1564
} | class ____(object):
__slots__ = ('callback', 'args')
def __init__(self, cb, args):
self.callback = cb
self.args = args
def stop(self):
self.callback = None
self.args = None
close = stop
# Note that __nonzero__ and pending are different
# bool() is used in contexts where we need to know whether to schedule another callback,
# so it's true if it's pending or currently running
# 'pending' has the same meaning as libev watchers: it is cleared before actually
# running the callback
def __bool__(self):
# it's nonzero if it's pending or currently executing
# NOTE: This depends on loop._run_callbacks setting the args property
# to None.
return self.args is not None
@property
def pending(self):
return self.callback is not None
def _format(self):
return ''
def __repr__(self):
result = "<%s at 0x%x" % (self.__class__.__name__, id(self))
if self.pending:
result += " pending"
if self.callback is not None:
result += " callback=%r" % (self.callback, )
if self.args is not None:
result += " args=%r" % (self.args, )
if self.callback is None and self.args is None:
result += " stopped"
return result + ">"
| callback |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_iter.py | {
"start": 2605,
"end": 2744
} | class ____:
def __init__(self, n):
self.n = n
def __iter__(self):
return BasicIterClass(self.n)
| IteratingSequenceClass |
python | spyder-ide__spyder | spyder/api/asyncdispatcher.py | {
"start": 17837,
"end": 21205
} | class ____(Future, typing.Generic[_T]):
"""Represents the result of an asynchronous computation.
This class is a subclass of `concurrent.Future` that adds a `connect`
method to allow attaching callbacks to be executed in the main Qt loop.
"""
QT_SLOT_ATTRIBUTE = "__dispatch_qt_slot__"
_callback_executor = _QCallbackExecutor()
def result(self, timeout: typing.Optional[float] = None) -> _T: # noqa: UP045
"""
Return the result of the call that the future represents.
Parameters
----------
timeout: float | None
The number of seconds to wait for the result. If None, then wait
indefinitely.
Returns
-------
DispatchedFuture@_T
The result of the call that the future represents.
Raises
------
CancelledError
If the future was cancelled.
TimeoutError
If the future didn't finish executing before the given timeout.
Exception
Exception raised by the call that the future represents.
""" # noqa: DOC502
return super().result(timeout=timeout)
def connect(self, fn: typing.Callable[[DispatcherFuture[_T]], None]):
"""Attaches a callable that will be called when the future finishes.
The callable will be called by a thread in the same process in which
it was added if the it was not marked with
`DispatherFuture.QT_SLOT_ATTRIBUTE`.
If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
Parameters
----------
fn: Callable
A callable that will be called with this future's as its only
argument when the future completes.
"""
if getattr(fn, self.QT_SLOT_ATTRIBUTE, False):
def callback(future: DispatcherFuture[_T]):
e = _QCallbackEvent(lambda: fn(future))
QCoreApplication.postEvent(self._callback_executor, e)
self.add_done_callback(callback) # type: ignore[arg-type]
else:
self.add_done_callback(fn) # type: ignore[arg-type]
def run_coroutine_threadsafe(
coro: typing.Coroutine[_T, None, _RT], loop: asyncio.AbstractEventLoop,
) -> DispatcherFuture[_RT]:
"""Submit a coroutine object to a given event loop.
Arguments
---------
coro: Coroutine
The coroutine object to be submitted.
loop: AbstractEventLoop
The event loop to run the coroutine.
Returns
-------
DispatcherFuture
A future object representing the result of the coroutine.
Raises
------
TypeError
If the object is not a coroutine.
"""
if not asyncio.iscoroutine(coro):
msg = "A coroutine object is required"
raise TypeError(msg)
future = DispatcherFuture()
def callback():
try:
_chain_future(asyncio.ensure_future(coro, loop=loop), future)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
if future.set_running_or_notify_cancel():
future.set_exception(exc)
raise
loop.call_soon_threadsafe(callback)
return future
| DispatcherFuture |
python | django-guardian__django-guardian | example_project/articles/views.py | {
"start": 1072,
"end": 1259
} | class ____(PermissionRequiredMixin, UpdateView):
model = Article
permission_required = ["view_article", "change_article"]
fields = ["title", "slug", "content"]
| ArticleUpdateView |
python | Pylons__pyramid | src/pyramid/authentication.py | {
"start": 24227,
"end": 27342
} | class ____(Exception):
"""
Exception raised when a ticket can't be parsed. If we get far enough to
determine what the expected digest should have been, expected is set.
This should not be shown by default, but can be useful for debugging.
"""
def __init__(self, msg, expected=None):
self.expected = expected
Exception.__init__(self, msg)
# this function licensed under the MIT license (stolen from Paste)
def parse_ticket(secret, ticket, ip, hashalg='md5'):
"""
Parse the ticket, returning (timestamp, userid, tokens, user_data).
If the ticket cannot be parsed, a ``BadTicket`` exception will be raised
with an explanation.
"""
ticket = text_(ticket).strip('"')
digest_size = hashlib.new(hashalg).digest_size * 2
digest = ticket[:digest_size]
try:
timestamp = int(ticket[digest_size : digest_size + 8], 16)
except ValueError as e:
raise BadTicket('Timestamp is not a hex integer: %s' % e)
try:
userid, data = ticket[digest_size + 8 :].split('!', 1)
except ValueError:
raise BadTicket('userid is not followed by !')
userid = unquote(userid)
if '!' in data:
tokens, user_data = data.split('!', 1)
else: # pragma: no cover (never generated)
# @@: Is this the right order?
tokens = ''
user_data = data
expected = calculate_digest(
ip, timestamp, secret, userid, tokens, user_data, hashalg
)
# Avoid timing attacks (see
# http://seb.dbzteam.org/crypto/python-oauth-timing-hmac.pdf)
if strings_differ(expected, digest):
raise BadTicket(
'Digest signature is not correct', expected=(expected, digest)
)
tokens = tokens.split(',')
return (timestamp, userid, tokens, user_data)
# this function licensed under the MIT license (stolen from Paste)
def calculate_digest(
ip, timestamp, secret, userid, tokens, user_data, hashalg='md5'
):
secret = bytes_(secret, 'utf-8')
userid = bytes_(userid, 'utf-8')
tokens = bytes_(tokens, 'utf-8')
user_data = bytes_(user_data, 'utf-8')
hash_obj = hashlib.new(hashalg)
# Check to see if this is an IPv6 address
if ':' in ip:
ip_timestamp = ip + str(int(timestamp))
ip_timestamp = bytes_(ip_timestamp)
else:
# encode_ip_timestamp not required, left in for backwards compatibility
ip_timestamp = encode_ip_timestamp(ip, timestamp)
hash_obj.update(
ip_timestamp + secret + userid + b'\0' + tokens + b'\0' + user_data
)
digest = hash_obj.hexdigest()
hash_obj2 = hashlib.new(hashalg)
hash_obj2.update(bytes_(digest) + secret)
return hash_obj2.hexdigest()
# this function licensed under the MIT license (stolen from Paste)
def encode_ip_timestamp(ip, timestamp):
ip_chars = ''.join(map(chr, map(int, ip.split('.'))))
t = int(timestamp)
ts = (
(t & 0xFF000000) >> 24,
(t & 0xFF0000) >> 16,
(t & 0xFF00) >> 8,
t & 0xFF,
)
ts_chars = ''.join(map(chr, ts))
return bytes_(ip_chars + ts_chars)
| BadTicket |
python | PrefectHQ__prefect | src/prefect/cli/deploy/_storage.py | {
"start": 86,
"end": 765
} | class ____:
"""
A shim storage class that allows passing pull steps to a `RunnerDeployment`.
"""
def __init__(self, pull_steps: list[dict[str, Any]]):
self._base_path = Path.cwd()
self.pull_steps = pull_steps
def set_base_path(self, path: Path):
self._base_path = path
@property
def destination(self):
return self._base_path
@property
def pull_interval(self):
return 60
async def pull_code(self):
pass
def to_pull_step(self):
return self.pull_steps
def __eq__(self, other: Any) -> bool:
return self.pull_steps == getattr(other, "pull_steps", None)
| _PullStepStorage |
python | pdm-project__pdm | src/pdm/cli/commands/self_cmd.py | {
"start": 5079,
"end": 7572
} | class ____(BaseCommand):
"""Remove packages from PDM's environment"""
arguments = (verbose_option,)
name = "remove"
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--pip-args",
help="Arguments that will be passed to pip uninstall",
default="",
)
parser.add_argument("-y", "--yes", action="store_true", help="Answer yes on the question")
parser.add_argument("packages", nargs="+", help="Specify one or many package names")
def _resolve_dependencies_to_remove(self, packages: list[str]) -> list[str]:
"""Perform a BFS to find all unneeded dependencies"""
result: set[str] = set()
to_resolve = list(packages)
ws = WorkingSet()
graph = build_dependency_graph(ws, env_spec=EnvSpec.current())
while to_resolve:
temp: list[PackageNode] = []
for name in to_resolve:
key = normalize_name(name)
if key in ws:
result.add(key)
package = PackageNode(key, "0.0.0", {})
if package not in graph:
continue
for dep in graph.iter_children(package):
temp.append(dep)
graph.remove(package)
to_resolve.clear()
for dep in temp:
if not any(graph.iter_parents(dep)) and dep.name != "pdm":
to_resolve.append(dep.name)
return sorted(result)
def handle(self, project: Project, options: argparse.Namespace) -> None:
packages_to_remove = self._resolve_dependencies_to_remove(options.packages)
if not packages_to_remove:
project.core.ui.echo("No package to remove.", err=True)
sys.exit(1)
if not (options.yes or termui.confirm(f"Will remove: {packages_to_remove}, continue?", default=True)):
return
pip_args = ["uninstall", "-y", *shlex.split(options.pip_args), *packages_to_remove]
try:
with project.core.ui.open_spinner(f"Uninstalling packages: [success]{', '.join(options.packages)}[/]"):
run_pip(project, pip_args)
except subprocess.CalledProcessError as e:
project.core.ui.echo("[error]Uninstallation failed:[/]\n" + e.output, err=True)
sys.exit(1)
else:
project.core.ui.echo("[success]Uninstallation succeeds.[/]")
| RemoveCommand |
python | aio-libs__aiohttp | aiohttp/tracing.py | {
"start": 8500,
"end": 8635
} | class ____:
"""Parameters sent by the `on_connection_queued_end` signal"""
@frozen_dataclass_decorator
| TraceConnectionQueuedEndParams |
python | keras-team__keras | keras/src/backend/common/dtypes_test.py | {
"start": 284,
"end": 10366
} | class ____(test_case.TestCase):
"""Test the dtype to verify that the behavior matches JAX."""
ALL_DTYPES = [
x
for x in dtypes.ALLOWED_DTYPES
if x
not in (
"string",
"complex128",
"float64",
"uint64",
"int64",
)
+ dtypes.FLOAT8_TYPES # Remove float8 dtypes for the following tests
] + [None]
if backend.backend() == "torch":
ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint16", "uint32")]
elif backend.backend() == "tensorflow":
# TODO(hongyu): Re-enable uint32 tests once we determine how to handle
# dtypes.result_type(uint32, int*) -> int64 promotion.
# Since TF variables require int64 to be placed on the GPU, we
# exclusively enable the int64 dtype for TF. However, JAX does not
# natively support int64, which prevents us from comparing the dtypes.
ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint32",)]
elif backend.backend() == "openvino":
ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("complex64",)]
@parameterized.named_parameters(
named_product(dtype1=ALL_DTYPES, dtype2=[bool, int, float])
)
def test_result_type_with_python_scalar_types(self, dtype1, dtype2):
import jax.numpy as jnp
out = backend.result_type(dtype1, dtype2)
expected = jnp.result_type(dtype1, dtype2).name
self.assertEqual(out, expected)
@parameterized.named_parameters(
named_product(dtype1=ALL_DTYPES, dtype2=ALL_DTYPES)
)
def test_result_type_with_tensor(self, dtype1, dtype2):
import jax.numpy as jnp
x1 = ops.ones((1,), dtype=dtype1)
x2 = ops.ones((1,), dtype=dtype2)
x1_jax = jnp.ones((1,), dtype=dtype1)
x2_jax = jnp.ones((1,), dtype=dtype2)
out = backend.result_type(x1.dtype, x2.dtype)
expected = jnp.result_type(x1_jax, x2_jax).name
self.assertEqual(out, expected)
@parameterized.named_parameters(
named_product(
dtype=[
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
]
)
)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="TensorFlow only"
)
def test_result_type_with_int64(self, dtype):
# https://github.com/keras-team/keras/issues/21677
x1 = ops.ones((1,), dtype="int64")
x2 = ops.ones((1,), dtype=dtype)
out = backend.result_type(x1.dtype, x2.dtype)
self.assertEqual(out, "int64")
@parameterized.named_parameters(
named_product(
dtype=[
"float16",
"bfloat16",
"float32",
"float64",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
]
)
)
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="TensorFlow only"
)
def test_result_type_with_float64(self, dtype):
# Float types have a similar issue as int64 in TF.:
# https://github.com/keras-team/keras/issues/21677
x1 = ops.ones((1,), dtype="float64")
x2 = ops.ones((1,), dtype=dtype)
out = backend.result_type(x1.dtype, x2.dtype)
self.assertEqual(out, "float64")
def test_result_type_with_none(self):
import jax.numpy as jnp
self.assertEqual(backend.result_type(None), jnp.result_type(None).name)
def test_result_type_empty_list(self):
self.assertEqual(backend.result_type(), "float32")
def test_respect_weak_type_for_bool(self):
self.assertEqual(dtypes._respect_weak_type("bool", True), "bool")
def test_respect_weak_type_for_int(self):
self.assertEqual(dtypes._respect_weak_type("int32", True), "int")
def test_respect_weak_type_for_float(self):
self.assertEqual(dtypes._respect_weak_type("float32", True), "float")
def test_resolve_weak_type_for_bfloat16(self):
self.assertEqual(dtypes._resolve_weak_type("bfloat16"), "float32")
def test_resolve_weak_type_for_bfloat16_with_precision(self):
self.assertEqual(
dtypes._resolve_weak_type("bfloat16", precision="64"), "float64"
)
def test_respect_weak_type_for_complex64(self):
self.assertAllEqual(
dtypes._respect_weak_type("complex64", True), "complex"
)
def test_respect_weak_type_for_complex128(self):
self.assertAllEqual(
dtypes._respect_weak_type("complex128", True), "complex"
)
def test_invalid_dtype_for_keras_promotion(self):
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion."
):
dtypes._least_upper_bound("invalid_dtype")
def test_resolve_weak_type_for_invalid_dtype(self):
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `dtype`. Expected one of"
):
dtypes._resolve_weak_type("invalid_dtype")
def test_resolve_weak_type_for_invalid_precision(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument `precision`. Expected one of",
):
dtypes._resolve_weak_type("int32", precision="invalid_precision")
def test_cycle_detection_in_make_lattice_upper_bounds(self):
original_lattice_function = dtypes._type_promotion_lattice
def mock_lattice():
lattice = original_lattice_function()
lattice["int32"].append("float32")
lattice["float32"].append("int32")
return lattice
dtypes._type_promotion_lattice = mock_lattice
with self.assertRaisesRegex(
ValueError, "cycle detected in type promotion lattice for node"
):
dtypes._make_lattice_upper_bounds()
dtypes._type_promotion_lattice = original_lattice_function
def test_respect_weak_type_for_invalid_dtype(self):
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `dtype`. Expected one of"
):
dtypes._respect_weak_type("invalid_dtype", True)
def test_invalid_dtype_in_least_upper_bound(self):
invalid_dtype = "non_existent_dtype"
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion"
):
dtypes._least_upper_bound(invalid_dtype)
def test_empty_lub_in_least_upper_bound(self):
dtype1 = "float32"
dtype2 = "int32"
with patch.dict(
dtypes.LATTICE_UPPER_BOUNDS,
{"float32": set(), "int32": set()},
clear=True,
):
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound(dtype1, dtype2)
def test_valid_dtype_leading_to_single_lub_element(self):
self.assertEqual(
dtypes._least_upper_bound("float32", "int32"), "float32"
)
def test_valid_dtype_leading_to_keyerror_and_valueerror(self):
invalid_dtype = "non_existent_dtype"
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion"
):
dtypes._least_upper_bound(invalid_dtype)
def test_resolve_weak_type_bool(self):
self.assertEqual(dtypes._resolve_weak_type("bool"), "bool")
def test_resolve_weak_type_int(self):
self.assertEqual(
dtypes._resolve_weak_type("int32", precision="32"), "int32"
)
self.assertEqual(
dtypes._resolve_weak_type("int64", precision="64"), "int64"
)
def test_resolve_weak_type_uint(self):
self.assertEqual(
dtypes._resolve_weak_type("uint32", precision="32"), "uint32"
)
self.assertEqual(
dtypes._resolve_weak_type("uint64", precision="64"), "uint64"
)
def test_resolve_weak_type_float(self):
self.assertEqual(
dtypes._resolve_weak_type("float32", precision="32"), "float32"
)
self.assertEqual(
dtypes._resolve_weak_type("float64", precision="64"), "float64"
)
def test_least_upper_bound_ensure_order_independence(self):
# Test to ensure _least_upper_bound is order-independent.
result1 = dtypes._least_upper_bound("float32", "int32")
result2 = dtypes._least_upper_bound("int32", "float32")
self.assertEqual(result1, result2)
def test_least_upper_bound_single_element(self):
dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = {"test_dtype"}
self.assertEqual(dtypes._least_upper_bound("test_dtype"), "test_dtype")
def test_least_upper_bound_no_element(self):
dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = set()
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound("test_dtype")
def test_least_upper_bound_with_no_common_upper_bound(self):
with patch.dict(
dtypes.LATTICE_UPPER_BOUNDS,
{"test_dtype1": set(), "test_dtype2": set()},
clear=True,
):
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound("test_dtype1", "test_dtype2")
def test_invalid_float8_dtype(self):
with self.assertRaisesRegex(
ValueError, "There is no implicit conversions from float8 dtypes"
):
dtypes.result_type("float8_e4m3fn", "bfloat16")
with self.assertRaisesRegex(
ValueError, "There is no implicit conversions from float8 dtypes"
):
dtypes.result_type("float8_e5m2", "bfloat16")
| DtypesTest |
python | mlflow__mlflow | mlflow/models/evaluation/evaluators/shap.py | {
"start": 1000,
"end": 12105
} | class ____(BuiltInEvaluator):
"""
A built-in evaluator to get SHAP explainability insights for classifier and regressor models.
This evaluator often run with the main evaluator for the model like ClassifierEvaluator.
"""
name = "shap"
@classmethod
def can_evaluate(cls, *, model_type, evaluator_config, **kwargs):
return model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR) and evaluator_config.get(
"log_model_explainability", True
)
def _evaluate(
self,
model: Optional["mlflow.pyfunc.PyFuncModel"],
extra_metrics: list[EvaluationMetric],
custom_artifacts=None,
**kwargs,
) -> EvaluationResult | None:
if isinstance(model, _ServedPyFuncModel):
_logger.warning(
"Skipping model explainability because a model server is used for environment "
"restoration."
)
return
model_loader_module, raw_model = _extract_raw_model(model)
if model_loader_module == "mlflow.spark":
# TODO: Shap explainer need to manipulate on each feature values,
# but spark model input dataframe contains Vector type feature column
# which shap explainer does not support.
# To support this, we need expand the Vector type feature column into
# multiple scalar feature columns and pass it to shap explainer.
_logger.warning(
"Logging model explainability insights is not currently supported for PySpark "
"models."
)
return
self.y_true = self.dataset.labels_data
self.label_list = self.evaluator_config.get("label_list")
self.pos_label = self.evaluator_config.get("pos_label")
if not (np.issubdtype(self.y_true.dtype, np.number) or self.y_true.dtype == np.bool_):
# Note: python bool type inherits number type but np.bool_ does not inherit np.number.
_logger.warning(
"Skip logging model explainability insights because it requires all label "
"values to be numeric or boolean."
)
return
algorithm = self.evaluator_config.get("explainability_algorithm", None)
if algorithm is not None and algorithm not in _SUPPORTED_SHAP_ALGORITHMS:
raise MlflowException(
message=f"Specified explainer algorithm {algorithm} is unsupported. Currently only "
f"support {','.join(_SUPPORTED_SHAP_ALGORITHMS)} algorithms.",
error_code=INVALID_PARAMETER_VALUE,
)
if algorithm != "kernel":
feature_dtypes = list(self.X.get_original().dtypes)
for feature_dtype in feature_dtypes:
if not np.issubdtype(feature_dtype, np.number):
_logger.warning(
"Skip logging model explainability insights because the shap explainer "
f"{algorithm} requires all feature values to be numeric, and each feature "
"column must only contain scalar values."
)
return
try:
import shap
from matplotlib import pyplot
except ImportError:
_logger.warning(
"SHAP or matplotlib package is not installed, so model explainability insights "
"will not be logged."
)
return
if Version(shap.__version__) < Version("0.40"):
_logger.warning(
"Shap package version is lower than 0.40, Skip log model explainability."
)
return
sample_rows = self.evaluator_config.get(
"explainability_nsamples", _DEFAULT_SAMPLE_ROWS_FOR_SHAP
)
X_df = self.X.copy_to_avoid_mutation()
sampled_X = shap.sample(X_df, sample_rows, random_state=0)
mode_or_mean_dict = _compute_df_mode_or_mean(X_df)
sampled_X = sampled_X.fillna(mode_or_mean_dict)
# shap explainer might call provided `predict_fn` with a `numpy.ndarray` type
# argument, this might break some model inference, so convert the argument into
# a pandas dataframe.
# The `shap_predict_fn` calls model's predict function, we need to restore the input
# dataframe with original column names, because some model prediction routine uses
# the column name.
predict_fn = _extract_predict_fn(model)
shap_predict_fn = functools.partial(
_shap_predict_fn, predict_fn=predict_fn, feature_names=self.dataset.feature_names
)
if self.label_list is None:
# If label list is not specified, infer label list from model output.
# We need to copy the input data as the model might mutate the input data.
y_pred = predict_fn(X_df.copy()) if predict_fn else self.dataset.predictions_data
self.label_list = np.unique(np.concatenate([self.y_true, y_pred]))
try:
if algorithm:
if algorithm == "kernel":
# We need to lazily import shap, so lazily import `_PatchedKernelExplainer`
from mlflow.models.evaluation._shap_patch import _PatchedKernelExplainer
kernel_link = self.evaluator_config.get(
"explainability_kernel_link", "identity"
)
if kernel_link not in ["identity", "logit"]:
raise ValueError(
"explainability_kernel_link config can only be set to 'identity' or "
f"'logit', but got '{kernel_link}'."
)
background_X = shap.sample(X_df, sample_rows, random_state=3)
background_X = background_X.fillna(mode_or_mean_dict)
explainer = _PatchedKernelExplainer(
shap_predict_fn, background_X, link=kernel_link
)
else:
explainer = shap.Explainer(
shap_predict_fn,
sampled_X,
feature_names=self.dataset.feature_names,
algorithm=algorithm,
)
else:
if (
raw_model
and not len(self.label_list) > 2
and not isinstance(raw_model, sk_Pipeline)
):
# For mulitnomial classifier, shap.Explainer may choose Tree/Linear explainer
# for raw model, this case shap plot doesn't support it well, so exclude the
# multinomial_classifier case here.
explainer = shap.Explainer(
raw_model, sampled_X, feature_names=self.dataset.feature_names
)
else:
# fallback to default explainer
explainer = shap.Explainer(
shap_predict_fn, sampled_X, feature_names=self.dataset.feature_names
)
_logger.info(f"Shap explainer {explainer.__class__.__name__} is used.")
if algorithm == "kernel":
shap_values = shap.Explanation(
explainer.shap_values(sampled_X), feature_names=self.dataset.feature_names
)
else:
shap_values = explainer(sampled_X)
except Exception as e:
# Shap evaluation might fail on some edge cases, e.g., unsupported input data values
# or unsupported model on specific shap explainer. Catch exception to prevent it
# breaking the whole `evaluate` function.
if not self.evaluator_config.get("ignore_exceptions", True):
raise e
_logger.warning(
f"Shap evaluation failed. Reason: {e!r}. "
"Set logging level to DEBUG to see the full traceback."
)
_logger.debug("", exc_info=True)
return
if self.evaluator_config.get("log_explainer", False):
try:
mlflow.shap.log_explainer(explainer, name="explainer")
except Exception as e:
# TODO: The explainer saver is buggy, if `get_underlying_model_flavor` return
# "unknown", then fallback to shap explainer saver, and shap explainer will call
# `model.save` for sklearn model, there is no `.save` method, so error will
# happen.
_logger.warning(
f"Logging explainer failed. Reason: {e!r}. "
"Set logging level to DEBUG to see the full traceback."
)
_logger.debug("", exc_info=True)
def _adjust_color_bar():
pyplot.gcf().axes[-1].set_aspect("auto")
pyplot.gcf().axes[-1].set_box_aspect(50)
def _adjust_axis_tick():
pyplot.xticks(fontsize=10)
pyplot.yticks(fontsize=10)
def plot_beeswarm():
shap.plots.beeswarm(shap_values, show=False, color_bar=True)
_adjust_color_bar()
_adjust_axis_tick()
with _suppress_class_imbalance_errors(ValueError, log_warning=False):
self._log_image_artifact(
plot_beeswarm,
"shap_beeswarm_plot",
)
def plot_summary():
shap.summary_plot(shap_values, show=False, color_bar=True)
_adjust_color_bar()
_adjust_axis_tick()
with _suppress_class_imbalance_errors(TypeError, log_warning=False):
self._log_image_artifact(
plot_summary,
"shap_summary_plot",
)
def plot_feature_importance():
shap.plots.bar(shap_values, show=False)
_adjust_axis_tick()
with _suppress_class_imbalance_errors(IndexError, log_warning=False):
self._log_image_artifact(
plot_feature_importance,
"shap_feature_importance_plot",
)
return EvaluationResult(
metrics=self.aggregate_metrics,
artifacts=self.artifacts,
run_id=self.run_id,
)
def _compute_df_mode_or_mean(df):
"""
Compute mean (for continuous columns) and compute mode (for other columns) for the
input dataframe, return a dict, key is column name, value is the corresponding mode or
mean value, this function calls `_is_continuous` to determine whether the
column is continuous column.
"""
continuous_cols = [c for c in df.columns if _is_continuous(df[c])]
df_cont = df[continuous_cols]
df_non_cont = df.drop(continuous_cols, axis=1)
means = {} if df_cont.empty else df_cont.mean().to_dict()
modes = {} if df_non_cont.empty else df_non_cont.mode().loc[0].to_dict()
return {**means, **modes}
| ShapEvaluator |
python | walkccc__LeetCode | solutions/2262. Total Appeal of A String/2262-2.py | {
"start": 0,
"end": 207
} | class ____:
def appealSum(self, s: str) -> int:
ans = 0
lastSeen = {}
for i, c in enumerate(s):
ans += (i - lastSeen.get(c, -1)) * (len(s) - i)
lastSeen[c] = i
return ans
| Solution |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 39555,
"end": 39669
} | class ____(TestEnPh):
def setup_faker(self):
self.fake = Faker("fil_PH")
Faker.seed(0)
| TestFilPh |
python | walkccc__LeetCode | solutions/3357. Minimize the Maximum Adjacent Element Difference/3357.py | {
"start": 0,
"end": 3087
} | class ____:
def minDifference(self, nums: list[int]) -> int:
maxPositiveGap = 0
mn = 1_000_000_000
mx = 0
for a, b in itertools.pairwise(nums):
if (a == -1) != (b == -1):
positive = max(a, b)
mn = min(mn, positive)
mx = max(mx, positive)
else:
maxPositiveGap = max(maxPositiveGap, abs(a - b))
l = maxPositiveGap
r = (mx - mn + 1) // 2
return bisect.bisect_left(
range(l, r), True,
key=lambda m: self._check(nums, m, mn + m, mx - m)) + l
def _check(self, nums: list[int], m: int, x: int, y: int) -> bool:
"""
Returns True if it's possible have `m` as maximum absolute difference
between adjacent numbers, where -1s are replaced with `x` or `y`.
"""
gapLength = 0
prev = 0
for num in nums:
if num == -1:
gapLength += 1
continue
if prev > 0 and gapLength > 0:
if gapLength == 1 and not self._checkSingleGap(prev, num, m, x, y):
return False
if gapLength > 1 and not self._checkMultipleGaps(prev, num, m, x, y):
return False
prev = num
gapLength = 0
# Check leading gaps
if nums[0] == -1:
num = next((num for num in nums if num != -1), -1)
if num != -1 and not self._checkBoundaryGaps(num, m, x, y):
return False
# Check trailing gaps
if nums[-1] == -1:
num = next((num for num in reversed(nums) if num != -1), -1)
if num != -1 and not self._checkBoundaryGaps(num, m, x, y):
return False
return True
def _checkSingleGap(self, a: int, b: int, m: int, x: int, y: int) -> bool:
"""
Returns true if it's possible to have at most `m` as the minimized maximum
difference for a sequence with a single -1 between two numbers.
e.g. [a, -1, b] can be filled with either x or y.
"""
gapWithX = max(abs(a - x), abs(b - x)) # [a, x, b]
gapWithY = max(abs(a - y), abs(b - y)) # [a, y, b]
return min(gapWithX, gapWithY) <= m
def _checkMultipleGaps(self, a: int, b: int, m: int, x: int, y: int) -> bool:
"""
Returns true if it's possible to have at most `m` as the minimized maximum
difference for a sequence with multiple -1s between two numbers.
e.g. [a, -1, -1, ..., -1, b] can be filled with x and y.
"""
ax = abs(a - x)
ay = abs(a - y)
bx = abs(b - x)
by = abs(b - y)
xy = abs(x - y)
gapAllX = max(ax, bx) # [a, x, x, ..., x, b]
gapAllY = max(ay, by) # [a, y, y, ..., y, b]
gapXToY = max(ax, xy, by) # [a, x, ..., y, b]
gapYToX = max(ay, xy, bx) # [a, y, ..., x, b]
return min(gapAllX, gapAllY, gapXToY, gapYToX) <= m
def _checkBoundaryGaps(self, a: int, m: int, x: int, y: int) -> bool:
"""
Returns true if it's possible to have at most `m` as the minimized maximum
difference for a boundary sequence starting or ending with -1s.
e.g. [a, -1, -1, ...] or [..., -1, -1, a].
"""
gapAllX = abs(a - x) # [x, x, ..., x, a]
gapAllY = abs(a - y) # [y, y, ..., y, a]
return min(gapAllX, gapAllY) <= m
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_cairo.py | {
"start": 18476,
"end": 18618
} | class ____(_Backend):
backend_version = cairo.version
FigureCanvas = FigureCanvasCairo
FigureManager = FigureManagerBase
| _BackendCairo |
python | psf__black | src/blib2to3/pgen2/parse.py | {
"start": 1289,
"end": 3723
} | class ____:
def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
self.parser = parser
self._ilabels = ilabels
self.context = context # not really matter
self._dead_ilabels: set[int] = set()
self._start_point = self.parser.stack
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
@property
def ilabels(self) -> set[int]:
return self._dead_ilabels.symmetric_difference(self._ilabels)
@contextmanager
def switch_to(self, ilabel: int) -> Iterator[None]:
with self.backtrack():
self.parser.stack = self._points[ilabel]
try:
yield
except ParseError:
self._dead_ilabels.add(ilabel)
finally:
self.parser.stack = self._start_point
@contextmanager
def backtrack(self) -> Iterator[None]:
"""
Use the node-level invariant ones for basic parsing operations (push/pop/shift).
These still will operate on the stack; but they won't create any new nodes, or
modify the contents of any other existing nodes.
This saves us a ton of time when we are backtracking, since we
want to restore to the initial state as quick as possible, which
can only be done by having as little mutatations as possible.
"""
is_backtracking = self.parser.is_backtracking
try:
self.parser.is_backtracking = True
yield
finally:
self.parser.is_backtracking = is_backtracking
def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
for ilabel in self.ilabels:
with self.switch_to(ilabel):
if raw:
self.parser._addtoken(ilabel, tok_type, tok_val, self.context)
else:
self.parser.addtoken(tok_type, tok_val, self.context)
def determine_route(
self, value: str | None = None, force: bool = False
) -> int | None:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
raise ParseError("bad input", most_successful_ilabel, value, self.context)
ilabel, *rest = alive_ilabels
if force or not rest:
return ilabel
else:
return None
| Recorder |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_dataflow.py | {
"start": 6630,
"end": 13373
} | class ____:
def setup_method(self):
self.dataflow_hook = DataflowHook(gcp_conn_id="google_cloud_default")
self.dataflow_hook.beam_hook = MagicMock()
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.DataflowHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.dataflow.build")
def test_dataflow_client_creation(self, mock_build, mock_authorize):
result = self.dataflow_hook.get_conn()
mock_build.assert_called_once_with(
"dataflow", "v1b3", http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
@pytest.mark.parametrize(
("expected_result", "job_name", "append_job_name"),
[
(JOB_NAME, JOB_NAME, False),
("test-example", "test_example", False),
(f"test-dataflow-pipeline-{MOCK_UUID_PREFIX}", JOB_NAME, True),
(f"test-example-{MOCK_UUID_PREFIX}", "test_example", True),
("df-job-1", "df-job-1", False),
("df-job", "df-job", False),
("dfjob", "dfjob", False),
("dfjob1", "dfjob1", False),
],
)
@mock.patch(DATAFLOW_STRING.format("uuid.uuid4"), return_value=MOCK_UUID)
def test_valid_dataflow_job_name(self, _, expected_result, job_name, append_job_name):
assert (
self.dataflow_hook.build_dataflow_job_name(job_name=job_name, append_job_name=append_job_name)
== expected_result
)
@pytest.mark.parametrize("job_name", ["1dfjob@", "dfjob@", "df^jo"])
def test_build_dataflow_job_name_with_invalid_value(self, job_name):
with pytest.raises(ValueError, match=rf"Invalid job_name \({re.escape(job_name)}\);"):
self.dataflow_hook.build_dataflow_job_name(job_name=job_name, append_job_name=False)
@mock.patch(DATAFLOW_STRING.format("_DataflowJobsController"))
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_get_job(self, mock_conn, mock_dataflowjob):
method_fetch_job_by_id = mock_dataflowjob.return_value.fetch_job_by_id
self.dataflow_hook.get_job(job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format("_DataflowJobsController"))
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_fetch_job_metrics_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_metrics_by_id = mock_dataflowjob.return_value.fetch_job_metrics_by_id
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_metrics_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_fetch_job_metrics_by_id_controller(self, mock_conn):
method_get_metrics = (
mock_conn.return_value.projects.return_value.locations.return_value.jobs.return_value.getMetrics
)
self.dataflow_hook.fetch_job_metrics_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
method_get_metrics.return_value.execute.assert_called_once_with(num_retries=0)
method_get_metrics.assert_called_once_with(
jobId=TEST_JOB_ID, projectId=TEST_PROJECT_ID, location=TEST_LOCATION
)
@mock.patch(DATAFLOW_STRING.format("_DataflowJobsController"))
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_fetch_job_messages_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_messages_by_id = mock_dataflowjob.return_value.fetch_job_messages_by_id
self.dataflow_hook.fetch_job_messages_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_messages_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format("_DataflowJobsController"))
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_fetch_job_autoscaling_events_by_id(self, mock_conn, mock_dataflowjob):
method_fetch_job_autoscaling_events_by_id = (
mock_dataflowjob.return_value.fetch_job_autoscaling_events_by_id
)
self.dataflow_hook.fetch_job_autoscaling_events_by_id(
job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_autoscaling_events_by_id.assert_called_once_with(TEST_JOB_ID)
@mock.patch(DATAFLOW_STRING.format("_DataflowJobsController"))
@mock.patch(DATAFLOW_STRING.format("DataflowHook.get_conn"))
def test_wait_for_done(self, mock_conn, mock_dataflowjob):
method_wait_for_done = mock_dataflowjob.return_value.wait_for_done
self.dataflow_hook.wait_for_done(
job_name="JOB_NAME",
project_id=TEST_PROJECT_ID,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
multiple_jobs=False,
)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
name="JOB_NAME",
location=TEST_LOCATION,
poll_sleep=self.dataflow_hook.poll_sleep,
job_id=TEST_JOB_ID,
num_retries=self.dataflow_hook.num_retries,
multiple_jobs=False,
drain_pipeline=self.dataflow_hook.drain_pipeline,
cancel_timeout=self.dataflow_hook.cancel_timeout,
wait_until_finished=self.dataflow_hook.wait_until_finished,
)
method_wait_for_done.assert_called_once_with()
@pytest.mark.db_test
| TestDataflowHook |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/back_populates/tutorial001_py310.py | {
"start": 85,
"end": 279
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: list["Hero"] = Relationship()
| Team |
python | allegroai__clearml | clearml/backend_api/services/v2_9/auth.py | {
"start": 12498,
"end": 14143
} | class ____(Request):
"""
Get a token based on supplied credentials (key/secret).
Intended for use by users with key/secret credentials that wish to obtain a token
for use with other services. Token will be limited by the same permissions that
exist for the credentials used in this call.
:param expiration_sec: Requested token expiration time in seconds. Not
guaranteed, might be overridden by the service
:type expiration_sec: int
"""
_service = "auth"
_action = "login"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"expiration_sec": {
"description": "Requested token expiration time in seconds. \n Not guaranteed, might be overridden by the service",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, expiration_sec: Optional[int] = None, **kwargs: Any) -> None:
super(LoginRequest, self).__init__(**kwargs)
self.expiration_sec = expiration_sec
@schema_property("expiration_sec")
def expiration_sec(self) -> Optional[int]:
return self._property_expiration_sec
@expiration_sec.setter
def expiration_sec(self, value: Optional[int]) -> None:
if value is None:
self._property_expiration_sec = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "expiration_sec", six.integer_types)
self._property_expiration_sec = value
| LoginRequest |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_setup_teardown.py | {
"start": 542,
"end": 663
} | class ____:
def setup_example(self):
self.setups = getattr(self, "setups", 0)
self.setups += 1
| HasSetup |
python | django__django | tests/migrations2/test_migrations_2_squashed_with_replaces/0001_squashed_0002.py | {
"start": 43,
"end": 795
} | class ____(migrations.Migration):
replaces = [
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
]
operations = [
migrations.CreateModel(
"OtherAuthor",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
],
),
migrations.CreateModel(
"OtherBook",
[
("id", models.AutoField(primary_key=True)),
(
"author",
models.ForeignKey(
"migrations2.OtherAuthor", models.SET_NULL, null=True
),
),
],
),
]
| Migration |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 96079,
"end": 96964
} | class ____(Request):
"""
:param task: Task ID
:type task: str
"""
_service = "events"
_action = "get_vector_metrics_and_variants"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, **kwargs: Any) -> None:
super(GetVectorMetricsAndVariantsRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetVectorMetricsAndVariantsRequest |
python | PrefectHQ__prefect | src/prefect/deployments/steps/core.py | {
"start": 1598,
"end": 10348
} | class ____(Exception):
"""
Raised when a step fails to execute.
"""
def _strip_version(requirement: str) -> str:
"""
Strips the version from a requirement string.
Args:
requirement: A requirement string, e.g. "requests>=2.0.0"
Returns:
The package name, e.g. "requests"
Examples:
```python
_strip_version("s3fs>=2.0.0<3.0.0")
# "s3fs"
```
"""
# split on any of the characters in the set [<>=!~]
# and return the first element which will be the package name
return re.split(r"[<>=!~]", requirement)[0].strip()
def _get_function_for_step(
fully_qualified_name: str, requires: str | list[str] | None = None
):
if not isinstance(requires, list):
packages = [requires] if requires else []
else:
packages = requires
try:
for package in packages:
import_module(_strip_version(package).replace("-", "_"))
step_func = import_object(fully_qualified_name)
return step_func
except ImportError:
if requires:
print(
f"Unable to load step function: {fully_qualified_name}. Attempting"
f" install of {requires}."
)
else:
raise
try:
packages = [
KNOWN_EXTRAS_FOR_PACKAGES.get(package, package)
for package in packages
if package
]
install_packages(packages, stream_output=True)
except subprocess.CalledProcessError:
get_logger("deployments.steps.core").warning(
"Unable to install required packages for %s", fully_qualified_name
)
step_func = import_object(fully_qualified_name)
return step_func
async def run_step(
step: dict[str, Any], upstream_outputs: dict[str, Any] | None = None
) -> dict[str, Any]:
"""
Runs a step, returns the step's output.
Steps are assumed to be in the format `{"importable.func.name": {"kwarg1": "value1", ...}}`.
The 'id and 'requires' keywords are reserved for specific purposes and will be removed from the
inputs before passing to the step function:
This keyword is used to specify packages that should be installed before running the step.
"""
fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)
upstream_outputs = upstream_outputs or {}
if len(step.keys()) > 1:
raise ValueError(
f"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}"
)
keywords = {
keyword: inputs.pop(keyword)
for keyword in RESERVED_KEYWORDS
if keyword in inputs
}
inputs = apply_values(inputs, upstream_outputs)
inputs = await resolve_block_document_references(inputs)
inputs = await resolve_variables(inputs)
inputs = apply_values(inputs, os.environ)
step_func = _get_function_for_step(fqn, requires=keywords.get("requires"))
result = await from_async.call_soon_in_new_thread(
Call.new(step_func, **inputs)
).aresult()
return result
async def run_steps(
steps: list[dict[str, Any]],
upstream_outputs: dict[str, Any] | None = None,
print_function: Any = print,
deployment: Any | None = None,
flow_run: Any | None = None,
logger: Any | None = None,
) -> dict[str, Any]:
upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {}
for step_index, step in enumerate(steps):
if not step:
continue
fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)
step_name = fqn.split(".")[-1]
print_function(f" > Running {step_name} step...")
# SECURITY: Serialize inputs BEFORE running the step (and thus before templating).
# This ensures that the event payload contains template strings like
# "{{ prefect.blocks.secret.api-key }}" rather than resolved secret values.
# Templating (which resolves blocks, variables, and env vars) happens inside
# run_step(), so by serializing here we prevent secrets from leaking in events.
serialized_step = {
"index": step_index,
"qualified_name": fqn,
"step_name": step_name,
"id": inputs.get("id"),
"inputs": inputs, # Keep all inputs including reserved keywords like 'requires'
}
try:
# catch warnings to ensure deprecation warnings are printed
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter(
"always",
category=PrefectDeprecationWarning,
)
warnings.simplefilter(
"always",
category=DeprecationWarning,
)
step_output = await run_step(step, upstream_outputs)
if w:
printed_messages = []
for warning in w:
message = str(warning.message)
# prevent duplicate warnings from being printed
if message not in printed_messages:
try:
# try using rich styling
print_function(message, style="yellow")
except Exception:
# default to printing without styling
print_function(message)
printed_messages.append(message)
if not isinstance(step_output, dict):
if PREFECT_DEBUG_MODE:
get_logger().warning(
"Step function %s returned unexpected type: %s",
fqn,
type(step_output),
)
continue
# store step output under step id to prevent clobbering
if inputs.get("id"):
upstream_outputs[inputs.get("id")] = step_output
upstream_outputs.update(step_output)
# Emit success event for this step
await _emit_pull_step_event(
serialized_step,
event_type="prefect.flow-run.pull-step.executed",
deployment=deployment,
flow_run=flow_run,
logger=logger,
)
except Exception as exc:
# Emit failure event for this step
await _emit_pull_step_event(
serialized_step,
event_type="prefect.flow-run.pull-step.failed",
deployment=deployment,
flow_run=flow_run,
logger=logger,
)
raise StepExecutionError(f"Encountered error while running {fqn}") from exc
return upstream_outputs
def _get_step_fully_qualified_name_and_inputs(step: dict) -> tuple[str, dict]:
step = deepcopy(step)
return step.popitem()
async def _emit_pull_step_event(
serialized_step: dict[str, Any],
*,
event_type: str,
deployment: Any | None = None,
flow_run: Any | None = None,
logger: Any | None = None,
) -> None:
# Get flow_run_id from flow_run param or environment
flow_run_id = None
if flow_run:
flow_run_id = flow_run.id
else:
# Read directly from environment variable
flow_run_id_str = os.getenv("PREFECT__FLOW_RUN_ID")
if flow_run_id_str:
flow_run_id = UUID(flow_run_id_str)
if not flow_run_id:
return
# Build related resources
related: list[RelatedResource] = []
if deployment:
related.append(
RelatedResource(
{
"prefect.resource.id": f"prefect.deployment.{deployment.id}",
"prefect.resource.role": "deployment",
}
)
)
try:
# Use events client directly with checkpoint_every=1 to avoid buffering issues
async with get_events_client(checkpoint_every=1) as events_client:
await events_client.emit(
Event(
event=event_type,
resource=Resource(
{
"prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
}
),
related=related,
payload=serialized_step,
)
)
except Exception:
if logger:
logger.warning(
"Failed to emit pull-step event for flow run %s", flow_run_id
)
else:
get_logger(__name__).warning(
"Failed to emit pull-step event for flow run %s", flow_run_id
)
| StepExecutionError |
python | doocs__leetcode | solution/0000-0099/0023.Merge k Sorted Lists/Solution.py | {
"start": 151,
"end": 608
} | class ____:
def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
setattr(ListNode, "__lt__", lambda a, b: a.val < b.val)
pq = [head for head in lists if head]
heapify(pq)
dummy = cur = ListNode()
while pq:
node = heappop(pq)
if node.next:
heappush(pq, node.next)
cur.next = node
cur = cur.next
return dummy.next
| Solution |
python | readthedocs__readthedocs.org | readthedocs/projects/admin.py | {
"start": 1985,
"end": 2315
} | class ____(ReadOnlyInlineMixin, admin.TabularInline):
"""Version inline relationship view for :py:class:`ProjectAdmin`."""
formset = VersionInlineFormSet
model = Version
classes = ["collapse"]
def get_queryset(self, request):
return super().get_queryset(request).select_related("project")
| VersionInline |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 17656,
"end": 18865
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(ASSET_STR)
def test_execute(self, asset_mock, hook_mock):
op = DataplexCreateAssetOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
zone_id=ZONE_ID,
asset_id=ASSET_ID,
body={},
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_operation.return_value = None
asset_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.create_asset.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
zone_id=ZONE_ID,
asset_id=ASSET_ID,
body={},
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCreateAssetOperator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 222768,
"end": 223384
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("CheckSuiteEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("CheckSuite"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| CheckSuiteConnection |
python | run-llama__llama_index | llama-index-core/llama_index/core/langchain_helpers/agents/tools.py | {
"start": 1383,
"end": 2476
} | class ____(BaseTool):
"""Tool for querying a LlamaIndex."""
# NOTE: name/description still needs to be set
query_engine: BaseQueryEngine
return_sources: bool = False
@classmethod
def from_tool_config(cls, tool_config: IndexToolConfig) -> "LlamaIndexTool":
"""Create a tool from a tool config."""
return_sources = tool_config.tool_kwargs.pop("return_sources", False)
return cls(
query_engine=tool_config.query_engine,
name=tool_config.name,
description=tool_config.description,
return_sources=return_sources,
**tool_config.tool_kwargs,
)
def _run(self, input: str) -> str:
response = self.query_engine.query(input)
if self.return_sources:
return _get_response_with_sources(response)
return str(response)
async def _arun(self, input: str) -> str:
response = await self.query_engine.aquery(input)
if self.return_sources:
return _get_response_with_sources(response)
return str(response)
| LlamaIndexTool |
python | django__django | tests/schema/tests.py | {
"start": 2169,
"end": 245269
} | class ____(TransactionTestCase):
"""
Tests for the schema-alteration code.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author,
AuthorCharFieldWithIndex,
AuthorTextFieldWithIndex,
AuthorWithDefaultHeight,
AuthorWithEvenLongerName,
Book,
BookWeak,
BookWithLongName,
BookWithO2O,
BookWithSlug,
IntegerPK,
Node,
Note,
Tag,
TagM2MTest,
TagUniqueRename,
Thing,
UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
# isolated_local_models contains models that are in test methods
# decorated with @isolate_apps.
self.isolated_local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if "schema" in new_apps.all_models:
for model in self.local_models:
for many_to_many in model._meta.many_to_many:
through = many_to_many.remote_field.through
if through and through._meta.auto_created:
del new_apps.all_models["schema"][through._meta.model_name]
del new_apps.all_models["schema"][model._meta.model_name]
if self.isolated_local_models:
with connection.schema_editor() as editor:
for model in self.isolated_local_models:
editor.delete_model(model)
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
converter = connection.introspection.identifier_converter
with connection.schema_editor() as editor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names()
if connection.features.ignores_table_name_case:
table_names = [table_name.lower() for table_name in table_names]
for model in itertools.chain(SchemaTests.models, self.local_models):
tbl = converter(model._meta.db_table)
if connection.features.ignores_table_name_case:
tbl = tbl.lower()
if tbl in table_names:
editor.delete_model(model)
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
return columns
def get_primary_key(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_primary_key_column(cursor, table)
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["index"] and len(c["columns"]) == 1
]
def get_uniques(self, table):
with connection.cursor() as cursor:
return [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table
).values()
if c["unique"] and len(c["columns"]) == 1
]
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def get_constraints_for_column(self, model, column_name):
constraints = self.get_constraints(model._meta.db_table)
constraints_for_column = []
for name, details in constraints.items():
if details["columns"] == [column_name]:
constraints_for_column.append(name)
return sorted(constraints_for_column)
def get_constraint_opclasses(self, constraint_name):
with connection.cursor() as cursor:
sql = """
SELECT opcname
FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
"""
cursor.execute(sql, [constraint_name])
return [row[0] for row in cursor.fetchall()]
def check_added_field_default(
self,
schema_editor,
model,
field,
field_name,
expected_default,
cast_function=None,
):
with connection.cursor() as cursor:
schema_editor.add_field(model, field)
cursor.execute(
"SELECT {} FROM {};".format(field_name, model._meta.db_table)
)
database_default = cursor.fetchall()[0][0]
if cast_function and type(database_default) is not type(expected_default):
database_default = cast_function(database_default)
self.assertEqual(database_default, expected_default)
def get_constraints_count(self, table, column, fk_to):
"""
Return a dict with keys 'fks', 'uniques, and 'indexes' indicating the
number of foreign keys, unique constraints, and indexes on
`table`.`column`. The `fk_to` argument is a 2-tuple specifying the
expected foreign key relationship's (table, column).
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, table)
counts = {"fks": 0, "uniques": 0, "indexes": 0}
for c in constraints.values():
if c["columns"] == [column]:
if c["foreign_key"] == fk_to:
counts["fks"] += 1
if c["unique"]:
counts["uniques"] += 1
elif c["index"]:
counts["indexes"] += 1
return counts
def get_column_collation(self, table, column):
with connection.cursor() as cursor:
return next(
f.collation
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def get_column_comment(self, table, column):
with connection.cursor() as cursor:
return next(
f.comment
for f in connection.introspection.get_table_description(cursor, table)
if f.name == column
)
def get_table_comment(self, table):
with connection.cursor() as cursor:
return next(
t.comment
for t in connection.introspection.get_table_list(cursor)
if t.name == table
)
def assert_column_comment_not_exists(self, table, column):
with connection.cursor() as cursor:
columns = connection.introspection.get_table_description(cursor, table)
self.assertFalse(any([c.name == column and c.comment for c in columns]))
def assertIndexOrder(self, table, index, order):
constraints = self.get_constraints(table)
self.assertIn(index, constraints)
index_orders = constraints[index]["orders"]
self.assertTrue(
all(val == expected for val, expected in zip(index_orders, order))
)
def assertForeignKeyExists(self, model, column, expected_fk_table, field="id"):
"""
Fail if the FK constraint on `model.Meta.db_table`.`column` to
`expected_fk_table`.id doesn't exist.
"""
if not connection.features.can_introspect_foreign_keys:
return
constraints = self.get_constraints(model._meta.db_table)
constraint_fk = None
for details in constraints.values():
if details["columns"] == [column] and details["foreign_key"]:
constraint_fk = details["foreign_key"]
break
self.assertEqual(constraint_fk, (expected_fk_table, field))
def assertForeignKeyNotExists(self, model, column, expected_fk_table):
if not connection.features.can_introspect_foreign_keys:
return
with self.assertRaises(AssertionError):
self.assertForeignKeyExists(model, column, expected_fk_table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
with connection.schema_editor() as editor:
# Create the table
editor.create_model(Author)
# The table is there
list(Author.objects.all())
# Clean up that table
editor.delete_model(Author)
# No deferred SQL should be left over.
self.assertEqual(editor.deferred_sql, [])
# The table is gone
with self.assertRaises(DatabaseError):
list(Author.objects.all())
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk(self):
"Creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag, CASCADE)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
self.assertForeignKeyExists(Book, "author_id", "schema_tag")
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk(self):
# Create some tables.
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from one to the other.
with connection.schema_editor() as editor:
new_field = ForeignKey(Book, CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_inline_fk_db_on_delete(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.create_model(Note)
self.assertForeignKeyNotExists(Note, "book_id", "schema_book")
# Add a foreign key from model to the other.
with (
CaptureQueriesContext(connection) as ctx,
connection.schema_editor() as editor,
):
new_field = ForeignKey(Book, DB_CASCADE)
new_field.set_attributes_from_name("book")
editor.add_field(Note, new_field)
self.assertForeignKeyExists(Note, "book_id", "schema_book")
# Creating a FK field with a constraint uses a single statement without
# a deferred ALTER TABLE.
self.assertFalse(
[
sql
for sql in (str(statement) for statement in editor.deferred_sql)
if sql.startswith("ALTER TABLE") and "ADD CONSTRAINT" in sql
]
)
# ON DELETE clause is used.
self.assertTrue(
any(
capture_query["sql"].startswith("ALTER TABLE")
and "ON DELETE" in capture_query["sql"]
for capture_query in ctx.captured_queries
)
)
@skipUnlessDBFeature("can_create_inline_fk")
def test_add_inline_fk_update_data(self):
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key and update data in the same transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
assertIndex = (
self.assertIn
if connection.features.indexes_foreign_keys
else self.assertNotIn
)
assertIndex("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature(
"can_create_inline_fk",
"allows_multiple_constraints_on_same_fields",
)
@isolate_apps("schema")
def test_add_inline_fk_index_update_data(self):
class Node(Model):
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Node)
# Add an inline foreign key, update data, and an index in the same
# transaction.
new_field = ForeignKey(Node, CASCADE, related_name="new_fk", null=True)
new_field.set_attributes_from_name("new_parent_fk")
parent = Node.objects.create()
with connection.schema_editor() as editor:
editor.add_field(Node, new_field)
Node._meta.add_field(new_field)
editor.execute("UPDATE schema_node SET new_parent_fk_id = %s;", [parent.pk])
editor.add_index(
Node, Index(fields=["new_parent_fk"], name="new_parent_inline_fk_idx")
)
self.assertIn("new_parent_fk_id", self.get_indexes(Node._meta.db_table))
@skipUnlessDBFeature("supports_foreign_keys")
def test_char_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorCharFieldWithIndex)
# Change CharField to FK
old_field = AuthorCharFieldWithIndex._meta.get_field("char_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("char_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorCharFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorCharFieldWithIndex, "char_field_id", "schema_author"
)
@skipUnlessDBFeature("supports_foreign_keys")
@skipUnlessDBFeature("supports_index_on_text_field")
def test_text_field_with_db_index_to_fk(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorTextFieldWithIndex)
# Change TextField to FK
old_field = AuthorTextFieldWithIndex._meta.get_field("text_field")
new_field = ForeignKey(Author, CASCADE, blank=True)
new_field.set_attributes_from_name("text_field")
with connection.schema_editor() as editor:
editor.alter_field(
AuthorTextFieldWithIndex, old_field, new_field, strict=True
)
self.assertForeignKeyExists(
AuthorTextFieldWithIndex, "text_field_id", "schema_author"
)
@isolate_apps("schema")
def test_char_field_pk_to_auto_field(self):
class Foo(Model):
id = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.isolated_local_models = [Foo]
old_field = Foo._meta.get_field("id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Foo
with connection.schema_editor() as editor:
editor.alter_field(Foo, old_field, new_field, strict=True)
@skipUnlessDBFeature("supports_foreign_keys")
def test_fk_to_proxy(self):
"Creating a FK to a proxy model creates database constraints."
class AuthorProxy(Author):
class Meta:
app_label = "schema"
apps = new_apps
proxy = True
class AuthorRef(Model):
author = ForeignKey(AuthorProxy, on_delete=CASCADE)
class Meta:
app_label = "schema"
apps = new_apps
self.local_models = [AuthorProxy, AuthorRef]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(AuthorRef)
self.assertForeignKeyExists(AuthorRef, "author_id", "schema_author")
    @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
    def test_fk_db_constraint(self):
        "The db_constraint parameter is respected"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Author)
            editor.create_model(BookWeak)
        # Initial tables are there
        list(Author.objects.all())
        list(Tag.objects.all())
        list(BookWeak.objects.all())
        # BookWeak's author FK is expected to have no database constraint.
        self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
        # Make a db_constraint=False FK
        new_field = ForeignKey(Tag, CASCADE, db_constraint=False)
        new_field.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
        # Alter to one with a constraint
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        self.assertForeignKeyExists(Author, "tag_id", "schema_tag")
        # Alter to one without a constraint again
        new_field2 = ForeignKey(Tag, CASCADE)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field2, new_field, strict=True)
        self.assertForeignKeyNotExists(Author, "tag_id", "schema_tag")
    @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
    def test_fk_alter_on_delete(self):
        """Switching on_delete between DB-level and Python-level variants
        rewrites (or drops) the constraint's ON DELETE clause."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        self.assertForeignKeyExists(Book, "author_id", "schema_author")
        # Change CASCADE to DB_CASCADE.
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Author, DB_CASCADE)
        new_field.set_attributes_from_name("author")
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            editor.alter_field(Book, old_field, new_field)
        self.assertForeignKeyExists(Book, "author_id", "schema_author")
        # A database-level on_delete must emit an ON DELETE clause.
        self.assertIs(
            any("ON DELETE" in query["sql"] for query in ctx.captured_queries), True
        )
        # Change DB_CASCADE to CASCADE.
        old_field = new_field
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        with (
            connection.schema_editor() as editor,
            CaptureQueriesContext(connection) as ctx,
        ):
            editor.alter_field(Book, old_field, new_field)
        self.assertForeignKeyExists(Book, "author_id", "schema_author")
        # Python-level CASCADE must not emit ON DELETE in the DDL.
        self.assertIs(
            any("ON DELETE" in query["sql"] for query in ctx.captured_queries), False
        )
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
    def test_create_model_db_on_delete(self):
        """Creating a model with a database-level on_delete emits ON DELETE
        SQL in the table DDL."""
        class Parent(Model):
            class Meta:
                app_label = "schema"
        class Child(Model):
            parent_fk = ForeignKey(Parent, DB_SET_NULL, null=True)
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Parent)
        with CaptureQueriesContext(connection) as ctx:
            with connection.schema_editor() as editor:
                editor.create_model(Child)
        # NOTE(review): the FK field is "parent_fk", so its column should be
        # "parent_fk_id"; asserting on "parent_id" may make this check
        # vacuously true -- confirm the intended column name.
        self.assertForeignKeyNotExists(Child, "parent_id", "schema_parent")
        self.assertIs(
            any("ON DELETE" in query["sql"] for query in ctx.captured_queries), True
        )
    @isolate_apps("schema")
    def test_no_db_constraint_added_during_primary_key_change(self):
        """
        When a primary key that's pointed to by a ForeignKey with
        db_constraint=False is altered, a foreign key constraint isn't added.
        """
        class Author(Model):
            class Meta:
                app_label = "schema"
        class BookWeak(Model):
            author = ForeignKey(Author, CASCADE, db_constraint=False)
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWeak)
        self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
        old_field = Author._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.model = Author
        new_field.set_attributes_from_name("id")
        # @isolate_apps() and inner models are needed to have the model
        # relations populated, otherwise this doesn't act as a regression test.
        self.assertEqual(len(new_field.model._meta.related_objects), 1)
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Still no constraint after the primary key type change.
        self.assertForeignKeyNotExists(BookWeak, "author_id", "schema_author")
    def _test_m2m_db_constraint(self, M2MFieldClass):
        """Shared body: adding an M2M with db_constraint=False must not
        create an FK constraint on the auto-generated through table."""
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)
            class Meta:
                app_label = "schema"
                apps = new_apps
        self.local_models = [LocalAuthorWithM2M]
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(LocalAuthorWithM2M)
        # Initial tables are there
        list(LocalAuthorWithM2M.objects.all())
        list(Tag.objects.all())
        # Make a db_constraint=False FK
        new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # The through table's tag FK must carry no database constraint.
        self.assertForeignKeyNotExists(
            new_field.remote_field.through, "tag_id", "schema_tag"
        )
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@skipUnlessDBFeature("supports_foreign_keys")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
    def test_add_field(self):
        """
        Tests adding fields to models
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add the new field
        new_field = IntegerField(null=True)
        new_field.set_attributes_from_name("age")
        with (
            CaptureQueriesContext(connection) as ctx,
            connection.schema_editor() as editor,
        ):
            editor.add_field(Author, new_field)
        # No DROP DEFAULT statement should be needed for a nullable column.
        drop_default_sql = editor.sql_alter_column_no_default % {
            "column": editor.quote_name(new_field.name),
        }
        self.assertFalse(
            any(drop_default_sql in query["sql"] for query in ctx.captured_queries)
        )
        # Table is not rebuilt.
        self.assertIs(
            any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries), False
        )
        self.assertIs(
            any("DROP TABLE" in query["sql"] for query in ctx.captured_queries), False
        )
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["age"][0],
            connection.features.introspected_field_types["IntegerField"],
        )
        # Index 6 of the description tuple is null_ok; the column is nullable.
        self.assertTrue(columns["age"][1][6])
def test_add_field_remove_field(self):
"""
Adding a field and removing it removes all deferred sql referring to
it.
"""
with connection.schema_editor() as editor:
# Create a table with a unique constraint on the slug field.
editor.create_model(Tag)
# Remove the slug column.
editor.remove_field(Tag, Tag._meta.get_field("slug"))
self.assertEqual(editor.deferred_sql, [])
    def test_add_field_temp_default(self):
        """
        Tests adding fields to models with a temporary default
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = CharField(max_length=30, default="Godwin")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["surname"][0],
            connection.features.introspected_field_types["CharField"],
        )
        # null_ok mirrors whether the backend treats '' as NULL.
        self.assertEqual(
            columns["surname"][1][6],
            connection.features.interprets_empty_strings_as_nulls,
        )
    def test_add_field_temp_default_boolean(self):
        """
        Tests adding fields to models with a temporary default where
        the default is False. (#21783)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field with a falsy default; it must still be applied
        # as the temporary default for the existing rows.
        new_field = BooleanField(default=False)
        new_field.set_attributes_from_name("awesome")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        # BooleanField are stored as TINYINT(1) on MySQL.
        field_type = columns["awesome"][0]
        self.assertEqual(
            field_type, connection.features.introspected_field_types["BooleanField"]
        )
    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):
            # Weird field that saves the count of items in its value
            def get_default(self):
                return self.default
            def get_prep_value(self, value):
                if value is None:
                    return 0
                return len(value)
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default it needs to cast (to string in this
        # case)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns["thing"]
        self.assertEqual(
            field_type, connection.features.introspected_field_types["IntegerField"]
        )
        # Make sure the values were transformed correctly:
        # get_prep_value({1: 2}) == len({1: 2}) == 1 for both existing rows.
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_o2o_nullable(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Note)
new_field = OneToOneField(Note, CASCADE, null=True)
new_field.set_attributes_from_name("note")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertIn("note_id", columns)
self.assertTrue(columns["note_id"][1][6])
    def test_add_field_binary(self):
        """
        Tests binary fields get a sane default (#22851)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field; no explicit default is given on purpose.
        new_field = BinaryField(blank=True)
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        # MySQL annoyingly uses the same backend, so it'll come back as one of
        # these two types.
        self.assertIn(columns["bits"][0], ("BinaryField", "TextField"))
def test_add_field_durationfield_with_default(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = DurationField(default=datetime.timedelta(minutes=10))
new_field.set_attributes_from_name("duration")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["duration"][0],
connection.features.introspected_field_types["DurationField"],
)
    @unittest.skipUnless(connection.vendor == "mysql", "MySQL specific")
    def test_add_binaryfield_mediumblob(self):
        """
        Test adding a custom-sized binary field on MySQL (#24846).
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field with default
        new_field = MediumBlobField(blank=True, default=b"123")
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        # Introspection treats BLOBs as TextFields
        self.assertEqual(columns["bits"][0], "TextField")
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_json_field", "supports_stored_generated_columns")
    def test_add_generated_field_with_kt_model(self):
        """A stored GeneratedField built from a KT (key transform) expression
        renders valid SQL."""
        class GeneratedFieldKTModel(Model):
            data = JSONField()
            status = GeneratedField(
                expression=KT("data__status"),
                output_field=TextField(),
                db_persist=True,
            )
            class Meta:
                app_label = "schema"
        with CaptureQueriesContext(connection) as ctx:
            with connection.schema_editor() as editor:
                editor.create_model(GeneratedFieldKTModel)
        # The expression must not leak a Python "None" into the emitted SQL.
        self.assertIs(
            any("None" in query["sql"] for query in ctx.captured_queries),
            False,
        )
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_virtual_generated_columns")
    def test_add_generated_boolean_field(self):
        """A virtual (non-persisted) boolean GeneratedField computes its
        value from the expression on read."""
        class GeneratedBooleanFieldModel(Model):
            value = IntegerField(null=True)
            has_value = GeneratedField(
                expression=Q(value__isnull=False),
                output_field=BooleanField(),
                db_persist=False,
            )
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(GeneratedBooleanFieldModel)
        obj = GeneratedBooleanFieldModel.objects.create()
        self.assertIs(obj.has_value, False)
        obj = GeneratedBooleanFieldModel.objects.create(value=1)
        self.assertIs(obj.has_value, True)
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_add_generated_field(self):
        """A stored GeneratedField with an explicit output_field can be
        created as part of the table DDL."""
        class GeneratedFieldOutputFieldModel(Model):
            price = DecimalField(max_digits=7, decimal_places=2)
            vat_price = GeneratedField(
                expression=Round(F("price") * Value(Decimal("1.22")), 2),
                db_persist=True,
                output_field=DecimalField(max_digits=8, decimal_places=2),
            )
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(GeneratedFieldOutputFieldModel)
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_add_generated_field_contains(self):
        """A GeneratedField whose expression involves literal '%' characters
        (Value("%") and a __contains lookup) can be created and ALTERed in."""
        class GeneratedFieldContainsModel(Model):
            text = TextField(default="foo")
            generated = GeneratedField(
                expression=Concat("text", Value("%")),
                db_persist=True,
                output_field=TextField(),
            )
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(GeneratedFieldContainsModel)
        field = GeneratedField(
            expression=Q(text__contains="foo"),
            db_persist=True,
            output_field=BooleanField(),
        )
        field.contribute_to_class(GeneratedFieldContainsModel, "contains_foo")
        with connection.schema_editor() as editor:
            editor.add_field(GeneratedFieldContainsModel, field)
        obj = GeneratedFieldContainsModel.objects.create()
        obj.refresh_from_db()
        self.assertEqual(obj.text, "foo")
        self.assertEqual(obj.generated, "foo%")
        self.assertIs(obj.contains_foo, True)
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_stored_generated_columns")
    def test_alter_generated_field(self):
        """Altering a stored GeneratedField to add db_index creates the
        index on the generated column."""
        class GeneratedFieldIndexedModel(Model):
            number = IntegerField(default=1)
            generated = GeneratedField(
                expression=F("number"),
                db_persist=True,
                output_field=IntegerField(),
            )
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(GeneratedFieldIndexedModel)
        old_field = GeneratedFieldIndexedModel._meta.get_field("generated")
        new_field = GeneratedField(
            expression=F("number"),
            db_persist=True,
            db_index=True,
            output_field=IntegerField(),
        )
        new_field.contribute_to_class(GeneratedFieldIndexedModel, "generated")
        with connection.schema_editor() as editor:
            editor.alter_field(GeneratedFieldIndexedModel, old_field, new_field)
        self.assertIn(
            "generated", self.get_indexes(GeneratedFieldIndexedModel._meta.db_table)
        )
@isolate_apps("schema")
def test_add_auto_field(self):
class AddAutoFieldModel(Model):
name = CharField(max_length=255, primary_key=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(AddAutoFieldModel)
self.isolated_local_models = [AddAutoFieldModel]
old_field = AddAutoFieldModel._meta.get_field("name")
new_field = CharField(max_length=255)
new_field.set_attributes_from_name("name")
new_field.model = AddAutoFieldModel
with connection.schema_editor() as editor:
editor.alter_field(AddAutoFieldModel, old_field, new_field)
new_auto_field = AutoField(primary_key=True)
new_auto_field.set_attributes_from_name("id")
new_auto_field.model = AddAutoFieldModel()
with connection.schema_editor() as editor:
editor.add_field(AddAutoFieldModel, new_auto_field)
# Crashes on PostgreSQL when the GENERATED BY suffix is missing.
AddAutoFieldModel.objects.create(name="test")
    def test_remove_field(self):
        """Removing a column works and, where DROP COLUMN is supported,
        doesn't rebuild the whole table."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            with CaptureQueriesContext(connection) as ctx:
                editor.remove_field(Author, Author._meta.get_field("name"))
        columns = self.column_classes(Author)
        self.assertNotIn("name", columns)
        if getattr(connection.features, "can_alter_table_drop_column", True):
            # Table is not rebuilt.
            self.assertIs(
                any("CREATE TABLE" in query["sql"] for query in ctx.captured_queries),
                False,
            )
            self.assertIs(
                any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
                False,
            )
def test_remove_indexed_field(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorCharFieldWithIndex)
with connection.schema_editor() as editor:
editor.remove_field(
AuthorCharFieldWithIndex,
AuthorCharFieldWithIndex._meta.get_field("char_field"),
)
columns = self.column_classes(AuthorCharFieldWithIndex)
self.assertNotIn("char_field", columns)
    def test_alter(self):
        """
        Tests simple altering of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["name"][0],
            connection.features.introspected_field_types["CharField"],
        )
        # Index 6 of the description tuple is null_ok.
        self.assertEqual(
            bool(columns["name"][1][6]),
            bool(connection.features.interprets_empty_strings_as_nulls),
        )
        # Alter the name field to a TextField
        old_field = Author._meta.get_field("name")
        new_field = TextField(null=True)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        columns = self.column_classes(Author)
        self.assertEqual(columns["name"][0], "TextField")
        self.assertTrue(columns["name"][1][6])
        # Change nullability again
        new_field2 = TextField(null=False)
        new_field2.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        columns = self.column_classes(Author)
        self.assertEqual(columns["name"][0], "TextField")
        self.assertEqual(
            bool(columns["name"][1][6]),
            bool(connection.features.interprets_empty_strings_as_nulls),
        )
    def test_alter_auto_field_to_integer_field(self):
        """The AutoField primary key can be converted to a plain IntegerField,
        losing auto-generation."""
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change AutoField to IntegerField
        old_field = Author._meta.get_field("id")
        new_field = IntegerField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Now that ID is an IntegerField, the database raises an error if it
        # isn't provided.
        if not connection.features.supports_unspecified_pk:
            with self.assertRaises(DatabaseError):
                Author.objects.create()
def test_alter_auto_field_to_char_field(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change AutoField to CharField
old_field = Author._meta.get_field("id")
new_field = CharField(primary_key=True, max_length=50)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
    @isolate_apps("schema")
    def test_alter_auto_field_quoted_db_column(self):
        """Altering an AutoField whose db_column is already quoted works and
        leaves an insertable table."""
        class Foo(Model):
            id = AutoField(primary_key=True, db_column='"quoted_id"')
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.model = Foo
        new_field.db_column = '"quoted_id"'
        new_field.set_attributes_from_name("id")
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
        # The sequence/identity must survive the alteration.
        Foo.objects.create()
    def test_alter_not_unique_field_to_primary_key(self):
        """Promoting a non-unique field to primary key must not add a
        redundant UNIQUE constraint next to the PK."""
        # Create the table.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change UUIDField to primary key.
        old_field = Author._meta.get_field("uuid")
        new_field = UUIDField(primary_key=True)
        new_field.set_attributes_from_name("uuid")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.remove_field(Author, Author._meta.get_field("id"))
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Redundant unique constraint is not added.
        count = self.get_constraints_count(
            Author._meta.db_table,
            Author._meta.get_field("uuid").column,
            None,
        )
        self.assertLessEqual(count["uniques"], 1)
    @isolate_apps("schema")
    def test_alter_primary_key_quoted_db_table(self):
        """Altering the primary key of a model with a quoted db_table works
        and leaves an insertable table."""
        class Foo(Model):
            class Meta:
                app_label = "schema"
                db_table = '"foo"'
        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.model = Foo
        new_field.set_attributes_from_name("id")
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
        Foo.objects.create()
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
    def test_alter_text_field_to_not_null_with_default_value(self):
        """Existing NULLs are replaced by the default when a text field
        becomes NOT NULL with default ''."""
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        note = Note.objects.create(address=None)
        old_field = Note._meta.get_field("address")
        new_field = TextField(blank=True, default="", null=False)
        new_field.set_attributes_from_name("address")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        note.refresh_from_db()
        # The previously-NULL row was backfilled with the default.
        self.assertEqual(note.address, "")
    @skipUnlessDBFeature("can_defer_constraint_checks", "can_rollback_ddl")
    def test_alter_fk_checks_deferred_constraints(self):
        """
        #25492 - Altering a foreign key's structure and data in the same
        transaction.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Node)
        old_field = Node._meta.get_field("parent")
        new_field = ForeignKey(Node, CASCADE)
        new_field.set_attributes_from_name("parent")
        parent = Node.objects.create()
        with connection.schema_editor() as editor:
            # Update the parent FK to create a deferred constraint check.
            Node.objects.update(parent=parent)
            editor.alter_field(Node, old_field, new_field, strict=True)
    @isolate_apps("schema")
    def test_alter_null_with_default_value_deferred_constraints(self):
        """Altering two nullable columns to NOT NULL with defaults inside a
        single schema editor succeeds while rows exist."""
        class Publisher(Model):
            class Meta:
                app_label = "schema"
        class Article(Model):
            publisher = ForeignKey(Publisher, CASCADE)
            title = CharField(max_length=50, null=True)
            description = CharField(max_length=100, null=True)
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Publisher)
            editor.create_model(Article)
        self.isolated_local_models = [Article, Publisher]
        publisher = Publisher.objects.create()
        Article.objects.create(publisher=publisher)
        old_title = Article._meta.get_field("title")
        new_title = CharField(max_length=50, null=False, default="")
        new_title.set_attributes_from_name("title")
        old_description = Article._meta.get_field("description")
        new_description = CharField(max_length=100, null=False, default="")
        new_description.set_attributes_from_name("description")
        with connection.schema_editor() as editor:
            editor.alter_field(Article, old_title, new_title, strict=True)
            editor.alter_field(Article, old_description, new_description, strict=True)
    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Existing data must be convertible to the new type.
        Note.objects.create(info="1988-05-05")
        old_field = Note._meta.get_field("info")
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])
    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Existing data must be convertible to the new type.
        Note.objects.create(info="1988-05-05 3:16:17.4567")
        old_field = Note._meta.get_field("info")
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])
    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Existing data must be convertible to the new type.
        Note.objects.create(info="3:16:17.4567")
        old_field = Note._meta.get_field("info")
        new_field = TimeField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable
        columns = self.column_classes(Note)
        self.assertFalse(columns["info"][1][6])
    @skipIfDBFeature("interprets_empty_strings_as_nulls")
    def test_alter_textual_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # NOT NULL is enforced before the alteration...
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = CharField(max_length=50)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # ...and still enforced after the type change.
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
    @skipUnlessDBFeature("interprets_empty_strings_as_nulls")
    def test_alter_textual_field_not_null_to_null(self):
        """
        Nullability for textual fields is preserved on databases that
        interpret empty strings as NULLs.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        columns = self.column_classes(Author)
        # Field is nullable.
        self.assertTrue(columns["uuid"][1][6])
        # Change to NOT NULL.
        old_field = Author._meta.get_field("uuid")
        new_field = SlugField(null=False, blank=True)
        new_field.set_attributes_from_name("uuid")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        columns = self.column_classes(Author)
        # Nullability is preserved: on these backends '' and NULL are
        # equivalent, so the column must stay nullable.
        self.assertTrue(columns["uuid"][1][6])
    def test_alter_numeric_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # NOT NULL is enforced before the alteration...
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug="aaa")
        old_field = UniqueTest._meta.get_field("year")
        new_field = BigIntegerField()
        new_field.set_attributes_from_name("year")
        with connection.schema_editor() as editor:
            editor.alter_field(UniqueTest, old_field, new_field, strict=True)
        # ...and still enforced after widening to BigIntegerField.
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug="bbb")
    def test_alter_null_to_not_null(self):
        """
        #23609 - Tests handling of default values when altering from NULL to
        NOT NULL.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertTrue(columns["height"][1][6])
        # Create some test data
        Author.objects.create(name="Not null author", height=12)
        Author.objects.create(name="Null author")
        # Verify null value
        self.assertEqual(Author.objects.get(name="Not null author").height, 12)
        self.assertIsNone(Author.objects.get(name="Null author").height)
        # Alter the height field to NOT NULL with default
        old_field = Author._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        columns = self.column_classes(Author)
        self.assertFalse(columns["height"][1][6])
        # Verify default value: existing NULLs were backfilled with 42,
        # non-NULL values are untouched.
        self.assertEqual(Author.objects.get(name="Not null author").height, 12)
        self.assertEqual(Author.objects.get(name="Null author").height, 42)
    def test_alter_charfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a CharField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change the CharField to null
        old_field = Author._meta.get_field("name")
        # copy() keeps every attribute identical except the null flag below.
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_char_field_decrease_length(self):
        """Shrinking max_length below existing data surfaces the backend's
        DataError rather than silently truncating."""
        # Create the table.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        Author.objects.create(name="x" * 255)
        # Change max_length of CharField.
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            msg = "value too long for type character varying(254)"
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(Author, old_field, new_field, strict=True)
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_with_custom_db_type(self):
        """Altering a field whose db_type is composite (ArrayField) works."""
        from django.contrib.postgres.fields import ArrayField
        class Foo(Model):
            field = ArrayField(CharField(max_length=255))
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Foo)
        self.isolated_local_models = [Foo]
        old_field = Foo._meta.get_field("field")
        new_field = ArrayField(CharField(max_length=16))
        new_field.set_attributes_from_name("field")
        new_field.model = Foo
        with connection.schema_editor() as editor:
            editor.alter_field(Foo, old_field, new_field, strict=True)
    @isolate_apps("schema")
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_array_field_decrease_base_field_length(self):
        """Shrinking an ArrayField's base field below existing data surfaces
        the backend's DataError."""
        from django.contrib.postgres.fields import ArrayField
        class ArrayModel(Model):
            field = ArrayField(CharField(max_length=16))
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(ArrayModel)
        self.isolated_local_models = [ArrayModel]
        ArrayModel.objects.create(field=["x" * 16])
        old_field = ArrayModel._meta.get_field("field")
        new_field = ArrayField(CharField(max_length=15))
        new_field.set_attributes_from_name("field")
        new_field.model = ArrayModel
        with connection.schema_editor() as editor:
            msg = "value too long for type character varying(15)"
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(ArrayModel, old_field, new_field, strict=True)
    @isolate_apps("schema")
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_array_field_decrease_nested_base_field_length(self):
        """Same as the non-nested variant, but for a nested ArrayField."""
        from django.contrib.postgres.fields import ArrayField
        class ArrayModel(Model):
            field = ArrayField(ArrayField(CharField(max_length=16)))
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(ArrayModel)
        self.isolated_local_models = [ArrayModel]
        ArrayModel.objects.create(field=[["x" * 16]])
        old_field = ArrayModel._meta.get_field("field")
        new_field = ArrayField(ArrayField(CharField(max_length=15)))
        new_field.set_attributes_from_name("field")
        new_field.model = ArrayModel
        with connection.schema_editor() as editor:
            msg = "value too long for type character varying(15)"
            with self.assertRaisesMessage(DataError, msg):
                editor.alter_field(ArrayModel, old_field, new_field, strict=True)
def _add_ci_collation(self):
    """
    Create (if missing) a non-deterministic, case-insensitive ICU collation
    and register its removal as a test cleanup. Returns the collation name.
    """
    ci_collation = "case_insensitive"

    def drop_collation():
        # Cleanup: remove the collation created below.
        with connection.cursor() as cursor:
            cursor.execute(f"DROP COLLATION IF EXISTS {ci_collation}")

    with connection.cursor() as cursor:
        cursor.execute(
            f"CREATE COLLATION IF NOT EXISTS {ci_collation} (provider=icu, "
            f"locale='und-u-ks-level2', deterministic=false)"
        )
    self.addCleanup(drop_collation)
    return ci_collation
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
    "supports_collation_on_charfield",
    "supports_non_deterministic_collations",
)
def test_db_collation_arrayfield(self):
    """
    An ArrayField of CharFields can be created with a db_collation and the
    collation can be changed via alter_field.
    """
    from django.contrib.postgres.fields import ArrayField

    ci_collation = self._add_ci_collation()
    cs_collation = "en-x-icu"

    class ArrayModel(Model):
        field = ArrayField(CharField(max_length=16, db_collation=ci_collation))

        class Meta:
            app_label = "schema"

    # Create the table.
    with connection.schema_editor() as editor:
        editor.create_model(ArrayModel)
    self.isolated_local_models = [ArrayModel]
    self.assertEqual(
        self.get_column_collation(ArrayModel._meta.db_table, "field"),
        ci_collation,
    )
    # Alter collation.
    old_field = ArrayModel._meta.get_field("field")
    new_field_cs = ArrayField(CharField(max_length=16, db_collation=cs_collation))
    new_field_cs.set_attributes_from_name("field")
    # Fixed: previously assigned the ArrayField *class* to .model instead of
    # the model class, matching the pattern used by every sibling test.
    new_field_cs.model = ArrayModel
    with connection.schema_editor() as editor:
        editor.alter_field(ArrayModel, old_field, new_field_cs, strict=True)
    self.assertEqual(
        self.get_column_collation(ArrayModel._meta.db_table, "field"),
        cs_collation,
    )
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
    "supports_collation_on_charfield",
    "supports_non_deterministic_collations",
)
def test_unique_with_collation_charfield(self):
    """
    A unique CharField with a non-deterministic (case-insensitive) collation
    gets both the collation and the unique constraint on creation.
    """
    ci_collation = self._add_ci_collation()

    class CiCharModel(Model):
        field = CharField(max_length=16, db_collation=ci_collation, unique=True)

        class Meta:
            app_label = "schema"

    # Create the table.
    with connection.schema_editor() as editor:
        editor.create_model(CiCharModel)
    self.isolated_local_models = [CiCharModel]
    self.assertEqual(
        self.get_column_collation(CiCharModel._meta.db_table, "field"),
        ci_collation,
    )
    self.assertIn("field", self.get_uniques(CiCharModel._meta.db_table))
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_unique_with_deterministic_collation_charfield(self):
    """
    A unique CharField with a deterministic collation still gets the
    varchar_pattern_ops LIKE index PostgreSQL uses for prefix matching.
    """
    deterministic_collation = connection.features.test_collations.get(
        "deterministic"
    )
    if not deterministic_collation:
        self.skipTest("This backend does not support deterministic collations.")

    class CharModel(Model):
        field = CharField(db_collation=deterministic_collation, unique=True)

        class Meta:
            app_label = "schema"

    # Create the table.
    with connection.schema_editor() as editor:
        editor.create_model(CharModel)
    self.isolated_local_models = [CharModel]
    constraints = self.get_constraints_for_column(
        CharModel, CharModel._meta.get_field("field").column
    )
    # Constraint names include Django's deterministic 8-char hash suffix.
    self.assertIn("schema_charmodel_field_8b338dea_like", constraints)
    self.assertIn(
        "varchar_pattern_ops",
        self.get_constraint_opclasses("schema_charmodel_field_8b338dea_like"),
    )
    self.assertEqual(
        self.get_column_collation(CharModel._meta.db_table, "field"),
        deterministic_collation,
    )
    self.assertIn("field", self.get_uniques(CharModel._meta.db_table))
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature(
    "supports_collation_on_charfield",
    "supports_non_deterministic_collations",
)
def test_relation_to_collation_charfield(self):
    """
    A relation targeting a collated CharField propagates the collation to
    the referencing column so the FK comparison semantics match.
    """
    ci_collation = self._add_ci_collation()

    class CiCharModel(Model):
        field = CharField(max_length=16, db_collation=ci_collation, unique=True)

        class Meta:
            app_label = "schema"

    class RelationModel(Model):
        field = OneToOneField(CiCharModel, CASCADE, to_field="field")

        class Meta:
            app_label = "schema"

    # Create the table.
    with connection.schema_editor() as editor:
        editor.create_model(CiCharModel)
        editor.create_model(RelationModel)
    self.isolated_local_models = [CiCharModel, RelationModel]
    # Both sides of the relation carry the case-insensitive collation.
    self.assertEqual(
        self.get_column_collation(RelationModel._meta.db_table, "field_id"),
        ci_collation,
    )
    self.assertEqual(
        self.get_column_collation(CiCharModel._meta.db_table, "field"),
        ci_collation,
    )
    self.assertIn("field_id", self.get_uniques(RelationModel._meta.db_table))
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_relation_to_deterministic_collation_charfield(self):
    """
    A relation targeting a deterministically-collated CharField gets the
    collation and the varchar_pattern_ops LIKE index on both columns.
    """
    deterministic_collation = connection.features.test_collations.get(
        "deterministic"
    )
    if not deterministic_collation:
        self.skipTest("This backend does not support deterministic collations.")

    class CharModel(Model):
        field = CharField(db_collation=deterministic_collation, unique=True)

        class Meta:
            app_label = "schema"

    class RelationModel(Model):
        field = OneToOneField(CharModel, CASCADE, to_field="field")

        class Meta:
            app_label = "schema"

    # Create the table.
    with connection.schema_editor() as editor:
        editor.create_model(CharModel)
        editor.create_model(RelationModel)
    self.isolated_local_models = [CharModel, RelationModel]
    constraints = self.get_constraints_for_column(
        CharModel, CharModel._meta.get_field("field").column
    )
    # Target column keeps its LIKE index (names include the hash suffix).
    self.assertIn("schema_charmodel_field_8b338dea_like", constraints)
    self.assertIn(
        "varchar_pattern_ops",
        self.get_constraint_opclasses("schema_charmodel_field_8b338dea_like"),
    )
    rel_constraints = self.get_constraints_for_column(
        RelationModel, RelationModel._meta.get_field("field").column
    )
    # Referencing column also gets a LIKE index.
    self.assertIn("schema_relationmodel_field_id_395fbb08_like", rel_constraints)
    self.assertIn(
        "varchar_pattern_ops",
        self.get_constraint_opclasses(
            "schema_relationmodel_field_id_395fbb08_like"
        ),
    )
    self.assertEqual(
        self.get_column_collation(RelationModel._meta.db_table, "field_id"),
        deterministic_collation,
    )
    self.assertEqual(
        self.get_column_collation(CharModel._meta.db_table, "field"),
        deterministic_collation,
    )
    self.assertIn("field_id", self.get_uniques(RelationModel._meta.db_table))
def test_alter_textfield_to_null(self):
    """
    #24307 - Should skip an alter statement on databases with
    interprets_empty_strings_as_nulls when changing a TextField to null.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Note)
    # Change the TextField to null
    old_field = Note._meta.get_field("info")
    new_field = copy(old_field)
    new_field.null = True
    # strict=True would raise if the backend emitted an invalid statement.
    with connection.schema_editor() as editor:
        editor.alter_field(Note, old_field, new_field, strict=True)
def test_alter_null_to_not_null_keeping_default(self):
    """
    #23738 - Can change a nullable field with default to non-nullable
    with the same default.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithDefaultHeight)
    # Ensure the field is right to begin with
    columns = self.column_classes(AuthorWithDefaultHeight)
    # columns[name][1][6] is the introspected null_ok flag.
    self.assertTrue(columns["height"][1][6])
    # Alter the height field to NOT NULL keeping the previous default
    old_field = AuthorWithDefaultHeight._meta.get_field("height")
    new_field = PositiveIntegerField(default=42)
    new_field.set_attributes_from_name("height")
    with connection.schema_editor() as editor:
        editor.alter_field(
            AuthorWithDefaultHeight, old_field, new_field, strict=True
        )
    columns = self.column_classes(AuthorWithDefaultHeight)
    self.assertFalse(columns["height"][1][6])
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_fk(self):
    """
    Tests altering of FKs
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure the field is right to begin with
    columns = self.column_classes(Book)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    self.assertForeignKeyExists(Book, "author_id", "schema_author")
    # Alter the FK (only editable changes; column type and FK must survive).
    old_field = Book._meta.get_field("author")
    new_field = ForeignKey(Author, CASCADE, editable=False)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, old_field, new_field, strict=True)
    columns = self.column_classes(Book)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys")
def test_alter_to_fk(self):
    """
    #24447 - Tests adding a FK constraint for an existing column
    """

    class LocalBook(Model):
        author = IntegerField()  # starts as a plain integer column
        title = CharField(max_length=100, db_index=True)
        pub_date = DateTimeField()

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalBook]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(LocalBook)
    # Ensure no FK constraint exists
    constraints = self.get_constraints(LocalBook._meta.db_table)
    for details in constraints.values():
        if details["foreign_key"]:
            self.fail(
                "Found an unexpected FK constraint to %s" % details["columns"]
            )
    # IntegerField -> ForeignKey must add the constraint in place.
    old_field = LocalBook._meta.get_field("author")
    new_field = ForeignKey(Author, CASCADE)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(LocalBook, old_field, new_field, strict=True)
    self.assertForeignKeyExists(LocalBook, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_o2o_to_fk(self):
    """
    #24163 - Tests altering of OneToOneField to ForeignKey
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(BookWithO2O)
    # Ensure the field is right to begin with
    columns = self.column_classes(BookWithO2O)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    # Ensure the field is unique
    author = Author.objects.create(name="Joe")
    BookWithO2O.objects.create(
        author=author, title="Django 1", pub_date=datetime.datetime.now()
    )
    with self.assertRaises(IntegrityError):
        BookWithO2O.objects.create(
            author=author, title="Django 2", pub_date=datetime.datetime.now()
        )
    BookWithO2O.objects.all().delete()
    self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
    # Alter the OneToOneField to ForeignKey
    old_field = BookWithO2O._meta.get_field("author")
    new_field = ForeignKey(Author, CASCADE)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
    # After the alter, the table matches the plain-FK Book model, so use it
    # from here on (both models share the same db_table).
    columns = self.column_classes(Book)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    # Ensure the field is not unique anymore
    Book.objects.create(
        author=author, title="Django 1", pub_date=datetime.datetime.now()
    )
    Book.objects.create(
        author=author, title="Django 2", pub_date=datetime.datetime.now()
    )
    self.assertForeignKeyExists(Book, "author_id", "schema_author")
@skipUnlessDBFeature("supports_foreign_keys", "can_introspect_foreign_keys")
def test_alter_fk_to_o2o(self):
    """
    #24163 - Tests altering of ForeignKey to OneToOneField
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure the field is right to begin with
    columns = self.column_classes(Book)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    # Ensure the field is not unique
    author = Author.objects.create(name="Joe")
    Book.objects.create(
        author=author, title="Django 1", pub_date=datetime.datetime.now()
    )
    Book.objects.create(
        author=author, title="Django 2", pub_date=datetime.datetime.now()
    )
    Book.objects.all().delete()
    self.assertForeignKeyExists(Book, "author_id", "schema_author")
    # Alter the ForeignKey to OneToOneField
    old_field = Book._meta.get_field("author")
    new_field = OneToOneField(Author, CASCADE)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, old_field, new_field, strict=True)
    # After the alter, the table matches BookWithO2O (shared db_table).
    columns = self.column_classes(BookWithO2O)
    self.assertEqual(
        columns["author_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    # Ensure the field is unique now
    BookWithO2O.objects.create(
        author=author, title="Django 1", pub_date=datetime.datetime.now()
    )
    with self.assertRaises(IntegrityError):
        BookWithO2O.objects.create(
            author=author, title="Django 2", pub_date=datetime.datetime.now()
        )
    self.assertForeignKeyExists(BookWithO2O, "author_id", "schema_author")
def test_alter_field_fk_to_o2o(self):
    """
    FK -> O2O replaces the FK's index with a unique constraint while keeping
    the FK constraint count unchanged.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Expected counts depend on backend introspection capabilities.
    expected_fks = (
        1
        if connection.features.supports_foreign_keys
        and connection.features.can_introspect_foreign_keys
        else 0
    )
    expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
    # Check the index is right to begin with.
    counts = self.get_constraints_count(
        Book._meta.db_table,
        Book._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    self.assertEqual(
        counts,
        {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
    )
    old_field = Book._meta.get_field("author")
    new_field = OneToOneField(Author, CASCADE)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, old_field, new_field)
    counts = self.get_constraints_count(
        Book._meta.db_table,
        Book._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    # The index on ForeignKey is replaced with a unique constraint for
    # OneToOneField.
    self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
def test_autofield_to_o2o(self):
    """
    An AutoField primary key can be renamed and then converted to a
    OneToOneField, ending up with a plain big-integer column.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Note)
    # Rename the field.
    old_field = Author._meta.get_field("id")
    new_field = AutoField(primary_key=True)
    new_field.set_attributes_from_name("note_ptr")
    new_field.model = Author
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    # Alter AutoField to OneToOneField.
    new_field_o2o = OneToOneField(Note, CASCADE)
    new_field_o2o.set_attributes_from_name("note_ptr")
    new_field_o2o.model = Author
    with connection.schema_editor() as editor:
        editor.alter_field(Author, new_field, new_field_o2o, strict=True)
    columns = self.column_classes(Author)
    field_type, _ = columns["note_ptr_id"]
    self.assertEqual(
        field_type, connection.features.introspected_field_types["BigIntegerField"]
    )
def test_alter_field_fk_keeps_index(self):
    """
    Altering only a FK's on_delete (a Python-level change) must not drop or
    rebuild the column's index.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Expected counts depend on backend introspection capabilities.
    expected_fks = (
        1
        if connection.features.supports_foreign_keys
        and connection.features.can_introspect_foreign_keys
        else 0
    )
    expected_indexes = 1 if connection.features.indexes_foreign_keys else 0
    # Check the index is right to begin with.
    counts = self.get_constraints_count(
        Book._meta.db_table,
        Book._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    self.assertEqual(
        counts,
        {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
    )
    old_field = Book._meta.get_field("author")
    # on_delete changed from CASCADE.
    new_field = ForeignKey(Author, PROTECT)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, old_field, new_field, strict=True)
    counts = self.get_constraints_count(
        Book._meta.db_table,
        Book._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    # The index remains.
    self.assertEqual(
        counts,
        {"fks": expected_fks, "uniques": 0, "indexes": expected_indexes},
    )
def test_alter_field_o2o_to_fk(self):
    """
    O2O -> FK swaps the unique constraint for a regular index while keeping
    the FK constraint count unchanged.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(BookWithO2O)
    # Expected counts depend on backend introspection capabilities.
    expected_fks = (
        1
        if connection.features.supports_foreign_keys
        and connection.features.can_introspect_foreign_keys
        else 0
    )
    # Check the unique constraint is right to begin with.
    counts = self.get_constraints_count(
        BookWithO2O._meta.db_table,
        BookWithO2O._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
    old_field = BookWithO2O._meta.get_field("author")
    new_field = ForeignKey(Author, CASCADE)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(BookWithO2O, old_field, new_field)
    counts = self.get_constraints_count(
        BookWithO2O._meta.db_table,
        BookWithO2O._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    # The unique constraint on OneToOneField is replaced with an index for
    # ForeignKey.
    self.assertEqual(counts, {"fks": expected_fks, "uniques": 0, "indexes": 1})
def test_alter_field_o2o_keeps_unique(self):
    """
    Altering only an O2O's on_delete (a Python-level change) must keep the
    column's unique constraint intact.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(BookWithO2O)
    # Expected counts depend on backend introspection capabilities.
    expected_fks = (
        1
        if connection.features.supports_foreign_keys
        and connection.features.can_introspect_foreign_keys
        else 0
    )
    # Check the unique constraint is right to begin with.
    counts = self.get_constraints_count(
        BookWithO2O._meta.db_table,
        BookWithO2O._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
    old_field = BookWithO2O._meta.get_field("author")
    # on_delete changed from CASCADE.
    new_field = OneToOneField(Author, PROTECT)
    new_field.set_attributes_from_name("author")
    with connection.schema_editor() as editor:
        editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
    counts = self.get_constraints_count(
        BookWithO2O._meta.db_table,
        BookWithO2O._meta.get_field("author").column,
        (Author._meta.db_table, Author._meta.pk.column),
    )
    # The unique constraint remains.
    self.assertEqual(counts, {"fks": expected_fks, "uniques": 1, "indexes": 0})
@skipUnlessDBFeature("ignores_table_name_case")
def test_alter_db_table_case(self):
    """
    Renaming a table to a different case must not error on backends that
    treat table names case-insensitively.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Alter the case of the table
    old_table_name = Author._meta.db_table
    with connection.schema_editor() as editor:
        editor.alter_db_table(Author, old_table_name, old_table_name.upper())
def test_alter_implicit_id_to_explicit(self):
    """
    Should be able to convert an implicit "id" field to an explicit "id"
    primary key field.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    old_field = Author._meta.get_field("id")
    new_field = AutoField(primary_key=True)
    new_field.set_attributes_from_name("id")
    new_field.model = Author
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    # This will fail if DROP DEFAULT is inadvertently executed on this
    # field which drops the id sequence, at least on PostgreSQL.
    Author.objects.create(name="Foo")
    Author.objects.create(name="Bar")
def test_alter_autofield_pk_to_bigautofield_pk(self):
    """AutoField pk can be widened to BigAutoField with a working sequence."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    old_field = Author._meta.get_field("id")
    new_field = BigAutoField(primary_key=True)
    new_field.set_attributes_from_name("id")
    new_field.model = Author
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    # Insert with an explicit pk, then reset the sequence so the next
    # auto-generated pk doesn't collide with it.
    Author.objects.create(name="Foo", pk=1)
    with connection.cursor() as cursor:
        sequence_reset_sqls = connection.ops.sequence_reset_sql(
            no_style(), [Author]
        )
        if sequence_reset_sqls:
            cursor.execute(sequence_reset_sqls[0])
    self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_autofield_pk_to_smallautofield_pk(self):
    """AutoField pk can be narrowed to SmallAutoField with a working sequence."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    old_field = Author._meta.get_field("id")
    new_field = SmallAutoField(primary_key=True)
    new_field.set_attributes_from_name("id")
    new_field.model = Author
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    # Insert with an explicit pk, then reset the sequence so the next
    # auto-generated pk doesn't collide with it.
    Author.objects.create(name="Foo", pk=1)
    with connection.cursor() as cursor:
        sequence_reset_sqls = connection.ops.sequence_reset_sql(
            no_style(), [Author]
        )
        if sequence_reset_sqls:
            cursor.execute(sequence_reset_sqls[0])
    self.assertIsNotNone(Author.objects.create(name="Bar"))
def test_alter_int_pk_to_autofield_pk(self):
    """
    Should be able to rename an IntegerField(primary_key=True) to
    AutoField(primary_key=True).
    """
    with connection.schema_editor() as editor:
        editor.create_model(IntegerPK)
    old_field = IntegerPK._meta.get_field("i")
    new_field = AutoField(primary_key=True)
    new_field.model = IntegerPK
    new_field.set_attributes_from_name("i")
    with connection.schema_editor() as editor:
        editor.alter_field(IntegerPK, old_field, new_field, strict=True)

    # A model representing the updated model.
    class IntegerPKToAutoField(Model):
        i = AutoField(primary_key=True)
        j = IntegerField(unique=True)

        class Meta:
            app_label = "schema"
            apps = new_apps
            db_table = IntegerPK._meta.db_table

    # An id (i) is generated by the database.
    obj = IntegerPKToAutoField.objects.create(j=1)
    self.assertIsNotNone(obj.i)
def test_alter_int_pk_to_bigautofield_pk(self):
    """
    Should be able to rename an IntegerField(primary_key=True) to
    BigAutoField(primary_key=True).
    """
    with connection.schema_editor() as editor:
        editor.create_model(IntegerPK)
    old_field = IntegerPK._meta.get_field("i")
    new_field = BigAutoField(primary_key=True)
    new_field.model = IntegerPK
    new_field.set_attributes_from_name("i")
    with connection.schema_editor() as editor:
        editor.alter_field(IntegerPK, old_field, new_field, strict=True)

    # A model representing the updated model.
    class IntegerPKToBigAutoField(Model):
        i = BigAutoField(primary_key=True)
        j = IntegerField(unique=True)

        class Meta:
            app_label = "schema"
            apps = new_apps
            db_table = IntegerPK._meta.db_table

    # An id (i) is generated by the database.
    obj = IntegerPKToBigAutoField.objects.create(j=1)
    self.assertIsNotNone(obj.i)
@isolate_apps("schema")
def test_alter_smallint_pk_to_smallautofield_pk(self):
    """
    Should be able to rename an SmallIntegerField(primary_key=True) to
    SmallAutoField(primary_key=True).
    """

    class SmallIntegerPK(Model):
        i = SmallIntegerField(primary_key=True)

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(SmallIntegerPK)
    self.isolated_local_models = [SmallIntegerPK]
    old_field = SmallIntegerPK._meta.get_field("i")
    new_field = SmallAutoField(primary_key=True)
    new_field.model = SmallIntegerPK
    new_field.set_attributes_from_name("i")
    with connection.schema_editor() as editor:
        editor.alter_field(SmallIntegerPK, old_field, new_field, strict=True)

    # A model representing the updated model.
    class IntegerPKToSmallAutoField(Model):
        i = SmallAutoField(primary_key=True)

        class Meta:
            app_label = "schema"
            apps = new_apps
            db_table = SmallIntegerPK._meta.db_table

    # An id (i) is generated by the database.
    obj = IntegerPKToSmallAutoField.objects.create()
    self.assertIsNotNone(obj.i)
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_alter_serial_auto_field_to_bigautofield(self):
    """
    Altering a (small)serial-backed auto column changes the backing
    sequence's data type, including when the column is renamed.
    """

    class SerialAutoField(Model):
        id = SmallAutoField(primary_key=True)

        class Meta:
            app_label = "schema"

    table = SerialAutoField._meta.db_table
    column = SerialAutoField._meta.get_field("id").column
    # Create the table with a legacy `smallserial` column rather than via
    # the schema editor, to exercise the serial -> identity/alter path.
    with connection.cursor() as cursor:
        cursor.execute(
            f'CREATE TABLE "{table}" '
            f'("{column}" smallserial NOT NULL PRIMARY KEY)'
        )
    try:
        old_field = SerialAutoField._meta.get_field("id")
        new_field = BigAutoField(primary_key=True)
        new_field.model = SerialAutoField
        new_field.set_attributes_from_name("id")
        with connection.schema_editor() as editor:
            editor.alter_field(SerialAutoField, old_field, new_field, strict=True)
        sequence_name = f"{table}_{column}_seq"
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT data_type FROM pg_sequences WHERE sequencename = %s",
                [sequence_name],
            )
            row = cursor.fetchone()
            sequence_data_type = row[0] if row and row[0] else None
            # The sequence follows the column to bigint.
            self.assertEqual(sequence_data_type, "bigint")
        # Rename the column.
        old_field = new_field
        new_field = AutoField(primary_key=True)
        new_field.model = SerialAutoField
        new_field.set_attributes_from_name("renamed_id")
        with connection.schema_editor() as editor:
            editor.alter_field(SerialAutoField, old_field, new_field, strict=True)
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT data_type FROM pg_sequences WHERE sequencename = %s",
                [sequence_name],
            )
            row = cursor.fetchone()
            sequence_data_type = row[0] if row and row[0] else None
            # The sequence type follows the narrower AutoField as well.
            self.assertEqual(sequence_data_type, "integer")
    finally:
        # Manual cleanup since the table was created with raw SQL.
        with connection.cursor() as cursor:
            cursor.execute(f'DROP TABLE "{table}"')
def test_alter_int_pk_to_int_unique(self):
    """
    Should be able to rename an IntegerField(primary_key=True) to
    IntegerField(unique=True).
    """
    with connection.schema_editor() as editor:
        editor.create_model(IntegerPK)
    # Delete the old PK
    old_field = IntegerPK._meta.get_field("i")
    new_field = IntegerField(unique=True)
    new_field.model = IntegerPK
    new_field.set_attributes_from_name("i")
    with connection.schema_editor() as editor:
        editor.alter_field(IntegerPK, old_field, new_field, strict=True)
    # The primary key constraint is gone. Result depends on database:
    # 'id' for SQLite, None for others (must not be 'i').
    self.assertIn(self.get_primary_key(IntegerPK._meta.db_table), ("id", None))

    # Set up a model class as it currently stands. The original IntegerPK
    # class is now out of date and some backends make use of the whole
    # model class when modifying a field (such as sqlite3 when remaking a
    # table) so an outdated model class leads to incorrect results.
    class Transitional(Model):
        i = IntegerField(unique=True)
        j = IntegerField(unique=True)

        class Meta:
            app_label = "schema"
            apps = new_apps
            db_table = "INTEGERPK"

    # model requires a new PK
    old_field = Transitional._meta.get_field("j")
    new_field = IntegerField(primary_key=True)
    new_field.model = Transitional
    new_field.set_attributes_from_name("j")
    with connection.schema_editor() as editor:
        editor.alter_field(Transitional, old_field, new_field, strict=True)

    # Create a model class representing the updated model.
    class IntegerUnique(Model):
        i = IntegerField(unique=True)
        j = IntegerField(primary_key=True)

        class Meta:
            app_label = "schema"
            apps = new_apps
            db_table = "INTEGERPK"

    # Ensure unique constraint works.
    IntegerUnique.objects.create(i=1, j=1)
    with self.assertRaises(IntegrityError):
        IntegerUnique.objects.create(i=1, j=2)
def test_rename(self):
    """
    Tests simple altering of fields
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Ensure the field is right to begin with
    columns = self.column_classes(Author)
    self.assertEqual(
        columns["name"][0],
        connection.features.introspected_field_types["CharField"],
    )
    self.assertNotIn("display_name", columns)
    # Alter the name field's name
    old_field = Author._meta.get_field("name")
    new_field = CharField(max_length=254)
    new_field.set_attributes_from_name("display_name")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    # The column is renamed, not added alongside the old one.
    columns = self.column_classes(Author)
    self.assertEqual(
        columns["display_name"][0],
        connection.features.introspected_field_types["CharField"],
    )
    self.assertNotIn("name", columns)
@isolate_apps("schema")
def test_rename_referenced_field(self):
    """Renaming a field targeted by a FK (to_field) updates the reference."""

    class Author(Model):
        name = CharField(max_length=255, unique=True)

        class Meta:
            app_label = "schema"

    class Book(Model):
        author = ForeignKey(Author, CASCADE, to_field="name")

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    new_field = CharField(max_length=255, unique=True)
    new_field.set_attributes_from_name("renamed")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, Author._meta.get_field("name"), new_field)
    # Ensure the foreign key reference was updated.
    self.assertForeignKeyExists(Book, "author_id", "schema_author", "renamed")
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_rename_keep_null_status(self):
    """
    Renaming a field shouldn't affect the not null status.
    """
    with connection.schema_editor() as editor:
        editor.create_model(Note)
    # NOT NULL holds before the rename.
    with self.assertRaises(IntegrityError):
        Note.objects.create(info=None)
    old_field = Note._meta.get_field("info")
    new_field = TextField()
    new_field.set_attributes_from_name("detail_info")
    with connection.schema_editor() as editor:
        editor.alter_field(Note, old_field, new_field, strict=True)
    columns = self.column_classes(Note)
    self.assertEqual(columns["detail_info"][0], "TextField")
    self.assertNotIn("info", columns)
    # NOT NULL still holds after the rename (via the renamed-field model).
    with self.assertRaises(IntegrityError):
        NoteRename.objects.create(detail_info=None)
@isolate_apps("schema")
def test_rename_keep_db_default(self):
    """Renaming a field shouldn't affect a database default."""

    class AuthorDbDefault(Model):
        birth_year = IntegerField(db_default=1985)

        class Meta:
            app_label = "schema"

    self.isolated_local_models = [AuthorDbDefault]
    with connection.schema_editor() as editor:
        editor.create_model(AuthorDbDefault)
    columns = self.column_classes(AuthorDbDefault)
    # Introspected defaults come back as strings.
    self.assertEqual(columns["birth_year"][1].default, "1985")
    old_field = AuthorDbDefault._meta.get_field("birth_year")
    new_field = IntegerField(db_default=1985)
    new_field.set_attributes_from_name("renamed_year")
    new_field.model = AuthorDbDefault
    with connection.schema_editor() as editor:
        editor.alter_field(AuthorDbDefault, old_field, new_field, strict=True)
    columns = self.column_classes(AuthorDbDefault)
    self.assertEqual(columns["renamed_year"][1].default, "1985")
@isolate_apps("schema")
def test_add_field_both_defaults_preserves_db_default(self):
    """
    When a field declares both default and db_default, add_field must put
    db_default (not default) into the column definition.
    """

    class Author(Model):
        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
    field = IntegerField(default=1985, db_default=1988)
    field.set_attributes_from_name("birth_year")
    field.model = Author
    with connection.schema_editor() as editor:
        editor.add_field(Author, field)
    columns = self.column_classes(Author)
    # The database-level default wins in the schema.
    self.assertEqual(columns["birth_year"][1].default, "1988")
@isolate_apps("schema")
def test_add_text_field_with_db_default(self):
    """A string db_default on a TextField ends up in the column default."""

    class Author(Model):
        description = TextField(db_default="(missing)")

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
    columns = self.column_classes(Author)
    # assertIn, not assertEqual: backends may quote/cast the default.
    self.assertIn("(missing)", columns["description"][1].default)
@isolate_apps("schema")
def test_db_default_equivalent_sql_noop(self):
    """
    Altering a field whose db_default is expressed differently (Value("foo")
    vs "foo") but renders to the same SQL must issue no queries.
    """

    class Author(Model):
        name = TextField(db_default=Value("foo"))

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
    new_field = TextField(db_default="foo")
    new_field.set_attributes_from_name("name")
    new_field.model = Author
    # assertNumQueries(0): the alter must be recognized as a no-op.
    with connection.schema_editor() as editor, self.assertNumQueries(0):
        editor.alter_field(Author, Author._meta.get_field("name"), new_field)
@skipUnlessDBFeature("supports_json_field")
@isolate_apps("schema")
def test_db_default_output_field_resolving(self):
    """
    A JSONField db_default containing a non-JSON-native value (datetime) is
    serialized through the field's encoder when resolved by the database.
    """

    class Author(Model):
        data = JSONField(
            encoder=DjangoJSONEncoder,
            db_default={
                "epoch": datetime.datetime(1970, 1, 1, tzinfo=datetime.UTC)
            },
        )

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
    author = Author.objects.create()
    author.refresh_from_db()
    # The datetime round-trips as its DjangoJSONEncoder string form.
    self.assertEqual(author.data, {"epoch": "1970-01-01T00:00:00Z"})
@skipUnlessDBFeature(
    "supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_rename_field_with_check_to_truncated_name(self):
    """
    Renaming a checked column to a name longer than the backend's limit
    keeps exactly one check constraint on the truncated column name.
    """

    class AuthorWithLongColumn(Model):
        field_with_very_looooooong_name = PositiveIntegerField(null=True)

        class Meta:
            app_label = "schema"

    self.isolated_local_models = [AuthorWithLongColumn]
    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithLongColumn)
    old_field = AuthorWithLongColumn._meta.get_field(
        "field_with_very_looooooong_name"
    )
    new_field = PositiveIntegerField(null=True)
    new_field.set_attributes_from_name("renamed_field_with_very_long_name")
    with connection.schema_editor() as editor:
        editor.alter_field(AuthorWithLongColumn, old_field, new_field, strict=True)
    # The column name the database actually stores may be truncated.
    new_field_name = truncate_name(
        new_field.column, connection.ops.max_name_length()
    )
    constraints = self.get_constraints(AuthorWithLongColumn._meta.db_table)
    check_constraints = [
        name
        for name, details in constraints.items()
        if details["columns"] == [new_field_name] and details["check"]
    ]
    self.assertEqual(len(check_constraints), 1)
def _test_m2m_create(self, M2MFieldClass):
    """
    Tests M2M fields on models during creation
    """

    class LocalBookWithM2M(Model):
        author = ForeignKey(Author, CASCADE)
        title = CharField(max_length=100, db_index=True)
        pub_date = DateTimeField()
        tags = M2MFieldClass("TagM2MTest", related_name="books")

        class Meta:
            app_label = "schema"
            apps = new_apps

    # NOTE(review): presumably consumed by the class's teardown for cleanup.
    self.local_models = [LocalBookWithM2M]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(TagM2MTest)
        editor.create_model(LocalBookWithM2M)
    # Ensure there is now an m2m table there with the expected FK column type.
    columns = self.column_classes(
        LocalBookWithM2M._meta.get_field("tags").remote_field.through
    )
    self.assertEqual(
        columns["tagm2mtest_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
def test_m2m_create(self):
    """M2M table creation with the stock ManyToManyField."""
    self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
    """M2M table creation with a custom ManyToManyField subclass."""
    self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
    """M2M table creation with an inherited ManyToManyField subclass."""
    self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
    """
    Tests M2M fields on models during creation with through models
    """

    class LocalTagThrough(Model):
        book = ForeignKey("schema.LocalBookWithM2MThrough", CASCADE)
        tag = ForeignKey("schema.TagM2MTest", CASCADE)

        class Meta:
            app_label = "schema"
            apps = new_apps

    class LocalBookWithM2MThrough(Model):
        tags = M2MFieldClass(
            "TagM2MTest", related_name="books", through=LocalTagThrough
        )

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalTagThrough)
        editor.create_model(TagM2MTest)
        editor.create_model(LocalBookWithM2MThrough)
    # The explicit through table exists with both FK columns typed as
    # the backend's big-integer FK type.
    columns = self.column_classes(LocalTagThrough)
    self.assertEqual(
        columns["book_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    self.assertEqual(
        columns["tag_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
def test_m2m_create_through(self):
    """Through-model M2M creation with the stock ManyToManyField."""
    self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
    """Through-model M2M creation with a custom M2M field subclass."""
    self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
    """Through-model M2M creation with an inherited M2M field subclass."""
    self._test_m2m_create_through(InheritedManyToManyField)
def test_m2m_through_remove(self):
    """Removing through= from an M2M field makes alter_field() raise ValueError."""

    class LocalAuthorNoteThrough(Model):
        book = ForeignKey("schema.Author", CASCADE)
        tag = ForeignKey("self", CASCADE)

        class Meta:
            app_label = "schema"
            apps = new_apps

    class LocalNoteWithM2MThrough(Model):
        authors = ManyToManyField("schema.Author", through=LocalAuthorNoteThrough)

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalAuthorNoteThrough, LocalNoteWithM2MThrough]
    # Create the tables.
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(LocalAuthorNoteThrough)
        editor.create_model(LocalNoteWithM2MThrough)
    # Remove the through parameter.
    old_field = LocalNoteWithM2MThrough._meta.get_field("authors")
    new_field = ManyToManyField("Author")
    new_field.set_attributes_from_name("authors")
    msg = (
        f"Cannot alter field {old_field} into {new_field} - they are not "
        f"compatible types (you cannot alter to or from M2M fields, or add or "
        f"remove through= on M2M fields)"
    )
    with connection.schema_editor() as editor:
        with self.assertRaisesMessage(ValueError, msg):
            editor.alter_field(LocalNoteWithM2MThrough, old_field, new_field)
def _test_m2m(self, M2MFieldClass):
    """
    Tests adding/removing M2M fields on models
    """

    class LocalAuthorWithM2M(Model):
        name = CharField(max_length=255)

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalAuthorWithM2M]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalAuthorWithM2M)
        editor.create_model(TagM2MTest)
    # Create an M2M field
    new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
    new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
    # Ensure there's no m2m table there
    with self.assertRaises(DatabaseError):
        self.column_classes(new_field.remote_field.through)
    # Add the field while capturing queries so the DDL can be inspected.
    with (
        CaptureQueriesContext(connection) as ctx,
        connection.schema_editor() as editor,
    ):
        editor.add_field(LocalAuthorWithM2M, new_field)
    # Table is not rebuilt: exactly one CREATE TABLE (the m2m table) and
    # no DROP TABLE were issued.
    self.assertEqual(
        len(
            [
                query["sql"]
                for query in ctx.captured_queries
                if "CREATE TABLE" in query["sql"]
            ]
        ),
        1,
    )
    self.assertIs(
        any("DROP TABLE" in query["sql"] for query in ctx.captured_queries),
        False,
    )
    # Ensure there is now an m2m table there
    columns = self.column_classes(new_field.remote_field.through)
    self.assertEqual(
        columns["tagm2mtest_id"][0],
        connection.features.introspected_field_types["BigIntegerField"],
    )
    # "Alter" the field. This should not rename the DB table to itself.
    with connection.schema_editor() as editor:
        editor.alter_field(LocalAuthorWithM2M, new_field, new_field, strict=True)
    # Remove the M2M table again
    with connection.schema_editor() as editor:
        editor.remove_field(LocalAuthorWithM2M, new_field)
    # Ensure there's no m2m table there
    with self.assertRaises(DatabaseError):
        self.column_classes(new_field.remote_field.through)
    # Make sure the model state is coherent with the table one now that
    # we've removed the tags field: detach the field from Meta, drop the
    # auto-created through model from the isolated app registry, and
    # invalidate Meta's caches.
    opts = LocalAuthorWithM2M._meta
    opts.local_many_to_many.remove(new_field)
    del new_apps.all_models["schema"][
        new_field.remote_field.through._meta.model_name
    ]
    opts._expire_cache()
def test_m2m(self):
    """Add/remove M2M with the stock ManyToManyField."""
    self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
    """Add/remove M2M with a custom ManyToManyField subclass."""
    self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
    """Add/remove M2M with an inherited ManyToManyField subclass."""
    self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
    """
    Tests altering M2Ms with explicit through models (should no-op)
    """

    class LocalAuthorTag(Model):
        author = ForeignKey("schema.LocalAuthorWithM2MThrough", CASCADE)
        tag = ForeignKey("schema.TagM2MTest", CASCADE)

        class Meta:
            app_label = "schema"
            apps = new_apps

    class LocalAuthorWithM2MThrough(Model):
        name = CharField(max_length=255)
        tags = M2MFieldClass(
            "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
        )

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalAuthorTag)
        editor.create_model(LocalAuthorWithM2MThrough)
        editor.create_model(TagM2MTest)
    # Ensure the m2m table is there (pk + the two FK columns).
    self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
    # "Alter" the field's blankness. This should not actually do anything.
    old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
    new_field = M2MFieldClass(
        "schema.TagM2MTest", related_name="authors", through=LocalAuthorTag
    )
    new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
    with connection.schema_editor() as editor:
        editor.alter_field(
            LocalAuthorWithM2MThrough, old_field, new_field, strict=True
        )
    # Ensure the m2m table is still there
    self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
    """No-op alter of a through-model M2M (stock ManyToManyField)."""
    self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
    """No-op alter of a through-model M2M (custom subclass)."""
    self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
    """No-op alter of a through-model M2M (inherited subclass)."""
    self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
    """
    Tests repointing M2M fields
    """

    class LocalBookWithM2M(Model):
        author = ForeignKey(Author, CASCADE)
        title = CharField(max_length=100, db_index=True)
        pub_date = DateTimeField()
        tags = M2MFieldClass("TagM2MTest", related_name="books")

        class Meta:
            app_label = "schema"
            apps = new_apps

    self.local_models = [LocalBookWithM2M]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(LocalBookWithM2M)
        editor.create_model(TagM2MTest)
        editor.create_model(UniqueTest)
    # Ensure the M2M exists and points to TagM2MTest
    if connection.features.supports_foreign_keys:
        self.assertForeignKeyExists(
            LocalBookWithM2M._meta.get_field("tags").remote_field.through,
            "tagm2mtest_id",
            "schema_tagm2mtest",
        )
    # Repoint the M2M: contribute a new field ("uniques") targeting
    # UniqueTest and alter from the old "tags" field to it.
    old_field = LocalBookWithM2M._meta.get_field("tags")
    new_field = M2MFieldClass(UniqueTest)
    new_field.contribute_to_class(LocalBookWithM2M, "uniques")
    with connection.schema_editor() as editor:
        editor.alter_field(LocalBookWithM2M, old_field, new_field, strict=True)
    # Ensure old M2M is gone ("tags" is still in Meta until removed below).
    with self.assertRaises(DatabaseError):
        self.column_classes(
            LocalBookWithM2M._meta.get_field("tags").remote_field.through
        )
    # This model looks like the new model and is used for teardown.
    opts = LocalBookWithM2M._meta
    opts.local_many_to_many.remove(old_field)
    # Ensure the new M2M exists and points to UniqueTest
    if connection.features.supports_foreign_keys:
        self.assertForeignKeyExists(
            new_field.remote_field.through, "uniquetest_id", "schema_uniquetest"
        )
def test_m2m_repoint(self):
    """Repointing an M2M with the stock ManyToManyField."""
    self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
    """Repointing an M2M with a custom ManyToManyField subclass."""
    self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
    """Repointing an M2M with an inherited ManyToManyField subclass."""
    self._test_m2m_repoint(InheritedManyToManyField)
@isolate_apps("schema")
def test_m2m_rename_field_in_target_model(self):
    """Renaming a field on an M2M target model leaves the m2m table intact."""

    class LocalTagM2MTest(Model):
        title = CharField(max_length=255)

        class Meta:
            app_label = "schema"

    class LocalM2M(Model):
        tags = ManyToManyField(LocalTagM2MTest)

        class Meta:
            app_label = "schema"

    # Create the tables.
    with connection.schema_editor() as editor:
        editor.create_model(LocalM2M)
        editor.create_model(LocalTagM2MTest)
    self.isolated_local_models = [LocalM2M, LocalTagM2MTest]
    # Ensure the m2m table is there.
    self.assertEqual(len(self.column_classes(LocalM2M)), 1)
    # Alter a field in LocalTagM2MTest (rename title -> title1).
    old_field = LocalTagM2MTest._meta.get_field("title")
    new_field = CharField(max_length=254)
    new_field.contribute_to_class(LocalTagM2MTest, "title1")
    # @isolate_apps() and inner models are needed to have the model
    # relations populated, otherwise this doesn't act as a regression test.
    self.assertEqual(len(new_field.model._meta.related_objects), 1)
    with connection.schema_editor() as editor:
        editor.alter_field(LocalTagM2MTest, old_field, new_field, strict=True)
    # Ensure the m2m table is still there.
    self.assertEqual(len(self.column_classes(LocalM2M)), 1)
@skipUnlessDBFeature(
    "supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_check_constraints(self):
    """
    Tests creating/deleting CHECK constraints
    """

    def has_height_check():
        # A check constraint exists on the "height" column.
        return any(
            details["check"] and details["columns"] == ["height"]
            for details in self.get_constraints(Author._meta.db_table).values()
        )

    # Create the table; the field-level check comes with it.
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    if not has_height_check():
        self.fail("No check constraint for height found")
    # Swap in a plain IntegerField, which drops the check.
    old_field = Author._meta.get_field("height")
    new_field = IntegerField(null=True, blank=True)
    new_field.set_attributes_from_name("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    if has_height_check():
        self.fail("Check constraint for height found")
    # Restore the original field, which re-adds the check.
    new_field2 = Author._meta.get_field("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, new_field, new_field2, strict=True)
    if not has_height_check():
        self.fail("No check constraint for height found")
@skipUnlessDBFeature(
    "supports_column_check_constraints", "can_introspect_check_constraints"
)
@isolate_apps("schema")
def test_check_constraint_timedelta_param(self):
    """A CheckConstraint comparing a DurationField to a timedelta is enforced."""

    class DurationModel(Model):
        duration = DurationField()

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(DurationModel)
    self.isolated_local_models = [DurationModel]
    constraint_name = "duration_gte_5_minutes"
    constraint = CheckConstraint(
        condition=Q(duration__gt=datetime.timedelta(minutes=5)),
        name=constraint_name,
    )
    DurationModel._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(DurationModel, constraint)
    constraints = self.get_constraints(DurationModel._meta.db_table)
    self.assertIn(constraint_name, constraints)
    # A violating row is rejected; atomic() keeps the connection usable
    # after the expected IntegrityError.
    with self.assertRaises(IntegrityError), atomic():
        DurationModel.objects.create(duration=datetime.timedelta(minutes=4))
    DurationModel.objects.create(duration=datetime.timedelta(minutes=10))
@skipUnlessDBFeature(
    "supports_column_check_constraints",
    "can_introspect_check_constraints",
    "supports_json_field",
)
@isolate_apps("schema")
def test_check_constraint_exact_jsonfield(self):
    """A CheckConstraint on a JSONField key lookup is enforced."""

    class JSONConstraintModel(Model):
        data = JSONField()

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(JSONConstraintModel)
    self.isolated_local_models = [JSONConstraintModel]
    constraint_name = "check_only_stable_version"
    constraint = CheckConstraint(
        condition=Q(data__version="stable"),
        name=constraint_name,
    )
    JSONConstraintModel._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(JSONConstraintModel, constraint)
    constraints = self.get_constraints(JSONConstraintModel._meta.db_table)
    self.assertIn(constraint_name, constraints)
    # A violating row is rejected; atomic() keeps the connection usable
    # after the expected IntegrityError.
    with self.assertRaises(IntegrityError), atomic():
        JSONConstraintModel.objects.create(
            data={"release": "5.0.2dev", "version": "dev"}
        )
    JSONConstraintModel.objects.create(
        data={"release": "5.0.3", "version": "stable"}
    )
@skipUnlessDBFeature(
    "supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_remove_field_check_does_not_remove_meta_constraints(self):
    """
    Removing a field-level check via alter_field() must leave a
    Meta-declared CheckConstraint on the same column untouched.
    """

    def other_height_checks(constraints):
        # Check constraints on "height" other than the Meta-declared one.
        return [
            name
            for name, details in constraints.items()
            if details["columns"] == ["height"]
            and details["check"]
            and name != custom_constraint_name
        ]

    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Install the custom Meta-style constraint alongside the field check.
    constraint = CheckConstraint(
        condition=Q(height__gte=0), name="author_height_gte_0_check"
    )
    custom_constraint_name = constraint.name
    Author._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(Author, constraint)
    # Both the custom and the field-generated checks are present.
    constraints = self.get_constraints(Author._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_height_checks(constraints)), 1)
    # Swap in a field without a check; only the field check goes away.
    old_field = Author._meta.get_field("height")
    new_field = IntegerField(null=True, blank=True)
    new_field.set_attributes_from_name("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    constraints = self.get_constraints(Author._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_height_checks(constraints)), 0)
    # Restore the original field; the field check comes back.
    new_field2 = Author._meta.get_field("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, new_field, new_field2, strict=True)
    constraints = self.get_constraints(Author._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_height_checks(constraints)), 1)
    # Finally drop the custom constraint itself.
    with connection.schema_editor() as editor:
        Author._meta.constraints = []
        editor.remove_constraint(Author, constraint)
def test_unique(self):
    """
    Tests removing and adding unique constraints to a single column.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Tag)
    # Ensure the field is unique to begin with
    Tag.objects.create(title="foo", slug="foo")
    with self.assertRaises(IntegrityError):
        Tag.objects.create(title="bar", slug="foo")
    Tag.objects.all().delete()
    # Alter the slug field to be non-unique
    old_field = Tag._meta.get_field("slug")
    new_field = SlugField(unique=False)
    new_field.set_attributes_from_name("slug")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, old_field, new_field, strict=True)
    # Ensure the field is no longer unique
    Tag.objects.create(title="foo", slug="foo")
    Tag.objects.create(title="bar", slug="foo")
    Tag.objects.all().delete()
    # Alter the slug field to be unique
    new_field2 = SlugField(unique=True)
    new_field2.set_attributes_from_name("slug")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, new_field, new_field2, strict=True)
    # Ensure the field is unique again
    Tag.objects.create(title="foo", slug="foo")
    with self.assertRaises(IntegrityError):
        Tag.objects.create(title="bar", slug="foo")
    Tag.objects.all().delete()
    # Rename the field (slug -> slug2)
    new_field3 = SlugField(unique=True)
    new_field3.set_attributes_from_name("slug2")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, new_field2, new_field3, strict=True)
    # Ensure the field is still unique — TagUniqueRename maps onto the
    # renamed column.
    TagUniqueRename.objects.create(title="foo", slug2="foo")
    with self.assertRaises(IntegrityError):
        TagUniqueRename.objects.create(title="bar", slug2="foo")
    Tag.objects.all().delete()
def test_unique_name_quoting(self):
    """unique_together works on a table whose name requires quoting."""
    old_table_name = TagUniqueRename._meta.db_table
    try:
        with connection.schema_editor() as editor:
            editor.create_model(TagUniqueRename)
            # "unique-table" contains a dash, forcing identifier quoting.
            editor.alter_db_table(TagUniqueRename, old_table_name, "unique-table")
            TagUniqueRename._meta.db_table = "unique-table"
            # This fails if the unique index name isn't quoted.
            editor.alter_unique_together(TagUniqueRename, [], (("title", "slug2"),))
    finally:
        # Always restore the model's table state for later tests.
        with connection.schema_editor() as editor:
            editor.delete_model(TagUniqueRename)
        TagUniqueRename._meta.db_table = old_table_name
@isolate_apps("schema")
@skipUnlessDBFeature("supports_foreign_keys")
def test_unique_no_unnecessary_fk_drops(self):
    """
    If AlterField isn't selective about dropping foreign key constraints
    when modifying a field with a unique constraint, the AlterField
    incorrectly drops and recreates the Book.author foreign key even though
    it doesn't restrict the field being changed (#29193).
    """

    class Author(Model):
        name = CharField(max_length=254, unique=True)

        class Meta:
            app_label = "schema"

    class Book(Model):
        author = ForeignKey(Author, CASCADE)

        class Meta:
            app_label = "schema"

    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    new_field = CharField(max_length=255, unique=True)
    new_field.model = Author
    new_field.set_attributes_from_name("name")
    # Capture schema logging to count the DDL statements executed.
    with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
        with connection.schema_editor() as editor:
            editor.alter_field(Author, Author._meta.get_field("name"), new_field)
    # One SQL statement is executed to alter the field.
    self.assertEqual(len(cm.records), 1)
@isolate_apps("schema")
def test_unique_and_reverse_m2m(self):
    """
    AlterField can modify a unique field when there's a reverse M2M
    relation on the model.
    """

    class Tag(Model):
        title = CharField(max_length=255)
        slug = SlugField(unique=True)

        class Meta:
            app_label = "schema"

    class Book(Model):
        tags = ManyToManyField(Tag, related_name="books")

        class Meta:
            app_label = "schema"

    self.isolated_local_models = [Book._meta.get_field("tags").remote_field.through]
    with connection.schema_editor() as editor:
        editor.create_model(Tag)
        editor.create_model(Book)
    new_field = SlugField(max_length=75, unique=True)
    new_field.model = Tag
    new_field.set_attributes_from_name("slug")
    # Capture schema logging to count the DDL statements executed.
    with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, Tag._meta.get_field("slug"), new_field)
    # One SQL statement is executed to alter the field.
    self.assertEqual(len(cm.records), 1)
    # Ensure that the field is still unique.
    Tag.objects.create(title="foo", slug="foo")
    with self.assertRaises(IntegrityError):
        Tag.objects.create(title="bar", slug="foo")
def test_remove_ignored_unique_constraint_not_create_fk_index(self):
    """Dropping a partial unique constraint must not add a redundant FK index."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    constraint = UniqueConstraint(
        "author",
        condition=Q(title__in=["tHGttG", "tRatEotU"]),
        name="book_author_condition_uniq",
    )
    # Add unique constraint.
    with connection.schema_editor() as editor:
        editor.add_constraint(Book, constraint)
    old_constraints = self.get_constraints_for_column(
        Book,
        Book._meta.get_field("author").column,
    )
    # Remove unique constraint.
    with connection.schema_editor() as editor:
        editor.remove_constraint(Book, constraint)
    new_constraints = self.get_constraints_for_column(
        Book,
        Book._meta.get_field("author").column,
    )
    # Redundant foreign key index is not added.
    self.assertEqual(
        (
            # Backends with partial-index support actually created the
            # conditional constraint, so one fewer entry remains after
            # removal; others ignored it entirely.
            len(old_constraints) - 1
            if connection.features.supports_partial_indexes
            else len(old_constraints)
        ),
        len(new_constraints),
    )
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_field_unique_does_not_remove_meta_constraints(self):
    """
    Dropping unique=True from a field must not drop a Meta-declared
    UniqueConstraint covering the same column.
    """

    def other_name_uniques(constraints):
        # Unique constraints on "name" other than the Meta-declared one.
        return [
            name
            for name, details in constraints.items()
            if details["columns"] == ["name"]
            and details["unique"]
            and name != custom_constraint_name
        ]

    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithUniqueName)
    self.local_models = [AuthorWithUniqueName]
    # Install the custom Meta-style unique constraint.
    constraint = UniqueConstraint(fields=["name"], name="author_name_uniq")
    custom_constraint_name = constraint.name
    AuthorWithUniqueName._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(AuthorWithUniqueName, constraint)
    # Both the custom and the field-generated uniques are present.
    constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_name_uniques(constraints)), 1)
    # Swap in a non-unique field; only the field unique goes away.
    old_field = AuthorWithUniqueName._meta.get_field("name")
    new_field = CharField(max_length=255)
    new_field.set_attributes_from_name("name")
    with connection.schema_editor() as editor:
        editor.alter_field(AuthorWithUniqueName, old_field, new_field, strict=True)
    constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_name_uniques(constraints)), 0)
    # Restore the original unique field; its constraint returns.
    new_field2 = AuthorWithUniqueName._meta.get_field("name")
    with connection.schema_editor() as editor:
        editor.alter_field(AuthorWithUniqueName, new_field, new_field2, strict=True)
    constraints = self.get_constraints(AuthorWithUniqueName._meta.db_table)
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_name_uniques(constraints)), 1)
    # Finally drop the custom constraint itself.
    with connection.schema_editor() as editor:
        AuthorWithUniqueName._meta.constraints = []
        editor.remove_constraint(AuthorWithUniqueName, constraint)
def test_unique_together(self):
    """
    Tests removing and adding unique_together constraints on a model.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(UniqueTest)
    # Ensure the fields are unique to begin with
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.create(year=2011, slug="foo")
    UniqueTest.objects.create(year=2011, slug="bar")
    with self.assertRaises(IntegrityError):
        UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.all().delete()
    # Alter the model to its non-unique-together companion
    with connection.schema_editor() as editor:
        editor.alter_unique_together(
            UniqueTest, UniqueTest._meta.unique_together, []
        )
    # Ensure the fields are no longer unique
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.all().delete()
    # Alter it back
    # (Removed an unused leftover: a SlugField "new_field2" was created
    # here but never passed to any schema-editor call.)
    with connection.schema_editor() as editor:
        editor.alter_unique_together(
            UniqueTest, [], UniqueTest._meta.unique_together
        )
    # Ensure the fields are unique again
    UniqueTest.objects.create(year=2012, slug="foo")
    with self.assertRaises(IntegrityError):
        UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
    """
    Tests removing and adding unique_together constraints that include
    a foreign key.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure the fields are unique to begin with
    self.assertEqual(Book._meta.unique_together, ())
    # Add the unique_together constraint
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [], [["author", "title"]])
    # Alter it back
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [["author", "title"]], [])
def test_unique_together_with_fk_with_existing_index(self):
    """
    Tests removing and adding unique_together constraints that include
    a foreign key, where the foreign key is added after the model is
    created.
    """
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(BookWithoutAuthor)
        new_field = ForeignKey(Author, CASCADE)
        new_field.set_attributes_from_name("author")
        editor.add_field(BookWithoutAuthor, new_field)
    # Ensure the fields aren't unique to begin with
    # NOTE(review): Book is used from here on — presumably because after
    # add_field() the table matches Book's schema even though it was
    # created as BookWithoutAuthor; confirm against the model definitions.
    self.assertEqual(Book._meta.unique_together, ())
    # Add the unique_together constraint
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [], [["author", "title"]])
    # Alter it back
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [["author", "title"]], [])
def _test_composed_index_with_fk(self, index):
    """Round-trip *index* (which spans Book's FK column) via add/remove_index."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    db_table = Book._meta.db_table
    self.assertEqual(Book._meta.indexes, [])
    # Register the index on Meta and create it in the database.
    Book._meta.indexes = [index]
    with connection.schema_editor() as editor:
        editor.add_index(Book, index)
    self.assertIn(index.name, self.get_constraints(db_table))
    # Unregister it and drop it again.
    Book._meta.indexes = []
    with connection.schema_editor() as editor:
        editor.remove_index(Book, index)
    self.assertNotIn(index.name, self.get_constraints(db_table))
def test_composed_index_with_fk(self):
    """Plain composed index including the FK column."""
    index = Index(fields=["author", "title"], name="book_author_title_idx")
    self._test_composed_index_with_fk(index)
def test_composed_desc_index_with_fk(self):
    """Composed index with a descending FK column."""
    index = Index(fields=["-author", "title"], name="book_author_title_idx")
    self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_index_with_fk(self):
    """Functional composed index over the FK column."""
    index = Index(F("author"), F("title"), name="book_author_title_idx")
    self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_desc_func_index_with_fk(self):
    """Functional composed index with a descending FK expression."""
    index = Index(F("author").desc(), F("title"), name="book_author_title_idx")
    self._test_composed_index_with_fk(index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_composed_func_transform_index_with_fk(self):
    """Functional index using a temporarily registered transform (title__lower)."""
    index = Index(F("title__lower"), name="book_title_lower_idx")
    with register_lookup(CharField, Lower):
        self._test_composed_index_with_fk(index)
def _test_composed_constraint_with_fk(self, constraint):
    """Round-trip *constraint* (spanning Book's FK column) via add/remove_constraint."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    db_table = Book._meta.db_table
    self.assertEqual(Book._meta.constraints, [])
    # Register the constraint on Meta and install it in the database.
    Book._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(Book, constraint)
    self.assertIn(constraint.name, self.get_constraints(db_table))
    # Unregister it and drop it again.
    Book._meta.constraints = []
    with connection.schema_editor() as editor:
        editor.remove_constraint(Book, constraint)
    self.assertNotIn(constraint.name, self.get_constraints(db_table))
def test_composed_constraint_with_fk(self):
    """Unique constraint spanning the FK column can be added and removed."""
    constraint = UniqueConstraint(
        fields=["author", "title"],
        name="book_author_title_uniq",
    )
    self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature(
    "supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_composed_check_constraint_with_fk(self):
    """Check constraint referencing the FK column can be added and removed."""
    constraint = CheckConstraint(
        condition=Q(author__gt=0), name="book_author_check"
    )
    self._test_composed_constraint_with_fk(constraint)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_does_not_remove_meta_constraints(self):
    """
    Removing unique_together must not drop a Meta-declared
    UniqueConstraint covering the same columns.
    """

    def other_pair_uniques(constraints):
        # Unique constraints on (name, birthday) besides the custom one.
        return [
            name
            for name, details in constraints.items()
            if details["columns"] == ["name", "birthday"]
            and details["unique"]
            and name != custom_constraint_name
        ]

    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithUniqueNameAndBirthday)
    self.local_models = [AuthorWithUniqueNameAndBirthday]
    # Install the custom Meta-style unique constraint.
    constraint = UniqueConstraint(
        fields=["name", "birthday"], name="author_name_birthday_uniq"
    )
    custom_constraint_name = constraint.name
    AuthorWithUniqueNameAndBirthday._meta.constraints = [constraint]
    with connection.schema_editor() as editor:
        editor.add_constraint(AuthorWithUniqueNameAndBirthday, constraint)
    # Both the custom constraint and the unique_together index exist.
    constraints = self.get_constraints(
        AuthorWithUniqueNameAndBirthday._meta.db_table
    )
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_pair_uniques(constraints)), 1)
    # Drop unique_together; the custom constraint must survive.
    unique_together = AuthorWithUniqueNameAndBirthday._meta.unique_together
    with connection.schema_editor() as editor:
        editor.alter_unique_together(
            AuthorWithUniqueNameAndBirthday, unique_together, []
        )
    constraints = self.get_constraints(
        AuthorWithUniqueNameAndBirthday._meta.db_table
    )
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_pair_uniques(constraints)), 0)
    # Re-add unique_together; its constraint comes back.
    with connection.schema_editor() as editor:
        editor.alter_unique_together(
            AuthorWithUniqueNameAndBirthday, [], unique_together
        )
    constraints = self.get_constraints(
        AuthorWithUniqueNameAndBirthday._meta.db_table
    )
    self.assertIn(custom_constraint_name, constraints)
    self.assertEqual(len(other_pair_uniques(constraints)), 1)
    # Finally drop the custom constraint itself.
    with connection.schema_editor() as editor:
        AuthorWithUniqueNameAndBirthday._meta.constraints = []
        editor.remove_constraint(AuthorWithUniqueNameAndBirthday, constraint)
def test_unique_constraint(self):
    """A UniqueConstraint installs, references its column in DDL, and drops."""
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    constraint = UniqueConstraint(fields=["name"], name="name_uq")
    # Install the constraint and capture its DDL for inspection.
    with connection.schema_editor() as editor:
        editor.add_constraint(Author, constraint)
        sql = constraint.create_sql(Author, editor)
    author_table = Author._meta.db_table
    self.assertIs(sql.references_table(author_table), True)
    self.assertIs(sql.references_column(author_table, "name"), True)
    # Drop it and verify it is gone from introspection.
    with connection.schema_editor() as editor:
        editor.remove_constraint(Author, constraint)
    self.assertNotIn(constraint.name, self.get_constraints(author_table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_unique_constraint(self):
        """A functional UniqueConstraint (Upper(name) DESC) round-trips."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(Upper("name").desc(), name="func_upper_uq")
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
            sql = constraint.create_sql(Author, editor)
        table = Author._meta.db_table
        constraints = self.get_constraints(table)
        # Column ordering is only introspectable on some backends.
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, constraint.name, ["DESC"])
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        # SQL contains a database function.
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_composite_func_unique_constraint(self):
        """A UniqueConstraint over two expressions keeps declaration order."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithSlug)
        constraint = UniqueConstraint(
            Upper("title"),
            Lower("slug"),
            name="func_upper_lower_unq",
        )
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(BookWithSlug, constraint)
            sql = constraint.create_sql(BookWithSlug, editor)
        table = BookWithSlug._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        # SQL contains database functions.
        self.assertIs(sql.references_column(table, "title"), True)
        self.assertIs(sql.references_column(table, "slug"), True)
        sql = str(sql)
        self.assertIn("UPPER(%s)" % editor.quote_name("title"), sql)
        self.assertIn("LOWER(%s)" % editor.quote_name("slug"), sql)
        # Expressions must appear in the order they were declared.
        self.assertLess(sql.index("UPPER"), sql.index("LOWER"))
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(BookWithSlug, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_unique_constraint_field_and_expression(self):
        """UniqueConstraint can mix plain field names with ordered expressions."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(
            F("height").desc(),
            "uuid",
            Lower("name").asc(),
            name="func_f_lower_field_unq",
        )
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
            sql = constraint.create_sql(Author, editor)
        table = Author._meta.db_table
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, constraint.name, ["DESC", "ASC", "ASC"])
        constraints = self.get_constraints(table)
        self.assertIs(constraints[constraint.name]["unique"], True)
        # Three members; the plain "uuid" field sits in the middle slot.
        self.assertEqual(len(constraints[constraint.name]["columns"]), 3)
        self.assertEqual(constraints[constraint.name]["columns"][1], "uuid")
        # SQL contains database functions and columns.
        self.assertIs(sql.references_column(table, "height"), True)
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIs(sql.references_column(table, "uuid"), True)
        self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes", "supports_partial_indexes")
    def test_func_unique_constraint_partial(self):
        """A functional UniqueConstraint with a condition emits a WHERE clause."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(
            Upper("name"),
            name="func_upper_cond_weight_uq",
            condition=Q(weight__isnull=False),
        )
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
            sql = constraint.create_sql(Author, editor)
        table = Author._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
        # The condition becomes a partial-index WHERE clause.
        self.assertIn(
            "WHERE %s IS NOT NULL" % editor.quote_name("weight"),
            str(sql),
        )
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes", "supports_covering_indexes")
    def test_func_unique_constraint_covering(self):
        """A functional UniqueConstraint with include= emits an INCLUDE clause."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(
            Upper("name"),
            name="func_upper_covering_uq",
            include=["weight", "height"],
        )
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
            sql = constraint.create_sql(Author, editor)
        table = Author._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        # The expression member introspects as None, followed by the
        # included (non-key) columns.
        self.assertEqual(
            constraints[constraint.name]["columns"],
            [None, "weight", "height"],
        )
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIs(sql.references_column(table, "weight"), True)
        self.assertIs(sql.references_column(table, "height"), True)
        self.assertIn("UPPER(%s)" % editor.quote_name("name"), str(sql))
        self.assertIn(
            "INCLUDE (%s, %s)"
            % (
                editor.quote_name("weight"),
                editor.quote_name("height"),
            ),
            str(sql),
        )
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_unique_constraint_lookups(self):
        """F() expressions with registered transforms work in UniqueConstraint."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Lower/Abs are only registered as lookups within this context.
        with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
            constraint = UniqueConstraint(
                F("name__lower"),
                F("weight__abs"),
                name="func_lower_abs_lookup_uq",
            )
            # Add constraint.
            with connection.schema_editor() as editor:
                editor.add_constraint(Author, constraint)
                sql = constraint.create_sql(Author, editor)
        table = Author._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        # SQL contains columns.
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIs(sql.references_column(table, "weight"), True)
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_unique_constraint_collate(self):
        """Collate() expressions in a UniqueConstraint emit COLLATE clauses."""
        collation = connection.features.test_collations.get("non_default")
        if not collation:
            self.skipTest("This backend does not support case-insensitive collations.")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithSlug)
        constraint = UniqueConstraint(
            Collate(F("title"), collation=collation).desc(),
            Collate("slug", collation=collation),
            name="func_collate_uq",
        )
        # Add constraint.
        with connection.schema_editor() as editor:
            editor.add_constraint(BookWithSlug, constraint)
            sql = constraint.create_sql(BookWithSlug, editor)
        table = BookWithSlug._meta.db_table
        constraints = self.get_constraints(table)
        self.assertIn(constraint.name, constraints)
        self.assertIs(constraints[constraint.name]["unique"], True)
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, constraint.name, ["DESC", "ASC"])
        # SQL contains columns and a collation.
        self.assertIs(sql.references_column(table, "title"), True)
        self.assertIs(sql.references_column(table, "slug"), True)
        self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
        # Remove constraint.
        with connection.schema_editor() as editor:
            editor.remove_constraint(BookWithSlug, constraint)
        self.assertNotIn(constraint.name, self.get_constraints(table))
@skipIfDBFeature("supports_expression_indexes")
def test_func_unique_constraint_unsupported(self):
# UniqueConstraint is ignored on databases that don't support indexes
# on expressions.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(F("name"), name="func_name_uq")
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nonexistent_field(self):
constraint = UniqueConstraint(Lower("nonexistent"), name="func_nonexistent_uq")
msg = (
"Cannot resolve keyword 'nonexistent' into field. Choices are: "
"height, id, name, uuid, weight"
)
with self.assertRaisesMessage(FieldError, msg):
with connection.schema_editor() as editor:
editor.add_constraint(Author, constraint)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_unique_constraint_nondeterministic(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(Random(), name="func_random_uq")
with connection.schema_editor() as editor:
with self.assertRaises(DatabaseError):
editor.add_constraint(Author, constraint)
    @skipUnlessDBFeature("supports_nulls_distinct_unique_constraints")
    def test_unique_constraint_index_nulls_distinct(self):
        """nulls_distinct=True allows duplicate NULLs; False forbids them."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        nulls_distinct = UniqueConstraint(
            F("height"), name="distinct_height", nulls_distinct=True
        )
        nulls_not_distinct = UniqueConstraint(
            F("weight"), name="not_distinct_weight", nulls_distinct=False
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, nulls_distinct)
            editor.add_constraint(Author, nulls_not_distinct)
        Author.objects.create(name="", height=None, weight=None)
        # Duplicate NULL height is fine (NULLS DISTINCT on height)...
        Author.objects.create(name="", height=None, weight=1)
        # ...but a second NULL weight violates NULLS NOT DISTINCT on weight.
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="", height=1, weight=None)
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, nulls_distinct)
            editor.remove_constraint(Author, nulls_not_distinct)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertNotIn(nulls_distinct.name, constraints)
        self.assertNotIn(nulls_not_distinct.name, constraints)
    @skipUnlessDBFeature("supports_nulls_distinct_unique_constraints")
    def test_unique_constraint_nulls_distinct(self):
        """With nulls_distinct=False, NULLs compare equal across both columns."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(
            fields=["height", "weight"], name="constraint", nulls_distinct=False
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
        # Each distinct (height, weight) pair may appear once...
        Author.objects.create(name="", height=None, weight=None)
        Author.objects.create(name="", height=1, weight=None)
        Author.objects.create(name="", height=None, weight=1)
        # ...and every exact repeat (NULLs included) is rejected.
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="", height=None, weight=None)
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="", height=1, weight=None)
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="", height=None, weight=1)
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertNotIn(constraint.name, constraints)
    @skipUnlessDBFeature(
        "supports_nulls_distinct_unique_constraints",
        "supports_partial_indexes",
    )
    def test_unique_constraint_nulls_distinct_condition(self):
        """nulls_distinct=False combined with a condition only applies to
        rows matching the condition (names starting with "A")."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        constraint = UniqueConstraint(
            fields=["height", "weight"],
            name="un_height_weight_start_A",
            condition=Q(name__startswith="A"),
            nulls_distinct=False,
        )
        with connection.schema_editor() as editor:
            editor.add_constraint(Author, constraint)
        Author.objects.create(name="Adam", height=None, weight=None)
        Author.objects.create(name="Avocado", height=1, weight=None)
        Author.objects.create(name="Adrian", height=None, weight=1)
        # Duplicates within the condition are rejected; rows whose name
        # doesn't start with "A" are outside the constraint entirely.
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="Alex", height=None, weight=None)
        Author.objects.create(name="Bob", height=None, weight=None)
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="Alex", height=1, weight=None)
        Author.objects.create(name="Bill", height=None, weight=None)
        with self.assertRaises(IntegrityError):
            Author.objects.create(name="Alex", height=None, weight=1)
        Author.objects.create(name="Celine", height=None, weight=1)
        with connection.schema_editor() as editor:
            editor.remove_constraint(Author, constraint)
        constraints = self.get_constraints(Author._meta.db_table)
        self.assertNotIn(constraint.name, constraints)
@skipIfDBFeature("supports_nulls_distinct_unique_constraints")
def test_unique_constraint_nulls_distinct_unsupported(self):
# UniqueConstraint is ignored on databases that don't support
# NULLS [NOT] DISTINCT.
with connection.schema_editor() as editor:
editor.create_model(Author)
constraint = UniqueConstraint(
F("name"), name="func_name_uq", nulls_distinct=True
)
with connection.schema_editor() as editor, self.assertNumQueries(0):
self.assertIsNone(editor.add_constraint(Author, constraint))
self.assertIsNone(editor.remove_constraint(Author, constraint))
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure there's no index on the year/slug columns first
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
True,
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertIs(
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c["columns"] == ["slug", "title"]
),
False,
)
    @isolate_apps("schema")
    def test_db_table(self):
        """
        Tests renaming of the table
        """

        # Models are defined locally under an isolated app registry so the
        # db_table mutations below don't leak into other tests.
        class Author(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = "schema"

        class Book(Model):
            author = ForeignKey(Author, CASCADE)

            class Meta:
                app_label = "schema"

        # Create the table and one referring it.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there to begin with
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["name"][0],
            connection.features.introspected_field_types["CharField"],
        )
        # Alter the table
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
        # Keep the in-memory model in sync with the renamed table.
        Author._meta.db_table = "schema_otherauthor"
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["name"][0],
            connection.features.introspected_field_types["CharField"],
        )
        # Ensure the foreign key reference was updated
        self.assertForeignKeyExists(Book, "author_id", "schema_otherauthor")
        # Alter the table again
        with connection.schema_editor() as editor:
            editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
        # Ensure the table is still there
        Author._meta.db_table = "schema_author"
        columns = self.column_classes(Author)
        self.assertEqual(
            columns["name"][0],
            connection.features.introspected_field_types["CharField"],
        )
def test_add_remove_index(self):
"""
Tests index addition and removal
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there and has no index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
# Add the index
index = Index(fields=["name"], name="author_title_idx")
with connection.schema_editor() as editor:
editor.add_index(Author, index)
self.assertIn("name", self.get_indexes(Author._meta.db_table))
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
self.assertNotIn("name", self.get_indexes(Author._meta.db_table))
    def test_remove_db_index_doesnt_remove_custom_indexes(self):
        """
        Changing db_index to False doesn't remove indexes from Meta.indexes.
        """
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithIndexedName)
        self.local_models = [AuthorWithIndexedName]
        # Ensure the table has its index
        self.assertIn("name", self.get_indexes(AuthorWithIndexedName._meta.db_table))
        # Add the custom index
        index = Index(fields=["-name"], name="author_name_idx")
        author_index_name = index.name
        with connection.schema_editor() as editor:
            # The auto-generated name the db_index=True field produced; used
            # below to tell the two indexes apart.
            db_index_name = editor._create_index_name(
                table_name=AuthorWithIndexedName._meta.db_table,
                column_names=("name",),
            )
        # try/finally restores Meta.indexes so later tests see a clean model.
        try:
            AuthorWithIndexedName._meta.indexes = [index]
            with connection.schema_editor() as editor:
                editor.add_index(AuthorWithIndexedName, index)
            old_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
            self.assertIn(author_index_name, old_constraints)
            self.assertIn(db_index_name, old_constraints)
            # Change name field to db_index=False
            old_field = AuthorWithIndexedName._meta.get_field("name")
            new_field = CharField(max_length=255)
            new_field.set_attributes_from_name("name")
            with connection.schema_editor() as editor:
                editor.alter_field(
                    AuthorWithIndexedName, old_field, new_field, strict=True
                )
            new_constraints = self.get_constraints(AuthorWithIndexedName._meta.db_table)
            self.assertNotIn(db_index_name, new_constraints)
            # The index from Meta.indexes is still in the database.
            self.assertIn(author_index_name, new_constraints)
            # Drop the index
            with connection.schema_editor() as editor:
                editor.remove_index(AuthorWithIndexedName, index)
        finally:
            AuthorWithIndexedName._meta.indexes = []
def test_order_index(self):
"""
Indexes defined with ordering (ASC/DESC) defined on column
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
# The table doesn't have an index
self.assertNotIn("title", self.get_indexes(Author._meta.db_table))
index_name = "author_name_idx"
# Add the index
index = Index(fields=["name", "-weight"], name=index_name)
with connection.schema_editor() as editor:
editor.add_index(Author, index)
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(Author._meta.db_table, index_name, ["ASC", "DESC"])
# Drop the index
with connection.schema_editor() as editor:
editor.remove_index(Author, index)
    def test_indexes(self):
        """
        Tests creation/altering of indexes
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the table is there and has the right index
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to remove the index
        old_field = Book._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=False)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the table is there and has no index
        self.assertNotIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Alter to re-add the index
        new_field2 = Book._meta.get_field("title")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, new_field, new_field2, strict=True)
        # Ensure the table is there and has the index again
        self.assertIn(
            "title",
            self.get_indexes(Book._meta.db_table),
        )
        # Add a unique column, verify that creates an implicit index
        # (the slug field is borrowed from BookWithSlug, which shares
        # Book's db_table).
        new_field3 = BookWithSlug._meta.get_field("slug")
        with connection.schema_editor() as editor:
            editor.add_field(Book, new_field3)
        self.assertIn(
            "slug",
            self.get_uniques(Book._meta.db_table),
        )
        # Remove the unique, check the index goes with it
        new_field4 = CharField(max_length=20, unique=False)
        new_field4.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
        self.assertNotIn(
            "slug",
            self.get_uniques(Book._meta.db_table),
        )
def test_text_field_with_db_index(self):
with connection.schema_editor() as editor:
editor.create_model(AuthorTextFieldWithIndex)
# The text_field index is present if the database supports it.
assertion = (
self.assertIn
if connection.features.supports_index_on_text_field
else self.assertNotIn
)
assertion(
"text_field", self.get_indexes(AuthorTextFieldWithIndex._meta.db_table)
)
def _index_expressions_wrappers(self):
index_expression = IndexExpression()
index_expression.set_wrapper_classes(connection)
return ", ".join(
[
wrapper_cls.__qualname__
for wrapper_cls in index_expression.wrapper_classes
]
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_multiple_wrapper_references(self):
index = Index(OrderBy(F("name").desc(), descending=True), name="name")
msg = (
"Multiple references to %s can't be used in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_invalid_topmost_expressions(self):
index = Index(Upper(F("name").desc()), name="name")
msg = (
"%s must be topmost expressions in an indexed expression."
% self._index_expressions_wrappers()
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
editor.add_index(Author, index)
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index(self):
        """A functional index (Lower(name) DESC) can be added and removed."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(Lower("name").desc(), name="func_lower_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
            sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, index.name, ["DESC"])
        # SQL contains a database function.
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIn("LOWER(%s)" % editor.quote_name("name"), str(sql))
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_f(self):
        """An Index can mix a plain field name with an ordered F() expression."""
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        index = Index("slug", F("title").desc(), name="func_f_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Tag, index)
            sql = index.create_sql(Tag, editor)
        table = Tag._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(Tag._meta.db_table, index.name, ["ASC", "DESC"])
        # SQL contains columns.
        self.assertIs(sql.references_column(table, "slug"), True)
        self.assertIs(sql.references_column(table, "title"), True)
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Tag, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_lookups(self):
        """F() expressions with registered transforms work in an Index."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Lower/Abs are only registered as lookups within this context.
        with register_lookup(CharField, Lower), register_lookup(IntegerField, Abs):
            index = Index(
                F("name__lower"),
                F("weight__abs"),
                name="func_lower_abs_lookup_idx",
            )
            # Add index.
            with connection.schema_editor() as editor:
                editor.add_index(Author, index)
                sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        # SQL contains columns.
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIs(sql.references_column(table, "weight"), True)
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_composite_func_index(self):
        """An Index over two expressions keeps their declaration order."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(Lower("name"), Upper("name"), name="func_lower_upper_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
            sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        # SQL contains database functions.
        self.assertIs(sql.references_column(table, "name"), True)
        sql = str(sql)
        self.assertIn("LOWER(%s)" % editor.quote_name("name"), sql)
        self.assertIn("UPPER(%s)" % editor.quote_name("name"), sql)
        # Expressions must appear in the order they were declared.
        self.assertLess(sql.index("LOWER"), sql.index("UPPER"))
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_composite_func_index_field_and_expression(self):
        """An Index can mix F() expressions, functions, and plain fields."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        index = Index(
            F("author").desc(),
            Lower("title").asc(),
            "pub_date",
            name="func_f_lower_field_idx",
        )
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Book, index)
            sql = index.create_sql(Book, editor)
        table = Book._meta.db_table
        constraints = self.get_constraints(table)
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, index.name, ["DESC", "ASC", "ASC"])
        # Three members; the plain "pub_date" field sits in the last slot.
        self.assertEqual(len(constraints[index.name]["columns"]), 3)
        self.assertEqual(constraints[index.name]["columns"][2], "pub_date")
        # SQL contains database functions and columns.
        self.assertIs(sql.references_column(table, "author_id"), True)
        self.assertIs(sql.references_column(table, "title"), True)
        self.assertIs(sql.references_column(table, "pub_date"), True)
        self.assertIn("LOWER(%s)" % editor.quote_name("title"), str(sql))
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Book, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_f_decimalfield(self):
        """An F() index on a DecimalField must not wrap the column in CAST."""

        class Node(Model):
            value = DecimalField(max_digits=5, decimal_places=2)

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(Node)
        index = Index(F("value"), name="func_f_decimalfield_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Node, index)
            sql = index.create_sql(Node, editor)
        table = Node._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        self.assertIs(sql.references_column(table, "value"), True)
        # SQL doesn't contain casting.
        self.assertNotIn("CAST", str(sql))
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Node, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_cast(self):
        """A Cast() expression can back a functional index."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(Cast("weight", FloatField()), name="func_cast_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
            sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        self.assertIs(sql.references_column(table, "weight"), True)
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
@skipUnlessDBFeature("supports_expression_indexes")
def test_func_index_collate(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("This backend does not support case-insensitive collations.")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithSlug)
index = Index(
Collate(F("title"), collation=collation).desc(),
Collate("slug", collation=collation),
name="func_collate_idx",
)
# Add index.
with connection.schema_editor() as editor:
editor.add_index(BookWithSlug, index)
sql = index.create_sql(BookWithSlug, editor)
table = Book._meta.db_table
self.assertIn(index.name, self.get_constraints(table))
if connection.features.supports_index_column_ordering:
self.assertIndexOrder(table, index.name, ["DESC", "ASC"])
# SQL contains columns and a collation.
self.assertIs(sql.references_column(table, "title"), True)
self.assertIs(sql.references_column(table, "slug"), True)
self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
# Remove index.
with connection.schema_editor() as editor:
editor.remove_index(Book, index)
self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    @skipIfDBFeature("collate_as_index_expression")
    def test_func_index_collate_f_ordered(self):
        """Collate() wrapping an already-ordered F() expression still emits
        the ordering and the COLLATE clause."""
        collation = connection.features.test_collations.get("non_default")
        if not collation:
            self.skipTest("This backend does not support case-insensitive collations.")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(
            Collate(F("name").desc(), collation=collation),
            name="func_collate_f_desc_idx",
        )
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
            sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        if connection.features.supports_index_column_ordering:
            self.assertIndexOrder(table, index.name, ["DESC"])
        # SQL contains columns and a collation.
        self.assertIs(sql.references_column(table, "name"), True)
        self.assertIn("COLLATE %s" % editor.quote_name(collation), str(sql))
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_calc(self):
        """An arithmetic expression (height / (weight + 5)) can back an index."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(F("height") / (F("weight") + Value(5)), name="func_calc_idx")
        # Add index.
        with connection.schema_editor() as editor:
            editor.add_index(Author, index)
            sql = index.create_sql(Author, editor)
        table = Author._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        # SQL contains columns and expressions.
        self.assertIs(sql.references_column(table, "height"), True)
        self.assertIs(sql.references_column(table, "weight"), True)
        sql = str(sql)
        # Operands and operators appear in the expression's written order.
        self.assertIs(
            sql.index(editor.quote_name("height"))
            < sql.index("/")
            < sql.index(editor.quote_name("weight"))
            < sql.index("+")
            < sql.index("5"),
            True,
        )
        # Remove index.
        with connection.schema_editor() as editor:
            editor.remove_index(Author, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
    @isolate_apps("schema")
    def test_func_index_json_key_transform(self):
        """A JSON key transform ("field__some_key") can back an index."""

        class JSONModel(Model):
            field = JSONField()

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(JSONModel)
        self.isolated_local_models = [JSONModel]
        index = Index("field__some_key", name="func_json_key_idx")
        with connection.schema_editor() as editor:
            editor.add_index(JSONModel, index)
            sql = index.create_sql(JSONModel, editor)
        table = JSONModel._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        self.assertIs(sql.references_column(table, "field"), True)
        with connection.schema_editor() as editor:
            editor.remove_index(JSONModel, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipUnlessDBFeature("supports_expression_indexes", "supports_json_field")
    @isolate_apps("schema")
    def test_func_index_json_key_transform_cast(self):
        """Casting a JSON key transform to an integer can back an index."""

        class JSONModel(Model):
            field = JSONField()

            class Meta:
                app_label = "schema"

        with connection.schema_editor() as editor:
            editor.create_model(JSONModel)
        self.isolated_local_models = [JSONModel]
        index = Index(
            Cast(KeyTextTransform("some_key", "field"), IntegerField()),
            name="func_json_key_cast_idx",
        )
        with connection.schema_editor() as editor:
            editor.add_index(JSONModel, index)
            sql = index.create_sql(JSONModel, editor)
        table = JSONModel._meta.db_table
        self.assertIn(index.name, self.get_constraints(table))
        self.assertIs(sql.references_column(table, "field"), True)
        with connection.schema_editor() as editor:
            editor.remove_index(JSONModel, index)
        self.assertNotIn(index.name, self.get_constraints(table))
    @skipIfDBFeature("supports_expression_indexes")
    def test_func_index_unsupported(self):
        """Expression indexes are silently skipped on unsupporting backends."""
        # Index is ignored on databases that don't support indexes on
        # expressions.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(F("name"), name="random_idx")
        # Neither operation may issue any SQL, and both return None.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            self.assertIsNone(editor.add_index(Author, index))
            self.assertIsNone(editor.remove_index(Author, index))
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_nonexistent_field(self):
        """An expression index over an unknown field raises FieldError on add."""
        index = Index(Lower("nonexistent"), name="func_nonexistent_idx")
        msg = (
            "Cannot resolve keyword 'nonexistent' into field. Choices are: "
            "height, id, name, uuid, weight"
        )
        with self.assertRaisesMessage(FieldError, msg):
            with connection.schema_editor() as editor:
                editor.add_index(Author, index)
    @skipUnlessDBFeature("supports_expression_indexes")
    def test_func_index_nondeterministic(self):
        """Indexing a nondeterministic expression (Random) fails at the database."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        index = Index(Random(), name="func_random_idx")
        with connection.schema_editor() as editor:
            with self.assertRaises(DatabaseError):
                editor.add_index(Author, index)
    def test_primary_key(self):
        """
        Tests altering of the primary key
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        # Ensure the table is there and has the right PK
        self.assertEqual(self.get_primary_key(Tag._meta.db_table), "id")
        # Alter to change the PK
        id_field = Tag._meta.get_field("id")
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(primary_key=True)
        new_field.set_attributes_from_name("slug")
        new_field.model = Tag
        # The old "id" PK column is dropped before "slug" becomes the PK.
        with connection.schema_editor() as editor:
            editor.remove_field(Tag, id_field)
            editor.alter_field(Tag, old_field, new_field)
        # Ensure the PK changed
        self.assertNotIn(
            "id",
            self.get_indexes(Tag._meta.db_table),
        )
        self.assertEqual(self.get_primary_key(Tag._meta.db_table), "slug")
    def test_alter_primary_key_the_same_name(self):
        """Altering a PK field to an equivalent definition keeps it as the PK."""
        with connection.schema_editor() as editor:
            editor.create_model(Thing)
        old_field = Thing._meta.get_field("when")
        new_field = CharField(max_length=2, primary_key=True)
        new_field.set_attributes_from_name("when")
        new_field.model = Thing
        with connection.schema_editor() as editor:
            editor.alter_field(Thing, old_field, new_field, strict=True)
        self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
        # And altering back again also preserves the PK.
        with connection.schema_editor() as editor:
            editor.alter_field(Thing, new_field, old_field, strict=True)
        self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
    @skipIfDBFeature("can_rollback_ddl")
    def test_unsupported_transactional_ddl_disallowed(self):
        """DDL inside an atomic block is rejected when DDL can't be rolled back."""
        message = (
            "Executing DDL statements while in a transaction on databases "
            "that can't perform a rollback is prohibited."
        )
        with atomic(), connection.schema_editor() as editor:
            with self.assertRaisesMessage(TransactionManagementError, message):
                editor.execute(
                    editor.sql_create_table % {"table": "foo", "definition": ""}
                )
    @skipUnlessDBFeature("supports_foreign_keys", "indexes_foreign_keys")
    def test_foreign_key_index_long_names_regression(self):
        """
        Regression test for #21497.
        Only affects databases that supports foreign keys.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithEvenLongerName)
            editor.create_model(BookWithLongName)
        # Find the properly shortened column name
        column_name = connection.ops.quote_name(
            "author_foreign_key_with_really_long_field_name_id"
        )
        column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
        # Ensure the table is there and has an index on the column
        self.assertIn(
            column_name,
            self.get_indexes(BookWithLongName._meta.db_table),
        )
@skipUnlessDBFeature("supports_foreign_keys")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
Only affects databases that supports foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(
AuthorWithEvenLongerName, CASCADE, related_name="something"
)
new_field.set_attributes_from_name(
"author_other_really_long_named_i_mean_so_long_fk"
)
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_add_foreign_key_quoted_db_table(self):
        """FKs targeting a model whose db_table is explicitly quoted are created."""
        class Author(Model):
            class Meta:
                db_table = '"table_author_double_quoted"'
                app_label = "schema"
        class Book(Model):
            author = ForeignKey(Author, CASCADE)
            class Meta:
                app_label = "schema"
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        self.isolated_local_models = [Author]
        # On MySQL the referenced table is introspected with the quotes kept.
        if connection.vendor == "mysql":
            self.assertForeignKeyExists(
                Book, "author_id", '"table_author_double_quoted"'
            )
        else:
            self.assertForeignKeyExists(Book, "author_id", "table_author_double_quoted")
def test_add_foreign_object(self):
with connection.schema_editor() as editor:
editor.create_model(BookForeignObj)
self.local_models = [BookForeignObj]
new_field = ForeignObject(
Author, on_delete=CASCADE, from_fields=["author_id"], to_fields=["id"]
)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.add_field(BookForeignObj, new_field)
    def test_creation_deletion_reserved_names(self):
        """
        Tries creating a model's table, and then deleting it when it has a
        SQL reserved name.
        """
        # Create the table
        with connection.schema_editor() as editor:
            try:
                editor.create_model(Thing)
            except OperationalError as e:
                self.fail(
                    "Errors when applying initial migration for a model "
                    "with a table named after an SQL reserved word: %s" % e
                )
        # The table is there (querying would raise if it weren't)
        list(Thing.objects.all())
        # Clean up that table
        with connection.schema_editor() as editor:
            editor.delete_model(Thing)
        # The table is gone
        with self.assertRaises(DatabaseError):
            list(Thing.objects.all())
    def test_remove_constraints_capital_letters(self):
        """
        #23065 - Constraint names must be quoted if they contain capital
        letters.
        """
        # Helper building a field bound to the mixed-case "CamelCase" column.
        def get_field(*args, field_class=BigIntegerField, **kwargs):
            kwargs["db_column"] = "CamelCase"
            field = field_class(*args, **kwargs)
            field.set_attributes_from_name("CamelCase")
            return field
        model = Author
        field = get_field()
        table = model._meta.db_table
        column = field.column
        identifier_converter = connection.introspection.identifier_converter
        with connection.schema_editor() as editor:
            editor.create_model(model)
            editor.add_field(model, field)
            # 1) Index with a mixed-case name: create it raw, then drop it via
            # alter_field() and check it's really gone.
            constraint_name = "CamelCaseIndex"
            expected_constraint_name = identifier_converter(constraint_name)
            editor.execute(
                editor.sql_create_index
                % {
                    "table": editor.quote_name(table),
                    "name": editor.quote_name(constraint_name),
                    "using": "",
                    "columns": editor.quote_name(column),
                    "extra": "",
                    "condition": "",
                    "include": "",
                }
            )
            self.assertIn(
                expected_constraint_name, self.get_constraints(model._meta.db_table)
            )
            editor.alter_field(model, get_field(db_index=True), field, strict=True)
            self.assertNotIn(
                expected_constraint_name, self.get_constraints(model._meta.db_table)
            )
            # 2) Unique constraint with a mixed-case name.
            constraint_name = "CamelCaseUniqConstraint"
            expected_constraint_name = identifier_converter(constraint_name)
            editor.execute(editor._create_unique_sql(model, [field], constraint_name))
            self.assertIn(
                expected_constraint_name, self.get_constraints(model._meta.db_table)
            )
            editor.alter_field(model, get_field(unique=True), field, strict=True)
            self.assertNotIn(
                expected_constraint_name, self.get_constraints(model._meta.db_table)
            )
            # 3) FK constraint with a mixed-case name (only where the backend
            # has FK DDL and can introspect FKs).
            if editor.sql_create_fk and connection.features.can_introspect_foreign_keys:
                constraint_name = "CamelCaseFKConstraint"
                expected_constraint_name = identifier_converter(constraint_name)
                editor.execute(
                    editor.sql_create_fk
                    % {
                        "table": editor.quote_name(table),
                        "name": editor.quote_name(constraint_name),
                        "column": editor.quote_name(column),
                        "to_table": editor.quote_name(table),
                        "to_column": editor.quote_name(model._meta.auto_field.column),
                        "deferrable": connection.ops.deferrable_sql(),
                        "on_delete_db": "",
                    }
                )
                self.assertIn(
                    expected_constraint_name, self.get_constraints(model._meta.db_table)
                )
                editor.alter_field(
                    model,
                    get_field(Author, CASCADE, field_class=ForeignKey),
                    field,
                    strict=True,
                )
                self.assertNotIn(
                    expected_constraint_name, self.get_constraints(model._meta.db_table)
                )
    def test_add_field_use_effective_default(self):
        """
        #23987 - effective_default() should be used as the field default when
        adding a new field.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
        Author.objects.create(name="Anonymous1")
        # Add new CharField to ensure default will be used from
        # effective_default
        new_field = CharField(max_length=15, blank=True)
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            # Oracle-style backends store "" as NULL.
            self.assertEqual(
                item[0],
                None if connection.features.interprets_empty_strings_as_nulls else "",
            )
    def test_add_field_default_dropped(self):
        """The default used to backfill an added field is dropped from the schema."""
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no surname field
        columns = self.column_classes(Author)
        self.assertNotIn("surname", columns)
        # Create a row
        Author.objects.create(name="Anonymous1")
        # Add new CharField with a default
        new_field = CharField(max_length=15, blank=True, default="surname default")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure field was added with the right default
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertEqual(item[0], "surname default")
            # And that the default is no longer set in the database.
            field = next(
                f
                for f in connection.introspection.get_table_description(
                    cursor, "schema_author"
                )
                if f.name == "surname"
            )
            if connection.features.can_introspect_default:
                self.assertIsNone(field.default)
    def test_add_field_default_nullable(self):
        """Adding a nullable field with a default leaves no database default behind."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add new nullable CharField with a default.
        new_field = CharField(max_length=15, blank=True, null=True, default="surname")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # A row created after the ALTER gets NULL, not the Python default.
        Author.objects.create(name="Anonymous1")
        with connection.cursor() as cursor:
            cursor.execute("SELECT surname FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertIsNone(item[0])
            field = next(
                f
                for f in connection.introspection.get_table_description(
                    cursor,
                    "schema_author",
                )
                if f.name == "surname"
            )
            # Field is still nullable.
            self.assertTrue(field.null_ok)
            # The database default is no longer set.
            if connection.features.can_introspect_default:
                self.assertIn(field.default, ["NULL", None])
    def test_add_textfield_default_nullable(self):
        """Same as the CharField case: a nullable TextField keeps no DB default."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add new nullable TextField with a default.
        new_field = TextField(blank=True, null=True, default="text")
        new_field.set_attributes_from_name("description")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # A row created after the ALTER gets NULL, not the Python default.
        Author.objects.create(name="Anonymous1")
        with connection.cursor() as cursor:
            cursor.execute("SELECT description FROM schema_author;")
            item = cursor.fetchall()[0]
            self.assertIsNone(item[0])
            field = next(
                f
                for f in connection.introspection.get_table_description(
                    cursor,
                    "schema_author",
                )
                if f.name == "description"
            )
            # Field is still nullable.
            self.assertTrue(field.null_ok)
            # The database default is no longer set.
            if connection.features.can_introspect_default:
                self.assertIn(field.default, ["NULL", None])
    def test_alter_field_default_dropped(self):
        """Altering to a field with a default backfills rows, then drops the default."""
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Create a row
        Author.objects.create(name="Anonymous1")
        self.assertIsNone(Author.objects.get().height)
        old_field = Author._meta.get_field("height")
        # The default from the new field is used in updating existing rows.
        new_field = IntegerField(blank=True, default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(Author.objects.get().height, 42)
        # The database default should be removed.
        with connection.cursor() as cursor:
            field = next(
                f
                for f in connection.introspection.get_table_description(
                    cursor, "schema_author"
                )
                if f.name == "height"
            )
        if connection.features.can_introspect_default:
            self.assertIsNone(field.default)
    def test_alter_field_default_doesnt_perform_queries(self):
        """
        No queries are performed if a field default changes and the field's
        not changing from null to non-null.
        """
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithDefaultHeight)
        old_field = AuthorWithDefaultHeight._meta.get_field("height")
        # Only the Python-side default changes; the schema is untouched.
        new_default = old_field.default * 2
        new_field = PositiveIntegerField(null=True, blank=True, default=new_default)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_field(
                AuthorWithDefaultHeight, old_field, new_field, strict=True
            )
    @skipUnlessDBFeature("supports_foreign_keys")
    def test_alter_field_fk_attributes_noop(self):
        """
        No queries are performed when changing field attributes that don't
        affect the schema.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        old_field = Book._meta.get_field("author")
        # Every changed attribute below is Python-side only.
        new_field = ForeignKey(
            Author,
            blank=True,
            editable=False,
            error_messages={"invalid": "error message"},
            help_text="help text",
            limit_choices_to={"limit": "choice"},
            on_delete=CASCADE,
            related_name="related_name",
            related_query_name="related_query_name",
            validators=[lambda x: x],
            verbose_name="verbose name",
        )
        new_field.set_attributes_from_name("author")
        # No SQL in either direction.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_field(Book, old_field, new_field, strict=True)
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_field(Book, new_field, old_field, strict=True)
    def test_alter_field_choices_noop(self):
        """Changing only a field's choices performs no schema queries."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("name")
        new_field = CharField(
            choices=(("Jane", "Jane"), ("Joe", "Joe")),
            max_length=255,
        )
        new_field.set_attributes_from_name("name")
        # No SQL in either direction.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_field(Author, old_field, new_field, strict=True)
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_field(Author, new_field, old_field, strict=True)
def test_add_textfield_unhashable_default(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name="Anonymous1")
# Create a field that has an unhashable default
new_field = TextField(default={})
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_add_indexed_charfield(self):
        """On PostgreSQL, an indexed CharField gets an extra index for LIKE lookups."""
        field = CharField(max_length=255, db_index=True)
        field.set_attributes_from_name("nom_de_plume")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.add_field(Author, field)
        # Should create two indexes; one for like operator.
        self.assertEqual(
            self.get_constraints_for_column(Author, "nom_de_plume"),
            [
                "schema_author_nom_de_plume_7570a851",
                "schema_author_nom_de_plume_7570a851_like",
            ],
        )
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_add_unique_charfield(self):
        """On PostgreSQL, a unique CharField gets an extra index for LIKE lookups."""
        field = CharField(max_length=255, unique=True)
        field.set_attributes_from_name("nom_de_plume")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.add_field(Author, field)
        # Should create two indexes; one for like operator.
        self.assertEqual(
            self.get_constraints_for_column(Author, "nom_de_plume"),
            [
                "schema_author_nom_de_plume_7570a851_like",
                "schema_author_nom_de_plume_key",
            ],
        )
    @skipUnlessDBFeature("supports_comments")
    def test_add_db_comment_charfield(self):
        """A column comment declared via db_comment is applied by add_field()."""
        comment = "Custom comment"
        field = CharField(max_length=255, db_comment=comment)
        field.set_attributes_from_name("name_with_comment")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.add_field(Author, field)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name_with_comment"),
            comment,
        )
    @skipUnlessDBFeature("supports_comments", "supports_stored_generated_columns")
    def test_add_db_comment_generated_field(self):
        """db_comment is applied to a stored GeneratedField added via add_field()."""
        comment = "Custom comment"
        field = GeneratedField(
            expression=Value(1),
            db_persist=True,
            output_field=IntegerField(),
            db_comment=comment,
        )
        field.set_attributes_from_name("volume")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.add_field(Author, field)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "volume"),
            comment,
        )
    @skipUnlessDBFeature("supports_comments")
    def test_add_db_comment_and_default_charfield(self):
        """A db_comment and a default can be applied together by add_field()."""
        comment = "Custom comment with default"
        field = CharField(max_length=255, default="Joe Doe", db_comment=comment)
        field.set_attributes_from_name("name_with_comment_default")
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            Author.objects.create(name="Before adding a new field")
            editor.add_field(Author, field)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name_with_comment_default"),
            comment,
        )
        # The pre-existing row was backfilled with the default.
        with connection.cursor() as cursor:
            cursor.execute(
                f"SELECT name_with_comment_default FROM {Author._meta.db_table};"
            )
            for row in cursor.fetchall():
                self.assertEqual(row[0], "Joe Doe")
    @skipUnlessDBFeature("supports_comments")
    def test_alter_db_comment(self):
        """Column comments can be added, changed, and removed via alter_field()."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add comment.
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=255, db_comment="Custom comment")
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name"),
            "Custom comment",
        )
        # Alter comment.
        old_field = new_field
        new_field = CharField(max_length=255, db_comment="New custom comment")
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name"),
            "New custom comment",
        )
        # Remove comment.
        old_field = new_field
        new_field = CharField(max_length=255)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Backends report a removed comment as either None or "".
        self.assertIn(
            self.get_column_comment(Author._meta.db_table, "name"),
            [None, ""],
        )
    @skipUnlessDBFeature("supports_comments", "supports_foreign_keys")
    def test_alter_db_comment_foreign_key(self):
        """A db_comment can be added to a foreign key column via alter_field()."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        comment = "FK custom comment"
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Author, CASCADE, db_comment=comment)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_column_comment(Book._meta.db_table, "author_id"),
            comment,
        )
    @skipUnlessDBFeature("supports_comments")
    def test_alter_field_type_preserve_comment(self):
        """A column comment survives a change of the column's type."""
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        comment = "This is the name."
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=255, db_comment=comment)
        new_field.set_attributes_from_name("name")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name"),
            comment,
        )
        # Changing a field type should preserve the comment.
        old_field = new_field
        new_field = CharField(max_length=511, db_comment=comment)
        new_field.set_attributes_from_name("name")
        new_field.model = Author
        # NOTE(review): arguments look reversed here (from new_field to
        # old_field) compared to the first alter above — the comment is
        # asserted preserved either way; confirm the intended direction.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        # Comment is preserved.
        self.assertEqual(
            self.get_column_comment(Author._meta.db_table, "name"),
            comment,
        )
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_comments")
    def test_db_comment_table(self):
        """Table comments are created, altered, and removed via the editor."""
        class ModelWithDbTableComment(Model):
            class Meta:
                app_label = "schema"
                db_table_comment = "Custom table comment"
        with connection.schema_editor() as editor:
            editor.create_model(ModelWithDbTableComment)
        self.isolated_local_models = [ModelWithDbTableComment]
        self.assertEqual(
            self.get_table_comment(ModelWithDbTableComment._meta.db_table),
            "Custom table comment",
        )
        # Alter table comment.
        old_db_table_comment = ModelWithDbTableComment._meta.db_table_comment
        with connection.schema_editor() as editor:
            editor.alter_db_table_comment(
                ModelWithDbTableComment, old_db_table_comment, "New table comment"
            )
        self.assertEqual(
            self.get_table_comment(ModelWithDbTableComment._meta.db_table),
            "New table comment",
        )
        # Remove table comment.
        old_db_table_comment = ModelWithDbTableComment._meta.db_table_comment
        with connection.schema_editor() as editor:
            editor.alter_db_table_comment(
                ModelWithDbTableComment, old_db_table_comment, None
            )
        # Backends report a removed comment as either None or "".
        self.assertIn(
            self.get_table_comment(ModelWithDbTableComment._meta.db_table),
            [None, ""],
        )
    @isolate_apps("schema")
    @skipIfDBFeature("supports_comments")
    def test_db_comment_table_unsupported(self):
        """Table comments are skipped entirely on unsupporting backends."""
        class ModelWithDbTableComment(Model):
            class Meta:
                app_label = "schema"
                db_table_comment = "Custom table comment"
        # Table comments are ignored on databases that don't support them.
        # Exactly one query: the CREATE TABLE itself, with no comment DDL.
        with connection.schema_editor() as editor, self.assertNumQueries(1):
            editor.create_model(ModelWithDbTableComment)
        self.isolated_local_models = [ModelWithDbTableComment]
        # Altering the comment must issue no SQL at all.
        with connection.schema_editor() as editor, self.assertNumQueries(0):
            editor.alter_db_table_comment(
                ModelWithDbTableComment, "Custom table comment", "New table comment"
            )
    @isolate_apps("schema")
    @skipUnlessDBFeature("supports_comments", "supports_foreign_keys")
    def test_db_comments_from_abstract_model(self):
        """Column and table comments inherited from an abstract base are applied."""
        class AbstractModelWithDbComments(Model):
            name = CharField(
                max_length=255, db_comment="Custom comment", null=True, blank=True
            )
            class Meta:
                app_label = "schema"
                abstract = True
                db_table_comment = "Custom table comment"
        class ModelWithDbComments(AbstractModelWithDbComments):
            pass
        with connection.schema_editor() as editor:
            editor.create_model(ModelWithDbComments)
        self.isolated_local_models = [ModelWithDbComments]
        self.assertEqual(
            self.get_column_comment(ModelWithDbComments._meta.db_table, "name"),
            "Custom comment",
        )
        self.assertEqual(
            self.get_table_comment(ModelWithDbComments._meta.db_table),
            "Custom table comment",
        )
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_add_index_to_charfield(self):
        """Adding/removing db_index on a CharField creates/drops both PG indexes."""
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=255, db_index=True)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, "name"),
            ["schema_author_name_1fbc5617", "schema_author_name_1fbc5617_like"],
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield(self):
        """Adding/removing unique on a CharField creates/drops both PG indexes."""
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
        # Alter to add unique=True and create 2 indexes.
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=255, unique=True)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Author, "name"),
            ["schema_author_name_1fbc5617_like", "schema_author_name_1fbc5617_uniq"],
        )
        # Remove unique=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Author, "name"), [])
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_add_index_to_textfield(self):
        """Adding/removing db_index on a TextField creates/drops both PG indexes."""
        # Create the table and verify no initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
        # Alter to add db_index=True and create 2 indexes.
        old_field = Note._meta.get_field("info")
        new_field = TextField(db_index=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Note, "info"),
            ["schema_note_info_4b0ea695", "schema_note_info_4b0ea695_like"],
        )
        # Remove db_index=True to drop both indexes.
        with connection.schema_editor() as editor:
            editor.alter_field(Note, new_field, old_field, strict=True)
        self.assertEqual(self.get_constraints_for_column(Note, "info"), [])
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_add_unique_to_charfield_with_db_index(self):
        """Toggling unique on an already-indexed CharField swaps index for unique."""
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
        )
        # Alter to remove unique=True (should drop unique index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
        )
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_remove_unique_and_db_index_from_charfield(self):
        """Dropping both unique and db_index removes every index on the column."""
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
        )
        # Alter to add unique=True (should replace the index)
        old_field = BookWithoutAuthor._meta.get_field("title")
        new_field = CharField(max_length=100, db_index=True, unique=True)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
        )
        # Alter to remove both unique=True and db_index=True (should drop all
        # indexes)
        new_field2 = CharField(max_length=100)
        new_field2.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"), []
        )
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_swap_unique_and_db_index_with_charfield(self):
        """Swapping db_index for unique (and back) replaces the matching indexes."""
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(BookWithoutAuthor)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
        )
        # Alter to set unique=True and remove db_index=True (should replace the
        # index)
        old_field = BookWithoutAuthor._meta.get_field("title")
        new_field = CharField(max_length=100, unique=True)
        new_field.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff_like", "schema_book_title_2dfb2dff_uniq"],
        )
        # Alter to set db_index=True and remove unique=True (should restore
        # index)
        new_field2 = CharField(max_length=100, db_index=True)
        new_field2.set_attributes_from_name("title")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithoutAuthor, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(BookWithoutAuthor, "title"),
            ["schema_book_title_2dfb2dff", "schema_book_title_2dfb2dff_like"],
        )
    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
    def test_alter_field_add_db_index_to_charfield_with_unique(self):
        """Toggling db_index on an already-unique field leaves constraints unchanged."""
        # Create the table and verify initial indexes.
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
        self.assertEqual(
            self.get_constraints_for_column(Tag, "slug"),
            ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
        )
        # Alter to add db_index=True
        old_field = Tag._meta.get_field("slug")
        new_field = SlugField(db_index=True, unique=True)
        new_field.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, old_field, new_field, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, "slug"),
            ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
        )
        # Alter to remove db_index=True
        new_field2 = SlugField(unique=True)
        new_field2.set_attributes_from_name("slug")
        with connection.schema_editor() as editor:
            editor.alter_field(Tag, new_field, new_field2, strict=True)
        self.assertEqual(
            self.get_constraints_for_column(Tag, "slug"),
            ["schema_tag_slug_2c418ba3_like", "schema_tag_slug_key"],
        )
@isolate_apps("schema")
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific")
def test_indexed_charfield_to_textfield(self):
class SimpleModel(Model):
field1 = CharField(max_length=10, db_index=True)
class Meta:
app_label = "schema"
with connection.schema_editor() as editor:
editor.create_model(SimpleModel)
self.assertEqual(
self.get_constraints_for_column(SimpleModel, "field1"),
[
"schema_simplemodel_field1_f07a3d6a",
"schema_simplemodel_field1_f07a3d6a_like",
],
)
# Change to TextField.
old_field1 = SimpleModel._meta.get_field("field1")
new_field1 = TextField(db_index=True)
new_field1.set_attributes_from_name("field1")
with connection.schema_editor() as editor:
editor.alter_field(SimpleModel, old_field1, new_field1, strict=True)
self.assertEqual(
self.get_constraints_for_column(SimpleModel, "field1"),
[
"schema_simplemodel_field1_f07a3d6a",
"schema_simplemodel_field1_f07a3d6a_like",
],
)
# Change back to CharField.
old_field1 = SimpleModel._meta.get_field("field1")
new_field1 = CharField(max_length=10, db_index=True)
new_field1.set_attributes_from_name("field1")
with connection.schema_editor() as editor:
editor.alter_field(SimpleModel, old_field1, new_field1, strict=True)
self.assertEqual(
self.get_constraints_for_column(SimpleModel, "field1"),
[
"schema_simplemodel_field1_f07a3d6a",
"schema_simplemodel_field1_f07a3d6a_like",
],
)
def test_alter_field_add_index_to_integerfield(self):
# Create the table and verify no initial indexes.
with connection.schema_editor() as editor:
editor.create_model(Author)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
# Alter to add db_index=True and create index.
old_field = Author._meta.get_field("weight")
new_field = IntegerField(null=True, db_index=True)
new_field.set_attributes_from_name("weight")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_constraints_for_column(Author, "weight"),
["schema_author_weight_587740f9"],
)
# Remove db_index=True to drop index.
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertEqual(self.get_constraints_for_column(Author, "weight"), [])
def test_alter_pk_with_self_referential_field(self):
"""
Changing the primary key field name of a model with a self-referential
foreign key (#26384).
"""
with connection.schema_editor() as editor:
editor.create_model(Node)
old_field = Node._meta.get_field("node_id")
new_field = AutoField(primary_key=True)
new_field.set_attributes_from_name("id")
with connection.schema_editor() as editor:
editor.alter_field(Node, old_field, new_field, strict=True)
self.assertForeignKeyExists(Node, "parent_id", Node._meta.db_table)
@mock.patch("django.db.backends.base.schema.datetime")
@mock.patch("django.db.backends.base.schema.timezone")
def test_add_datefield_and_datetimefield_use_effective_default(
self, mocked_datetime, mocked_tz
):
"""
effective_default() should be used for DateField, DateTimeField, and
TimeField if auto_now or auto_now_add is set (#25005).
"""
now = datetime.datetime(month=1, day=1, year=2000, hour=1, minute=1)
now_tz = datetime.datetime(
month=1, day=1, year=2000, hour=1, minute=1, tzinfo=datetime.UTC
)
mocked_datetime.now = mock.MagicMock(return_value=now)
mocked_tz.now = mock.MagicMock(return_value=now_tz)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check auto_now/auto_now_add attributes are not defined
columns = self.column_classes(Author)
self.assertNotIn("dob_auto_now", columns)
self.assertNotIn("dob_auto_now_add", columns)
self.assertNotIn("dtob_auto_now", columns)
self.assertNotIn("dtob_auto_now_add", columns)
self.assertNotIn("tob_auto_now", columns)
self.assertNotIn("tob_auto_now_add", columns)
# Create a row
Author.objects.create(name="Anonymous1")
# Ensure fields were added with the correct defaults
dob_auto_now = DateField(auto_now=True)
dob_auto_now.set_attributes_from_name("dob_auto_now")
self.check_added_field_default(
editor,
Author,
dob_auto_now,
"dob_auto_now",
now.date(),
cast_function=lambda x: x.date(),
)
dob_auto_now_add = DateField(auto_now_add=True)
dob_auto_now_add.set_attributes_from_name("dob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dob_auto_now_add,
"dob_auto_now_add",
now.date(),
cast_function=lambda x: x.date(),
)
dtob_auto_now = DateTimeField(auto_now=True)
dtob_auto_now.set_attributes_from_name("dtob_auto_now")
self.check_added_field_default(
editor,
Author,
dtob_auto_now,
"dtob_auto_now",
now,
)
dt_tm_of_birth_auto_now_add = DateTimeField(auto_now_add=True)
dt_tm_of_birth_auto_now_add.set_attributes_from_name("dtob_auto_now_add")
self.check_added_field_default(
editor,
Author,
dt_tm_of_birth_auto_now_add,
"dtob_auto_now_add",
now,
)
tob_auto_now = TimeField(auto_now=True)
tob_auto_now.set_attributes_from_name("tob_auto_now")
self.check_added_field_default(
editor,
Author,
tob_auto_now,
"tob_auto_now",
now.time(),
cast_function=lambda x: x.time(),
)
tob_auto_now_add = TimeField(auto_now_add=True)
tob_auto_now_add.set_attributes_from_name("tob_auto_now_add")
self.check_added_field_default(
editor,
Author,
tob_auto_now_add,
"tob_auto_now_add",
now.time(),
cast_function=lambda x: x.time(),
)
def test_namespaced_db_table_create_index_name(self):
"""
Table names are stripped of their namespace/schema before being used to
generate index names.
"""
with connection.schema_editor() as editor:
max_name_length = connection.ops.max_name_length() or 200
namespace = "n" * max_name_length
table_name = "t" * max_name_length
namespaced_table_name = '"%s"."%s"' % (namespace, table_name)
self.assertEqual(
editor._create_index_name(table_name, []),
editor._create_index_name(namespaced_table_name, []),
)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle specific db_table syntax"
)
def test_creation_with_db_table_double_quotes(self):
oracle_user = connection.creation._test_database_user()
class Student(Model):
name = CharField(max_length=30)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_STUDENT_TABLE"' % oracle_user
class Document(Model):
name = CharField(max_length=30)
students = ManyToManyField(Student)
class Meta:
app_label = "schema"
apps = new_apps
db_table = '"%s"."DJANGO_DOCUMENT_TABLE"' % oracle_user
self.isolated_local_models = [Student, Document]
with connection.schema_editor() as editor:
editor.create_model(Student)
editor.create_model(Document)
doc = Document.objects.create(name="Test Name")
student = Student.objects.create(name="Some man")
doc.students.add(student)
@isolate_apps("schema")
@unittest.skipUnless(
connection.vendor == "postgresql", "PostgreSQL specific db_table syntax."
)
def test_namespaced_db_table_foreign_key_reference(self):
with connection.cursor() as cursor:
cursor.execute("CREATE SCHEMA django_schema_tests")
def delete_schema():
with connection.cursor() as cursor:
cursor.execute("DROP SCHEMA django_schema_tests CASCADE")
self.addCleanup(delete_schema)
class Author(Model):
class Meta:
app_label = "schema"
class Book(Model):
class Meta:
app_label = "schema"
db_table = '"django_schema_tests"."schema_book"'
author = ForeignKey(Author, CASCADE)
author.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.add_field(Book, author)
def test_rename_table_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
editor.alter_db_table(Author, "schema_author", "schema_renamed_author")
editor.alter_db_table(Author, "schema_book", "schema_renamed_book")
try:
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_table("schema_author"), False)
self.assertIs(statement.references_table("schema_book"), False)
finally:
editor.alter_db_table(Author, "schema_renamed_author", "schema_author")
editor.alter_db_table(Author, "schema_renamed_book", "schema_book")
def test_rename_column_renames_deferred_sql_references(self):
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
old_title = Book._meta.get_field("title")
new_title = CharField(max_length=100, db_index=True)
new_title.set_attributes_from_name("renamed_title")
editor.alter_field(Book, old_title, new_title)
old_author = Book._meta.get_field("author")
new_author = ForeignKey(Author, CASCADE)
new_author.set_attributes_from_name("renamed_author")
editor.alter_field(Book, old_author, new_author)
self.assertGreater(len(editor.deferred_sql), 0)
for statement in editor.deferred_sql:
self.assertIs(statement.references_column("book", "title"), False)
self.assertIs(statement.references_column("book", "author_id"), False)
@isolate_apps("schema")
def test_referenced_field_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the field
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_field(Foo, Foo._meta.get_field("field"), new_field)
@isolate_apps("schema")
def test_referenced_table_without_constraint_rename_inside_atomic_block(self):
"""
Foreign keys without database level constraint don't prevent the table
they reference from being renamed in an atomic block.
"""
class Foo(Model):
field = CharField(max_length=255, unique=True)
class Meta:
app_label = "schema"
class Bar(Model):
foo = ForeignKey(Foo, CASCADE, to_field="field", db_constraint=False)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo, Bar]
with connection.schema_editor() as editor:
editor.create_model(Foo)
editor.create_model(Bar)
new_field = CharField(max_length=255, unique=True)
new_field.set_attributes_from_name("renamed")
with connection.schema_editor(atomic=True) as editor:
editor.alter_db_table(Foo, Foo._meta.db_table, "renamed_table")
Foo._meta.db_table = "renamed_table"
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_db_collation_charfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = CharField(max_length=255, db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@isolate_apps("schema")
@skipUnlessDBFeature("supports_collation_on_textfield")
def test_db_collation_textfield(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
class Foo(Model):
field = TextField(db_collation=collation)
class Meta:
app_label = "schema"
self.isolated_local_models = [Foo]
with connection.schema_editor() as editor:
editor.create_model(Foo)
self.assertEqual(
self.get_column_collation(Foo._meta.db_table, "field"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_add_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("alias")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
columns = self.column_classes(Author)
self.assertEqual(
columns["alias"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["alias"][1][8], collation)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
self.assertIsNone(self.get_column_collation(Author._meta.db_table, "name"))
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_field_type_preserve_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
# Changing a field type should preserve the collation.
old_field = new_field
new_field = CharField(max_length=511, db_collation=collation)
new_field.set_attributes_from_name("name")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, old_field, strict=True)
# Collation is preserved.
self.assertEqual(
self.get_column_collation(Author._meta.db_table, "name"),
collation,
)
@skipUnlessDBFeature("supports_collation_on_charfield")
def test_alter_primary_key_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Thing)
old_field = Thing._meta.get_field("when")
new_field = CharField(max_length=1, db_collation=collation, primary_key=True)
new_field.set_attributes_from_name("when")
new_field.model = Thing
with connection.schema_editor() as editor:
editor.alter_field(Thing, old_field, new_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertEqual(
self.get_column_collation(Thing._meta.db_table, "when"),
collation,
)
with connection.schema_editor() as editor:
editor.alter_field(Thing, new_field, old_field, strict=True)
self.assertEqual(self.get_primary_key(Thing._meta.db_table), "when")
self.assertIsNone(self.get_column_collation(Thing._meta.db_table, "when"))
@skipUnlessDBFeature(
"supports_collation_on_charfield", "supports_collation_on_textfield"
)
def test_alter_field_type_and_db_collation(self):
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=255, db_collation=collation)
new_field.set_attributes_from_name("info")
new_field.model = Note
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(
columns["info"][0],
connection.features.introspected_field_types["CharField"],
)
self.assertEqual(columns["info"][1][8], collation)
with connection.schema_editor() as editor:
editor.alter_field(Note, new_field, old_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns["info"][0], "TextField")
self.assertIsNone(columns["info"][1][8])
@skipUnlessDBFeature(
"supports_collation_on_charfield",
"supports_non_deterministic_collations",
)
def test_ci_cs_db_collation(self):
cs_collation = connection.features.test_collations.get("cs")
ci_collation = connection.features.test_collations.get("ci")
try:
if connection.vendor == "mysql":
cs_collation = "latin1_general_cs"
elif connection.vendor == "postgresql":
cs_collation = "en-x-icu"
with connection.cursor() as cursor:
cursor.execute(
"CREATE COLLATION IF NOT EXISTS case_insensitive "
"(provider = icu, locale = 'und-u-ks-level2', "
"deterministic = false)"
)
ci_collation = "case_insensitive"
# Create the table.
with connection.schema_editor() as editor:
editor.create_model(Author)
# Case-insensitive collation.
old_field = Author._meta.get_field("name")
new_field_ci = CharField(max_length=255, db_collation=ci_collation)
new_field_ci.set_attributes_from_name("name")
new_field_ci.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field_ci, strict=True)
Author.objects.create(name="ANDREW")
self.assertIs(Author.objects.filter(name="Andrew").exists(), True)
# Case-sensitive collation.
new_field_cs = CharField(max_length=255, db_collation=cs_collation)
new_field_cs.set_attributes_from_name("name")
new_field_cs.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field_ci, new_field_cs, strict=True)
self.assertIs(Author.objects.filter(name="Andrew").exists(), False)
finally:
if connection.vendor == "postgresql":
with connection.cursor() as cursor:
cursor.execute("DROP COLLATION IF EXISTS case_insensitive")
| SchemaTests |
python | getsentry__sentry | src/sentry/explore/endpoints/serializers.py | {
"start": 2258,
"end": 2861
} | class ____(serializers.Serializer):
name = serializers.CharField(
required=True,
help_text="The name of the metric.",
)
type = serializers.ChoiceField(
choices=[
"counter",
"gauge",
"distribution",
],
required=True,
help_text="The type of the metric.",
)
unit = serializers.CharField(
required=False,
allow_null=True,
help_text="The unit of the metric (e.g., 'millisecond'). See MetricUnit in relay",
)
@extend_schema_serializer(exclude_fields=["groupby"])
| MetricSerializer |
python | huggingface__transformers | src/transformers/models/xlnet/modeling_xlnet.py | {
"start": 25537,
"end": 26552
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
mems (`list[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The
token ids which have their past given to this model should not be passed as `input_ids` as they have
already been computed.
"""
last_hidden_state: torch.FloatTensor
mems: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`XLNetLMHeadModel`].
"""
)
| XLNetModelOutput |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/integration/streams/test_discounts.py | {
"start": 1828,
"end": 3803
} | class ____(StreamTestCase):
_STREAM_NAME = "discounts"
@HttpMocker()
def test_state_message_produced_while_read_and_state_match_latest_record(self, http_mocker: HttpMocker) -> None:
min_cursor_value = "2024-01-01T00:00:00+00:00"
max_cursor_value = "2024-02-01T00:00:00+00:00"
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME)
.with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(min_cursor_value))
.with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(max_cursor_value))
.build(),
)
output = read_incremental(self._config, _STREAM_NAME)
test_cursor_value = get_cursor_value_from_state_message(output, _CURSOR_FIELD)
assert test_cursor_value == max_cursor_value
@HttpMocker()
def test_given_multiple_pages_when_read_then_return_records_with_state(self, http_mocker: HttpMocker) -> None:
min_cursor_value = "2024-01-01T00:00:00+00:00"
max_cursor_value = "2024-02-01T00:00:00+00:00"
http_mocker.get(
self.stream_request().with_limit(250).with_next_page_token(NEXT_PAGE_TOKEN).build(),
get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME)
.with_pagination()
.with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(min_cursor_value))
.with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD).with_cursor(max_cursor_value))
.build(),
)
output = read_incremental(self._config, _STREAM_NAME)
assert len(output.records) == 3
| TestIncremental |
python | langchain-ai__langchain | libs/langchain/langchain_classic/output_parsers/retry.py | {
"start": 1421,
"end": 6000
} | class ____(BaseOutputParser[T]):
"""Wrap a parser and try to fix parsing errors.
Does this by passing the original prompt and the completion to another
LLM, and telling it the completion did not satisfy criteria in the prompt.
"""
parser: Annotated[BaseOutputParser[T], SkipValidation()]
"""The parser to use to parse the output."""
# Should be an LLMChain but we want to avoid top-level imports from
# langchain_classic.chains
retry_chain: Annotated[
RunnableSerializable[RetryOutputParserRetryChainInput, str] | Any,
SkipValidation(),
]
"""The RunnableSerializable to use to retry the completion (Legacy: LLMChain)."""
max_retries: int = 1
"""The maximum number of times to retry the parse."""
legacy: bool = True
"""Whether to use the run or arun method of the retry_chain."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
parser: BaseOutputParser[T],
prompt: BasePromptTemplate = NAIVE_RETRY_PROMPT,
max_retries: int = 1,
) -> RetryOutputParser[T]:
"""Create an RetryOutputParser from a language model and a parser.
Args:
llm: llm to use for fixing
parser: parser to use for parsing
prompt: prompt to use for fixing
max_retries: Maximum number of retries to parse.
Returns:
RetryOutputParser
"""
chain = prompt | llm | StrOutputParser()
return cls(parser=parser, retry_chain=chain, max_retries=max_retries)
def parse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse(completion)
except OutputParserException:
if retries == self.max_retries:
raise
retries += 1
if self.legacy and hasattr(self.retry_chain, "run"):
completion = self.retry_chain.run(
prompt=prompt_value.to_string(),
completion=completion,
)
else:
completion = self.retry_chain.invoke(
{
"prompt": prompt_value.to_string(),
"completion": completion,
},
)
msg = "Failed to parse"
raise OutputParserException(msg)
async def aparse_with_prompt(self, completion: str, prompt_value: PromptValue) -> T:
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
return await self.parser.aparse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise
retries += 1
if self.legacy and hasattr(self.retry_chain, "arun"):
completion = await self.retry_chain.arun(
prompt=prompt_value.to_string(),
completion=completion,
error=repr(e),
)
else:
completion = await self.retry_chain.ainvoke(
{
"prompt": prompt_value.to_string(),
"completion": completion,
},
)
msg = "Failed to parse"
raise OutputParserException(msg)
@override
def parse(self, completion: str) -> T:
msg = "This OutputParser can only be called by the `parse_with_prompt` method."
raise NotImplementedError(msg)
@override
def get_format_instructions(self) -> str:
return self.parser.get_format_instructions()
@property
def _type(self) -> str:
return "retry"
@property
@override
def OutputType(self) -> type[T]:
return self.parser.OutputType
| RetryOutputParser |
python | encode__httpx | httpx/_client.py | {
"start": 42349,
"end": 65713
} | class ____(BaseClient):
"""
An asynchronous HTTP client, with connection pooling, HTTP/2, redirects,
cookie persistence, etc.
It can be shared between tasks.
Usage:
```python
>>> async with httpx.AsyncClient() as client:
>>> response = await client.get('https://example.org')
```
**Parameters:**
* **auth** - *(optional)* An authentication class to use when sending
requests.
* **params** - *(optional)* Query parameters to include in request URLs, as
a string, dictionary, or sequence of two-tuples.
* **headers** - *(optional)* Dictionary of HTTP headers to include when
sending requests.
* **cookies** - *(optional)* Dictionary of Cookie items to include when
sending requests.
* **verify** - *(optional)* Either `True` to use an SSL context with the
default CA bundle, `False` to disable verification, or an instance of
`ssl.SSLContext` to use a custom context.
* **http2** - *(optional)* A boolean indicating if HTTP/2 support should be
enabled. Defaults to `False`.
* **proxy** - *(optional)* A proxy URL where all the traffic should be routed.
* **timeout** - *(optional)* The timeout configuration to use when sending
requests.
* **limits** - *(optional)* The limits configuration to use.
* **max_redirects** - *(optional)* The maximum number of redirect responses
that should be followed.
* **base_url** - *(optional)* A URL to use as the base when building
request URLs.
* **transport** - *(optional)* A transport class to use for sending requests
over the network.
* **trust_env** - *(optional)* Enables or disables usage of environment
variables for configuration.
* **default_encoding** - *(optional)* The default encoding to use for decoding
response text, if no charset information is included in a response Content-Type
header. Set to a callable for automatic character set detection. Default: "utf-8".
"""
def __init__(
self,
*,
auth: AuthTypes | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
http1: bool = True,
http2: bool = False,
proxy: ProxyTypes | None = None,
mounts: None | (typing.Mapping[str, AsyncBaseTransport | None]) = None,
timeout: TimeoutTypes = DEFAULT_TIMEOUT_CONFIG,
follow_redirects: bool = False,
limits: Limits = DEFAULT_LIMITS,
max_redirects: int = DEFAULT_MAX_REDIRECTS,
event_hooks: None | (typing.Mapping[str, list[EventHook]]) = None,
base_url: URL | str = "",
transport: AsyncBaseTransport | None = None,
trust_env: bool = True,
default_encoding: str | typing.Callable[[bytes], str] = "utf-8",
) -> None:
super().__init__(
auth=auth,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
follow_redirects=follow_redirects,
max_redirects=max_redirects,
event_hooks=event_hooks,
base_url=base_url,
trust_env=trust_env,
default_encoding=default_encoding,
)
if http2:
try:
import h2 # noqa
except ImportError: # pragma: no cover
raise ImportError(
"Using http2=True, but the 'h2' package is not installed. "
"Make sure to install httpx using `pip install httpx[http2]`."
) from None
allow_env_proxies = trust_env and transport is None
proxy_map = self._get_proxy_map(proxy, allow_env_proxies)
self._transport = self._init_transport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
transport=transport,
)
self._mounts: dict[URLPattern, AsyncBaseTransport | None] = {
URLPattern(key): None
if proxy is None
else self._init_proxy_transport(
proxy,
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
)
for key, proxy in proxy_map.items()
}
if mounts is not None:
self._mounts.update(
{URLPattern(key): transport for key, transport in mounts.items()}
)
self._mounts = dict(sorted(self._mounts.items()))
def _init_transport(
self,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
transport: AsyncBaseTransport | None = None,
) -> AsyncBaseTransport:
if transport is not None:
return transport
return AsyncHTTPTransport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
)
def _init_proxy_transport(
self,
proxy: Proxy,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
) -> AsyncBaseTransport:
return AsyncHTTPTransport(
verify=verify,
cert=cert,
trust_env=trust_env,
http1=http1,
http2=http2,
limits=limits,
proxy=proxy,
)
def _transport_for_url(self, url: URL) -> AsyncBaseTransport:
"""
Returns the transport instance that should be used for a given URL.
This will either be the standard connection pool, or a proxy.
"""
for pattern, transport in self._mounts.items():
if pattern.matches(url):
return self._transport if transport is None else transport
return self._transport
async def request(
self,
method: str,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Build and send a request.
Equivalent to:
```python
request = client.build_request(...)
response = await client.send(request, ...)
```
See `AsyncClient.build_request()`, `AsyncClient.send()`
and [Merging of configuration][0] for how the various parameters
are merged with client-level configuration.
[0]: /advanced/clients/#merging-of-configuration
"""
if cookies is not None: # pragma: no cover
message = (
"Setting per-request cookies=<...> is being deprecated, because "
"the expected behaviour on cookie persistence is ambiguous. Set "
"cookies directly on the client instance instead."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
request = self.build_request(
method=method,
url=url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
timeout=timeout,
extensions=extensions,
)
return await self.send(request, auth=auth, follow_redirects=follow_redirects)
    @asynccontextmanager
    async def stream(
        self,
        method: str,
        url: URL | str,
        *,
        content: RequestContent | None = None,
        data: RequestData | None = None,
        files: RequestFiles | None = None,
        json: typing.Any | None = None,
        params: QueryParamTypes | None = None,
        headers: HeaderTypes | None = None,
        cookies: CookieTypes | None = None,
        auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
        timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
        extensions: RequestExtensions | None = None,
    ) -> typing.AsyncIterator[Response]:
        """
        Alternative to `httpx.request()` that streams the response body
        instead of loading it into memory at once.

        **Parameters**: See `httpx.request`.

        See also: [Streaming Responses][0]

        [0]: /quickstart#streaming-responses
        """
        request = self.build_request(
            method=method,
            url=url,
            content=content,
            data=data,
            files=files,
            json=json,
            params=params,
            headers=headers,
            cookies=cookies,
            timeout=timeout,
            extensions=extensions,
        )
        # Send with `stream=True` so the response body is not eagerly read;
        # the caller consumes it incrementally from the yielded response.
        response = await self.send(
            request=request,
            auth=auth,
            follow_redirects=follow_redirects,
            stream=True,
        )
        try:
            yield response
        finally:
            # Always release the underlying connection, even if the caller's
            # `async with` body raises.
            await response.aclose()
    async def send(
        self,
        request: Request,
        *,
        stream: bool = False,
        auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
        follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
    ) -> Response:
        """
        Send a request.

        The request is sent as-is, unmodified.

        Typically you'll want to build one with `AsyncClient.build_request()`
        so that any client-level configuration is merged into the request,
        but passing an explicit `httpx.Request()` is supported as well.

        See also: [Request instances][0]

        [0]: /advanced/clients/#request-instances
        """
        # A closed client can never be reused. Opening lazily here lets the
        # client be used without an explicit `async with` block.
        if self._state == ClientState.CLOSED:
            raise RuntimeError("Cannot send a request, as the client has been closed.")
        self._state = ClientState.OPENED
        # Fall back to the client-level default if the caller didn't pass one.
        follow_redirects = (
            self.follow_redirects
            if isinstance(follow_redirects, UseClientDefault)
            else follow_redirects
        )
        self._set_timeout(request)
        auth = self._build_request_auth(request, auth)
        response = await self._send_handling_auth(
            request,
            auth=auth,
            follow_redirects=follow_redirects,
            history=[],
        )
        try:
            # Unless streaming was requested, read the full body now so the
            # underlying connection can be released.
            if not stream:
                await response.aread()
            return response
        except BaseException as exc:
            # Don't leak the connection if reading fails or the task is
            # cancelled (BaseException covers CancelledError too).
            await response.aclose()
            raise exc
    async def _send_handling_auth(
        self,
        request: Request,
        auth: Auth,
        follow_redirects: bool,
        history: list[Response],
    ) -> Response:
        # The auth flow is an async generator: it yields requests to be sent
        # and receives the resulting responses, which lets multi-step schemes
        # (e.g. challenge/response) issue follow-up requests.
        auth_flow = auth.async_auth_flow(request)
        try:
            # First request to send comes from the flow itself.
            request = await auth_flow.__anext__()
            while True:
                response = await self._send_handling_redirects(
                    request,
                    follow_redirects=follow_redirects,
                    history=history,
                )
                try:
                    try:
                        next_request = await auth_flow.asend(response)
                    except StopAsyncIteration:
                        # The auth flow is satisfied with this response.
                        return response
                    # The flow produced a follow-up request: record the current
                    # response, fully read it so its connection is released,
                    # then loop again with the new request.
                    response.history = list(history)
                    await response.aread()
                    request = next_request
                    history.append(response)
                except BaseException as exc:
                    await response.aclose()
                    raise exc
        finally:
            # Close the generator so any cleanup inside the auth flow runs.
            await auth_flow.aclose()
    async def _send_handling_redirects(
        self,
        request: Request,
        follow_redirects: bool,
        history: list[Response],
    ) -> Response:
        while True:
            # `history` holds one response per hop already taken.
            if len(history) > self.max_redirects:
                raise TooManyRedirects(
                    "Exceeded maximum allowed redirects.", request=request
                )
            # "request" event hooks run before every hop, including redirects.
            for hook in self._event_hooks["request"]:
                await hook(request)
            response = await self._send_single_request(request)
            try:
                for hook in self._event_hooks["response"]:
                    await hook(response)
                response.history = list(history)
                if not response.has_redirect_location:
                    return response
                request = self._build_redirect_request(request, response)
                # New list rather than in-place append: callers' `history`
                # argument must not be mutated by redirect hops.
                history = history + [response]
                if follow_redirects:
                    # Drain the body so the connection is released before
                    # following the redirect.
                    await response.aread()
                else:
                    # Not following: expose the would-be next hop on the
                    # response and let the caller decide.
                    response.next_request = request
                    return response
            except BaseException as exc:
                await response.aclose()
                raise exc
    async def _send_single_request(self, request: Request) -> Response:
        """
        Sends a single request, without handling any redirections.
        """
        # Route via a mounted (e.g. proxy) transport if one matches the URL.
        transport = self._transport_for_url(request.url)
        # Timestamp handed to the stream wrapper below, used to measure how
        # long the request/response cycle takes.
        start = time.perf_counter()
        if not isinstance(request.stream, AsyncByteStream):
            raise RuntimeError(
                "Attempted to send a sync request with an AsyncClient instance."
            )
        # `request_context` ties transport-level errors to this request —
        # see that helper for the exact semantics.
        with request_context(request=request):
            response = await transport.handle_async_request(request)
        assert isinstance(response.stream, AsyncByteStream)
        response.request = request
        # Bind the raw byte stream to this response, carrying the `start`
        # timestamp along for elapsed-time accounting.
        response.stream = BoundAsyncStream(
            response.stream, response=response, start=start
        )
        # Persist any Set-Cookie headers onto the client's cookie jar.
        self.cookies.extract_cookies(response)
        response.default_encoding = self._default_encoding
        # Lazy %-style args: formatting only happens if INFO is enabled.
        logger.info(
            'HTTP Request: %s %s "%s %d %s"',
            request.method,
            request.url,
            response.http_version,
            response.status_code,
            response.reason_phrase,
        )
        return response
async def get(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault | None = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `GET` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"GET",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def options(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send an `OPTIONS` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"OPTIONS",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def head(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `HEAD` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"HEAD",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def post(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `POST` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"POST",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def put(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `PUT` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"PUT",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def patch(
self,
url: URL | str,
*,
content: RequestContent | None = None,
data: RequestData | None = None,
files: RequestFiles | None = None,
json: typing.Any | None = None,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `PATCH` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"PATCH",
url,
content=content,
data=data,
files=files,
json=json,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def delete(
self,
url: URL | str,
*,
params: QueryParamTypes | None = None,
headers: HeaderTypes | None = None,
cookies: CookieTypes | None = None,
auth: AuthTypes | UseClientDefault = USE_CLIENT_DEFAULT,
follow_redirects: bool | UseClientDefault = USE_CLIENT_DEFAULT,
timeout: TimeoutTypes | UseClientDefault = USE_CLIENT_DEFAULT,
extensions: RequestExtensions | None = None,
) -> Response:
"""
Send a `DELETE` request.
**Parameters**: See `httpx.request`.
"""
return await self.request(
"DELETE",
url,
params=params,
headers=headers,
cookies=cookies,
auth=auth,
follow_redirects=follow_redirects,
timeout=timeout,
extensions=extensions,
)
async def aclose(self) -> None:
"""
Close transport and proxies.
"""
if self._state != ClientState.CLOSED:
self._state = ClientState.CLOSED
await self._transport.aclose()
for proxy in self._mounts.values():
if proxy is not None:
await proxy.aclose()
    async def __aenter__(self: U) -> U:
        # The client is a one-shot state machine: UNOPENED -> OPENED -> CLOSED.
        # Entering twice, or re-entering after close, is an error.
        if self._state != ClientState.UNOPENED:
            msg = {
                ClientState.OPENED: "Cannot open a client instance more than once.",
                ClientState.CLOSED: (
                    "Cannot reopen a client instance, once it has been closed."
                ),
            }[self._state]
            raise RuntimeError(msg)
        self._state = ClientState.OPENED
        # Enter the default transport plus every mounted (non-None) transport.
        await self._transport.__aenter__()
        for proxy in self._mounts.values():
            if proxy is not None:
                await proxy.__aenter__()
        return self
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None = None,
        exc_value: BaseException | None = None,
        traceback: TracebackType | None = None,
    ) -> None:
        # Mark closed first so the client cannot be reused mid-teardown.
        self._state = ClientState.CLOSED
        # Forward the exit (including any in-flight exception info) to the
        # default transport and every mounted (non-None) transport.
        await self._transport.__aexit__(exc_type, exc_value, traceback)
        for proxy in self._mounts.values():
            if proxy is not None:
                await proxy.__aexit__(exc_type, exc_value, traceback)
| AsyncClient |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/daemon_sensor_tests/test_pythonic_resources.py | {
"start": 1993,
"end": 18524
} | class ____(
dg.ConfigurableResource, dg.IAttachDifferentObjectToOpContext
):
a_str: str
def get_object_to_set_on_execution_context(self) -> str:
return self.a_str
@sensor(job_name="the_job", required_resource_keys={"my_resource"})
def sensor_from_context(context: SensorEvaluationContext):
return dg.RunRequest(context.resources.my_resource.a_str, run_config={}, tags={})
@sensor(job_name="the_job")
def sensor_from_fn_arg(context: SensorEvaluationContext, my_resource: MyResource):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.op(out={})
def the_op_but_with_a_resource_dep(my_resource: MyResource):
assert my_resource.a_str == "foo"
@dg.job
def the_job_but_with_a_resource_dep() -> None:
the_op_but_with_a_resource_dep()
@sensor(job_name="the_job_but_with_a_resource_dep")
def sensor_with_job_with_resource_dep(context: SensorEvaluationContext, my_resource: MyResource):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@sensor(job_name="the_job")
def sensor_with_resource_from_context(
context: SensorEvaluationContext, my_resource_attach: MyResourceAttachDifferentObject
):
assert context.resources.my_resource_attach == my_resource_attach.a_str
return dg.RunRequest(my_resource_attach.a_str, run_config={}, tags={})
is_in_cm = False
@dg.resource
@contextmanager
def my_cm_resource(_) -> Iterator[str]:
global is_in_cm # noqa: PLW0603
is_in_cm = True
yield "foo"
is_in_cm = False
@sensor(job_name="the_job")
def sensor_with_cm(context: SensorEvaluationContext, my_cm_resource: dg.ResourceParam[str]):
assert is_in_cm
return dg.RunRequest(my_cm_resource, run_config={}, tags={})
@sensor(job_name="the_job", required_resource_keys={"my_resource"})
def sensor_from_context_weird_name(not_called_context: SensorEvaluationContext):
return dg.RunRequest(not_called_context.resources.my_resource.a_str, run_config={}, tags={})
@sensor(job_name="the_job")
def sensor_from_fn_arg_no_context(my_resource: MyResource):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@sensor(job_name="the_job")
def sensor_context_arg_not_first_and_weird_name(
my_resource: MyResource, not_called_context: SensorEvaluationContext
):
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
return dg.RunRequest(not_called_context.resources.my_resource.a_str, run_config={}, tags={})
@dg.resource
def the_inner() -> str:
return "oo"
@dg.resource(required_resource_keys={"the_inner"})
def the_outer(init_context) -> str:
return "f" + init_context.resources.the_inner
@sensor(job=the_job, required_resource_keys={"the_outer"})
def sensor_resource_deps(context):
return dg.RunRequest(context.resources.the_outer, run_config={}, tags={})
@dg.asset_sensor(asset_key=dg.AssetKey("my_asset"), job_name="the_job")
def sensor_asset(my_resource: MyResource, not_called_context: SensorEvaluationContext):
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.asset_sensor(asset_key=dg.AssetKey("my_asset"), job_name="the_job")
def sensor_asset_with_cm(
my_cm_resource: dg.ResourceParam[str], not_called_context: SensorEvaluationContext
):
assert not_called_context.resources.my_cm_resource == my_cm_resource
assert is_in_cm
return dg.RunRequest(my_cm_resource, run_config={}, tags={})
@dg.asset_sensor(asset_key=dg.AssetKey("my_asset"), job_name="the_job")
def sensor_asset_with_event(
my_resource: MyResource,
not_called_context: SensorEvaluationContext,
my_asset_event: EventLogEntry,
):
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
assert my_asset_event.dagster_event
assert my_asset_event.dagster_event.asset_key == dg.AssetKey("my_asset")
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.asset_sensor(asset_key=dg.AssetKey("my_asset"), job_name="the_job")
def sensor_asset_no_context(
my_resource: MyResource,
):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.multi_asset_sensor(
monitored_assets=[dg.AssetKey("my_asset")],
job_name="the_job",
)
def sensor_multi_asset(
my_resource: MyResource,
not_called_context: MultiAssetSensorEvaluationContext,
) -> dg.RunRequest:
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
asset_events = list(
not_called_context.materialization_records_for_key(
asset_key=dg.AssetKey("my_asset"), limit=1
)
)
if asset_events:
not_called_context.advance_all_cursors()
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.multi_asset_sensor(
monitored_assets=[dg.AssetKey("my_asset")],
job_name="the_job",
)
def sensor_multi_asset_with_cm(
my_cm_resource: dg.ResourceParam[str],
not_called_context: MultiAssetSensorEvaluationContext,
) -> dg.RunRequest:
assert not_called_context.resources.my_cm_resource == my_cm_resource
assert is_in_cm
asset_events = list(
not_called_context.materialization_records_for_key(
asset_key=dg.AssetKey("my_asset"), limit=1
)
)
if asset_events:
not_called_context.advance_all_cursors()
return dg.RunRequest(my_cm_resource, run_config={}, tags={})
@dg.run_status_sensor(
monitor_all_repositories=True, run_status=DagsterRunStatus.SUCCESS, request_job=the_job
)
def sensor_run_status(my_resource: MyResource, not_called_context: RunStatusSensorContext):
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.run_status_sensor(
monitor_all_repositories=True, run_status=DagsterRunStatus.SUCCESS, request_job=the_job
)
def sensor_run_status_with_cm(
my_cm_resource: dg.ResourceParam[str], not_called_context: RunStatusSensorContext
):
assert not_called_context.resources.my_cm_resource == my_cm_resource
assert is_in_cm
return dg.RunRequest(my_cm_resource, run_config={}, tags={})
@dg.run_failure_sensor(monitor_all_repositories=True, request_job=the_job)
def sensor_run_failure(my_resource: MyResource, not_called_context: RunFailureSensorContext):
assert not_called_context.failure_event
assert not_called_context.resources.my_resource.a_str == my_resource.a_str
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@dg.run_failure_sensor(monitor_all_repositories=True, request_job=the_job)
def sensor_run_failure_with_cm(
my_cm_resource: dg.ResourceParam[str], not_called_context: RunFailureSensorContext
):
assert not_called_context.failure_event
assert not_called_context.resources.my_cm_resource == my_cm_resource
assert is_in_cm
return dg.RunRequest(my_cm_resource, run_config={}, tags={})
the_repo = dg.Definitions(
jobs=[the_job, the_failure_job, the_job_but_with_a_resource_dep],
sensors=[
sensor_from_context,
sensor_from_fn_arg,
sensor_with_job_with_resource_dep,
sensor_with_resource_from_context,
sensor_with_cm,
sensor_from_context_weird_name,
sensor_from_fn_arg_no_context,
sensor_context_arg_not_first_and_weird_name,
sensor_resource_deps,
sensor_asset,
sensor_asset_with_cm,
sensor_asset_with_event,
sensor_asset_no_context,
sensor_multi_asset,
sensor_multi_asset_with_cm,
sensor_run_status,
sensor_run_status_with_cm,
sensor_run_failure,
sensor_run_failure_with_cm,
],
resources={
"my_resource": MyResource(a_str="foo"),
"my_cm_resource": my_cm_resource,
"my_resource_attach": MyResourceAttachDifferentObject(a_str="foo"),
"the_inner": the_inner,
"the_outer": the_outer,
},
)
def create_workspace_load_target(attribute: Optional[str] = SINGLETON_REPOSITORY_NAME):
return ModuleTarget(
module_name="dagster_tests.daemon_sensor_tests.test_pythonic_resources",
attribute=None,
working_directory=os.path.join(os.path.dirname(__file__), "..", ".."),
location_name="test_location",
)
@pytest.fixture(name="workspace_context_struct_resources", scope="module")
def workspace_fixture(instance_module_scoped):
with create_test_daemon_workspace_context(
workspace_load_target=create_workspace_load_target(),
instance=instance_module_scoped,
) as workspace:
yield workspace
@pytest.fixture(name="remote_repo_struct_resources", scope="module")
def remote_repo_fixture(workspace_context_struct_resources: WorkspaceProcessContext):
repo_loc = next(
iter(
workspace_context_struct_resources.create_request_context()
.get_code_location_entries()
.values()
)
).code_location
assert repo_loc
return repo_loc.get_repository(SINGLETON_REPOSITORY_NAME)
def loadable_target_origin() -> LoadableTargetOrigin:
return LoadableTargetOrigin(
executable_path=sys.executable,
module_name="dagster_tests.daemon_sensor_tests.test_pythonic_resources",
working_directory=os.getcwd(),
attribute=None,
)
def test_cant_use_required_resource_keys_and_params_both() -> None:
with pytest.raises(ParameterCheckError):
@sensor(job_name="the_job", required_resource_keys={"my_other_resource"})
def sensor_from_context_and_params(
context: SensorEvaluationContext, my_resource: MyResource
):
return dg.RunRequest(my_resource.a_str, run_config={}, tags={})
@pytest.mark.parametrize(
"sensor_name",
[
"sensor_from_context",
"sensor_from_fn_arg",
"sensor_with_job_with_resource_dep",
"sensor_with_resource_from_context",
"sensor_with_cm",
"sensor_from_context_weird_name",
"sensor_from_fn_arg_no_context",
"sensor_context_arg_not_first_and_weird_name",
"sensor_resource_deps",
"sensor_asset",
"sensor_asset_with_cm",
"sensor_asset_with_event",
"sensor_asset_no_context",
"sensor_multi_asset",
"sensor_multi_asset_with_cm",
],
)
def test_resources(
caplog,
instance: DagsterInstance,
workspace_context_struct_resources,
remote_repo_struct_resources,
sensor_name,
) -> None:
assert not is_in_cm
freeze_datetime = create_datetime(
year=2019,
month=2,
day=27,
hour=23,
minute=59,
second=59,
).astimezone(get_timezone("US/Central"))
with freeze_time(freeze_datetime):
base_run_count = 0
if "asset" in sensor_name:
the_job.execute_in_process(instance=instance)
base_run_count = 1
sensor = remote_repo_struct_resources.get_sensor(sensor_name)
instance.add_instigator_state(
InstigatorState(
sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.RUNNING,
)
)
assert instance.get_runs_count() == base_run_count
ticks = instance.get_ticks(sensor.get_remote_origin_id(), sensor.selector_id)
assert len(ticks) == 0
evaluate_sensors(workspace_context_struct_resources, None)
wait_for_all_runs_to_start(instance)
assert instance.get_runs_count() == base_run_count + 1
run = instance.get_runs()[0]
ticks = instance.get_ticks(sensor.get_remote_origin_id(), sensor.selector_id)
assert len(ticks) == 1
assert ticks[0].run_keys == ["foo"]
validate_tick(
ticks[0],
sensor,
freeze_datetime,
TickStatus.SUCCESS,
expected_run_ids=[run.run_id],
)
assert not is_in_cm
@pytest.mark.parametrize(
"sensor_name",
[
"sensor_run_status",
"sensor_run_status_with_cm",
],
)
def test_resources_run_status_sensor(
caplog,
instance: DagsterInstance,
workspace_context_struct_resources,
remote_repo_struct_resources,
sensor_name,
) -> None:
assert not is_in_cm
freeze_datetime = create_datetime(
year=2019,
month=2,
day=27,
hour=23,
minute=59,
second=59,
).astimezone(get_timezone("US/Central"))
original_time = freeze_datetime
with freeze_time(freeze_datetime):
remote_sensor = remote_repo_struct_resources.get_sensor(sensor_name)
instance.add_instigator_state(
InstigatorState(
remote_sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.RUNNING,
)
)
ticks = instance.get_ticks(remote_sensor.get_remote_origin_id(), remote_sensor.selector_id)
assert len(ticks) == 0
# We have to do two ticks because the first tick will be skipped due to the run status
# sensor initializing its cursor
with freeze_time(freeze_datetime):
evaluate_sensors(workspace_context_struct_resources, None)
wait_for_all_runs_to_start(instance)
the_job.execute_in_process(instance=instance)
freeze_datetime = freeze_datetime + relativedelta(seconds=60)
with freeze_time(freeze_datetime):
evaluate_sensors(workspace_context_struct_resources, None)
wait_for_all_runs_to_start(instance)
with freeze_time(freeze_datetime):
ticks = instance.get_ticks(remote_sensor.get_remote_origin_id(), remote_sensor.selector_id)
assert len(ticks) == 2
assert instance.get_runs_count() == 2
run = instance.get_runs()[0]
assert ticks[0].run_keys == ["foo"]
validate_tick(
ticks[0],
remote_sensor,
freeze_datetime,
TickStatus.SUCCESS,
expected_run_ids=[run.run_id],
)
validate_tick(
ticks[1],
remote_sensor,
original_time,
TickStatus.SKIPPED,
expected_run_ids=[],
)
assert not is_in_cm
@pytest.mark.parametrize(
"sensor_name",
[
"sensor_run_failure",
"sensor_run_failure_with_cm",
],
)
def test_resources_run_failure_sensor(
caplog,
instance: DagsterInstance,
workspace_context_struct_resources,
remote_repo_struct_resources,
sensor_name,
) -> None:
assert not is_in_cm
freeze_datetime = create_datetime(
year=2019,
month=2,
day=27,
hour=23,
minute=59,
second=59,
).astimezone(get_timezone("US/Central"))
original_time = freeze_datetime
with freeze_time(freeze_datetime):
remote_sensor = remote_repo_struct_resources.get_sensor(sensor_name)
instance.add_instigator_state(
InstigatorState(
remote_sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.RUNNING,
)
)
ticks = instance.get_ticks(remote_sensor.get_remote_origin_id(), remote_sensor.selector_id)
assert len(ticks) == 0
# We have to do two ticks because the first tick will be skipped due to the run status
# sensor initializing its cursor
with freeze_time(freeze_datetime):
evaluate_sensors(workspace_context_struct_resources, None)
wait_for_all_runs_to_start(instance)
the_failure_job.execute_in_process(instance=instance, raise_on_error=False)
freeze_datetime = freeze_datetime + relativedelta(seconds=60)
with freeze_time(freeze_datetime):
evaluate_sensors(workspace_context_struct_resources, None)
wait_for_all_runs_to_start(instance)
with freeze_time(freeze_datetime):
ticks = instance.get_ticks(remote_sensor.get_remote_origin_id(), remote_sensor.selector_id)
assert len(ticks) == 2
assert instance.get_runs_count() == 2
run = instance.get_runs()[0]
assert ticks[0].run_keys == ["foo"]
validate_tick(
ticks[0],
remote_sensor,
freeze_datetime,
TickStatus.SUCCESS,
expected_run_ids=[run.run_id],
)
validate_tick(
ticks[1],
remote_sensor,
original_time,
TickStatus.SKIPPED,
expected_run_ids=[],
)
assert not is_in_cm
| MyResourceAttachDifferentObject |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cloud/runs.py | {
"start": 615,
"end": 9949
} | class ____(Enum):
"""dbt Cloud Job statuses."""
QUEUED = 1
STARTING = 2
RUNNING = 3
SUCCESS = 10
FAILED = 20
CANCELLED = 30
@classmethod
def is_terminal_status_code(cls, status_code: Any) -> bool:
"""
Returns True if a status code is terminal for a job run.
Returns False otherwise.
"""
return status_code in [cls.SUCCESS.value, cls.FAILED.value, cls.CANCELLED.value]
@task(
name="Get dbt Cloud job run details",
description="Retrieves details of a dbt Cloud job run "
"for the run with the given run_id.",
retries=3,
retry_delay_seconds=10,
)
async def get_dbt_cloud_run_info(
dbt_cloud_credentials: DbtCloudCredentials,
run_id: int,
include_related: Optional[
List[Literal["trigger", "job", "debug_logs", "run_steps"]]
] = None,
) -> Dict:
"""
A task to retrieve information about a dbt Cloud job run.
Args:
dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
run_id: The ID of the job to trigger.
include_related: List of related fields to pull with the run.
Valid values are "trigger", "job", "debug_logs", and "run_steps".
If "debug_logs" is not provided in a request, then the included debug
logs will be truncated to the last 1,000 lines of the debug log output file.
Returns:
The run data returned by the dbt Cloud administrative API.
Example:
Get status of a dbt Cloud job run:
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud.jobs import get_run
@flow
def get_run_flow():
credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
return get_run(
dbt_cloud_credentials=credentials,
run_id=42
)
get_run_flow()
```
""" # noqa
try:
async with dbt_cloud_credentials.get_administrative_client() as client:
response = await client.get_run(
run_id=run_id, include_related=include_related
)
except HTTPStatusError as ex:
raise DbtCloudGetRunFailed(extract_user_message(ex)) from ex
return response.json()["data"]
@task(
name="List dbt Cloud job artifacts",
description="Fetches a list of artifact files generated for a completed run.",
retries=3,
retry_delay_seconds=10,
)
async def list_dbt_cloud_run_artifacts(
dbt_cloud_credentials: DbtCloudCredentials, run_id: int, step: Optional[int] = None
) -> List[str]:
"""
A task to list the artifact files generated for a completed run.
Args:
dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
run_id: The ID of the run to list run artifacts for.
step: The index of the step in the run to query for artifacts. The
first step in the run has the index 1. If the step parameter is
omitted, then this method will return the artifacts compiled
for the last step in the run.
Returns:
A list of paths to artifact files that can be used to retrieve the generated artifacts.
Example:
List artifacts of a dbt Cloud job run:
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud.jobs import list_dbt_cloud_run_artifacts
@flow
def list_artifacts_flow():
credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
return list_dbt_cloud_run_artifacts(
dbt_cloud_credentials=credentials,
run_id=42
)
list_artifacts_flow()
```
""" # noqa
try:
async with dbt_cloud_credentials.get_administrative_client() as client:
response = await client.list_run_artifacts(run_id=run_id, step=step)
except HTTPStatusError as ex:
raise DbtCloudListRunArtifactsFailed(extract_user_message(ex)) from ex
return response.json()["data"]
@task(
name="Get dbt Cloud job artifact",
description="Fetches an artifact from a completed run.",
retries=3,
retry_delay_seconds=10,
)
async def get_dbt_cloud_run_artifact(
dbt_cloud_credentials: DbtCloudCredentials,
run_id: int,
path: str,
step: Optional[int] = None,
) -> Union[Dict, str]:
"""
A task to get an artifact generated for a completed run. The requested artifact
is saved to a file in the current working directory.
Args:
dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
run_id: The ID of the run to list run artifacts for.
path: The relative path to the run artifact (e.g. manifest.json, catalog.json,
run_results.json)
step: The index of the step in the run to query for artifacts. The
first step in the run has the index 1. If the step parameter is
omitted, then this method will return the artifacts compiled
for the last step in the run.
Returns:
The contents of the requested manifest. Returns a `Dict` if the
requested artifact is a JSON file and a `str` otherwise.
Examples:
Get an artifact of a dbt Cloud job run:
```python
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud.runs import get_dbt_cloud_run_artifact
@flow
def get_artifact_flow():
credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
return get_dbt_cloud_run_artifact(
dbt_cloud_credentials=credentials,
run_id=42,
path="manifest.json"
)
get_artifact_flow()
```
Get an artifact of a dbt Cloud job run and write it to a file:
```python
import json
from prefect import flow
from prefect_dbt.cloud import DbtCloudCredentials
from prefect_dbt.cloud.jobs import get_dbt_cloud_run_artifact
@flow
def get_artifact_flow():
credentials = DbtCloudCredentials(api_key="my_api_key", account_id=123456789)
get_run_artifact_result = get_dbt_cloud_run_artifact(
dbt_cloud_credentials=credentials,
run_id=42,
path="manifest.json"
)
with open("manifest.json", "w") as file:
json.dump(get_run_artifact_result, file)
get_artifact_flow()
```
""" # noqa
try:
async with dbt_cloud_credentials.get_administrative_client() as client:
response = await client.get_run_artifact(
run_id=run_id, path=path, step=step
)
except HTTPStatusError as ex:
raise DbtCloudGetRunArtifactFailed(extract_user_message(ex)) from ex
if path.endswith(".json"):
artifact_contents = response.json()
else:
artifact_contents = response.text
return artifact_contents
@flow(
name="Wait for dbt Cloud job run",
description="Waits for a dbt Cloud job run to finish running.",
)
async def wait_for_dbt_cloud_job_run(
run_id: int,
dbt_cloud_credentials: DbtCloudCredentials,
max_wait_seconds: int = 900,
poll_frequency_seconds: int = 10,
) -> Tuple[DbtCloudJobRunStatus, Dict]:
"""
Waits for the given dbt Cloud job run to finish running.
Args:
run_id: The ID of the run to wait for.
dbt_cloud_credentials: Credentials for authenticating with dbt Cloud.
max_wait_seconds: Maximum number of seconds to wait for job to complete
poll_frequency_seconds: Number of seconds to wait in between checks for
run completion.
Raises:
DbtCloudJobRunTimedOut: When the elapsed wait time exceeds `max_wait_seconds`.
Returns:
run_status: An enum representing the final dbt Cloud job run status
run_data: A dictionary containing information about the run after completion.
Example:
"""
logger = get_run_logger()
seconds_waited_for_run_completion = 0
wait_for = []
while seconds_waited_for_run_completion <= max_wait_seconds:
run_data_future = await get_dbt_cloud_run_info(
dbt_cloud_credentials=dbt_cloud_credentials,
run_id=run_id,
wait_for=wait_for,
)
run_data = run_data_future
run_status_code = run_data.get("status")
if DbtCloudJobRunStatus.is_terminal_status_code(run_status_code):
return DbtCloudJobRunStatus(run_status_code), run_data
wait_for = [run_data_future]
logger.debug(
"dbt Cloud job run with ID %i has status %s. Waiting for %i seconds.",
run_id,
DbtCloudJobRunStatus(run_status_code).name,
poll_frequency_seconds,
)
await asyncio.sleep(poll_frequency_seconds)
seconds_waited_for_run_completion += poll_frequency_seconds
raise DbtCloudJobRunTimedOut(
f"Max wait time of {max_wait_seconds} seconds exceeded while waiting "
"for job run with ID {run_id}"
)
| DbtCloudJobRunStatus |
python | viewflow__viewflow | viewflow/workflow/flow/views/list.py | {
"start": 3902,
"end": 4901
} | class ____(mixins.StoreRequestPathMixin, ListModelView):
flow_classes = None
model = Task
template_name = "viewflow/workflow/workflow_tasks_list.html"
def task_id(self, task):
task_url = task.flow_task.reverse("index", args=[task.process_id, task.pk])
return mark_safe(f'<a href="{task_url}">#{task.process_id}/{task.pk}</a>')
task_id.short_description = _("#")
def flow_task(self, task):
return task.title
flow_task.short_description = _("Task")
def process_brief(self, task):
flow_viewset = task.flow_task.flow_class.parent
process_url = flow_viewset.reverse("process_detail", args=[task.process_id])
return mark_safe(f'<a href="{process_url}">{task.process.brief}</a>')
def flows_start_nodes(self):
return {
flow_class: start_nodes
for flow_class in self.flow_classes
if (start_nodes := flow_class.get_start_nodes(self.request.user))
}
| WorkflowTaskListView |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-fusion-retriever/llama_index/packs/fusion_retriever/query_rewrite/base.py | {
"start": 410,
"end": 2173
} | class ____(BaseLlamaPack):
"""
Query rewriting retriever pack.
Given input nodes, build a vector index.
Then rewrite the query into multiple queries and
rerank the results.
"""
def __init__(
self,
nodes: List[TextNode] = None,
chunk_size: int = 256,
mode: str = "reciprocal_rerank",
vector_similarity_top_k: int = 2,
fusion_similarity_top_k: int = 2,
num_queries: int = 4,
**kwargs: Any,
) -> None:
"""Init params."""
Settings.chunk_size = chunk_size
index = VectorStoreIndex(nodes)
self.vector_retriever = index.as_retriever(
similarity_top_k=vector_similarity_top_k
)
self.fusion_retriever = QueryFusionRetriever(
[self.vector_retriever],
similarity_top_k=fusion_similarity_top_k,
num_queries=num_queries, # set this to 1 to disable query generation
mode=mode,
use_async=True,
verbose=True,
# query_gen_prompt="...", # we could override the query generation prompt here
)
self.query_engine = RetrieverQueryEngine.from_args(self.fusion_retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"vector_retriever": self.vector_retriever,
"fusion_retriever": self.fusion_retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.fusion_retriever.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
| QueryRewritingRetrieverPack |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 605,
"end": 729
} | class ____(ParentClosed1):
b: str
# This should generate an error because extra_items is incompatible type.
| ChildClosed1_2 |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/text/sentence.py | {
"start": 839,
"end": 999
} | class ____:
text: str # the split text
is_sentence: bool # save whether this is a full sentence
token_size: int # token length of split text
| _Split |
python | numba__numba | numba/core/typing/listdecl.py | {
"start": 3179,
"end": 3516
} | class ____(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
if self.context.can_convert(b.dtype, a.dtype):
return signature(a, a, b)
@infer_global(operator.mul)
| InplaceAddList |
python | kamyu104__LeetCode-Solutions | Python/queue-reconstruction-by-height.py | {
"start": 39,
"end": 750
} | class ____(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
people.sort(key=lambda h_k: (-h_k[0], h_k[1]))
blocks = [[]]
for p in people:
index = p[1]
for i, block in enumerate(blocks):
if index <= len(block):
break
index -= len(block)
block.insert(index, p)
if len(block) * len(block) > len(people):
blocks.insert(i+1, block[len(block)/2:])
del block[len(block)/2:]
return [p for block in blocks for p in block]
# Time: O(n^2)
# Space: O(n)
| Solution |
python | huggingface__transformers | tests/utils/import_structures/import_structure_raw_register_with_versions.py | {
"start": 851,
"end": 992
} | class ____:
def __init__(self):
pass
@requires(backends=("torch>2.5",))
def d1():
pass
@requires(backends=("torch<=2.5",))
| D1 |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 5043,
"end": 5931
} | class ____(msgspec.Struct, array_like=True, frozen=True, omit_defaults=True):
id: int
"""
The request id, set by the sender.
This is used to allow "pipeling" of requests and to be able to tie response to requests, which is
particularly useful in the Triggerer where multiple async tasks can send a requests concurrently.
"""
body: dict[str, Any] | None
req_encoder: ClassVar[msgspec.msgpack.Encoder] = _new_encoder()
def as_bytes(self) -> bytearray:
# https://jcristharif.com/msgspec/perf-tips.html#length-prefix-framing for inspiration
buffer = bytearray(256)
self.req_encoder.encode_into(self, buffer, 4)
n = len(buffer) - 4
if n >= 2**32:
raise OverflowError(f"Cannot send messages larger than 4GiB {n=}")
buffer[:4] = n.to_bytes(4, byteorder="big")
return buffer
| _RequestFrame |
python | redis__redis-py | redis/asyncio/cluster.py | {
"start": 74770,
"end": 76597
} | class ____(ExecutionStrategy):
def __init__(self, pipe: ClusterPipeline) -> None:
self._pipe: ClusterPipeline = pipe
self._command_queue: List["PipelineCommand"] = []
async def initialize(self) -> "ClusterPipeline":
if self._pipe.cluster_client._initialize:
await self._pipe.cluster_client.initialize()
self._command_queue = []
return self._pipe
def execute_command(
self, *args: Union[KeyT, EncodableT], **kwargs: Any
) -> "ClusterPipeline":
self._command_queue.append(
PipelineCommand(len(self._command_queue), *args, **kwargs)
)
return self._pipe
def _annotate_exception(self, exception, number, command):
"""
Provides extra context to the exception prior to it being handled
"""
cmd = " ".join(map(safe_str, command))
msg = (
f"Command # {number} ({truncate_text(cmd)}) of pipeline "
f"caused error: {exception.args[0]}"
)
exception.args = (msg,) + exception.args[1:]
@abstractmethod
def mset_nonatomic(
self, mapping: Mapping[AnyKeyT, EncodableT]
) -> "ClusterPipeline":
pass
@abstractmethod
async def execute(
self, raise_on_error: bool = True, allow_redirections: bool = True
) -> List[Any]:
pass
@abstractmethod
async def reset(self):
pass
@abstractmethod
def multi(self):
pass
@abstractmethod
async def watch(self, *names):
pass
@abstractmethod
async def unwatch(self):
pass
@abstractmethod
async def discard(self):
pass
@abstractmethod
async def unlink(self, *names):
pass
def __len__(self) -> int:
return len(self._command_queue)
| AbstractStrategy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 345096,
"end": 346014
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateEnterpriseOwnerOrganizationRole"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "organization_id", "organization_role", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the Enterprise which the owner belongs to."""
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""The ID of the organization for membership change."""
organization_role = sgqlc.types.Field(sgqlc.types.non_null(RoleInOrganization), graphql_name="organizationRole")
"""The role to assume in the organization."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseOwnerOrganizationRoleInput |
python | getsentry__sentry | src/sentry/utils/outcomes.py | {
"start": 614,
"end": 4458
} | class ____:
def __init__(
self,
bucket_interval: int = 60,
flush_interval: int = 300,
max_batch_size: int = 10000,
jitter: int | None = None,
):
self.bucket_interval = bucket_interval
self.flush_interval = flush_interval
self.max_batch_size = max_batch_size
self._buffer: dict[OutcomeKey, int] = {}
self._lock = Lock()
if jitter is None:
jitter = random.randint(0, 60)
# Add jitter to the initial flush time to prevent all replicas from flushing simultaneously
# Default jitter is up to ~1 minute (0-60 seconds) if not specified
self._last_flush_time = time.time() + jitter
# since 3.13 we can rely on child processes of
# RunTaskWithMultiprocessing to also work correctly with atexit:
# https://github.com/python/cpython/pull/114279
atexit.register(self._atexit_flush)
def flush(self, force: bool = False) -> None:
if not force:
current_time = time.time()
buffer_size = len(self._buffer)
time_elapsed = current_time - self._last_flush_time
should_flush_size = buffer_size >= self.max_batch_size
should_flush_time = time_elapsed >= self.flush_interval
if should_flush_size:
metrics.incr("outcomes.flush_size")
elif should_flush_time:
metrics.incr("outcomes.flush_time")
else:
return
with self._lock:
if not self._buffer:
return
buffer_to_flush = self._buffer
self._buffer = {}
self._last_flush_time = time.time()
with metrics.timer("outcomes.flush_buffer"):
for key, aggregated_quantity in buffer_to_flush.items():
track_outcome(
org_id=key.org_id,
project_id=key.project_id,
key_id=key.key_id,
outcome=Outcome(key.outcome),
reason=key.reason,
timestamp=to_datetime(key.time_bucket * self.bucket_interval),
event_id=None,
category=DataCategory(key.category) if key.category is not None else None,
quantity=aggregated_quantity,
)
def track_outcome_aggregated(
self,
org_id: int,
project_id: int,
key_id: int | None,
outcome: Outcome,
reason: str | None = None,
timestamp: datetime | None = None,
category: DataCategory | None = None,
quantity: int | None = None,
) -> None:
if quantity is None:
quantity = 1
assert isinstance(org_id, int)
assert isinstance(project_id, int)
assert isinstance(key_id, (type(None), int))
assert isinstance(outcome, Outcome)
assert isinstance(timestamp, (type(None), datetime))
assert isinstance(category, (type(None), DataCategory))
assert isinstance(quantity, int)
now = to_datetime(time.time())
timestamp = timestamp or now
timestamp_seconds = int(timestamp.timestamp())
time_bucket = timestamp_seconds // self.bucket_interval
key = OutcomeKey(
time_bucket=time_bucket,
org_id=org_id,
project_id=project_id,
key_id=key_id,
outcome=outcome.value,
reason=reason,
category=category.value if category is not None else None,
)
with self._lock:
existing = self._buffer.get(key) or 0
self._buffer[key] = existing + quantity
self.flush()
def _atexit_flush(self) -> None:
self.flush(force=True)
# valid values for outcome
| OutcomeAggregator |
python | huggingface__transformers | src/transformers/models/mt5/modeling_mt5.py | {
"start": 5189,
"end": 5937
} | class ____(nn.Module):
def __init__(self, config: MT5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = MT5DenseGatedActDense(config)
else:
self.DenseReluDense = MT5DenseActDense(config)
self.layer_norm = MT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5Attention with T5->MT5
| MT5LayerFF |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py | {
"start": 13039,
"end": 14052
} | class ____(Benchmark):
"""
Univariate Problem18 objective function.
This class defines the Univariate Problem18 global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Problem18}}(x)
= \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x
\\leq 3 \\\\ 2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}
Bound constraints: :math:`x \\in [0, 6]`
.. figure:: figures/Problem18.png
:alt: Univariate Problem18 function
:align: center
**Univariate Problem18 function**
*Global optimum*: :math:`f(x)=0` for :math:`x = 2`
"""
def __init__(self, dimensions=1):
Benchmark.__init__(self, dimensions)
self._bounds = [(0.0, 6.0)]
self.global_optimum = 2
self.fglob = 0
def fun(self, x, *args):
self.nfev += 1
x = x[0]
if x <= 3:
return (x - 2.0) ** 2.0
return 2 * log(x - 2.0) + 1
| Problem18 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 1221,
"end": 1418
} | class ____(Class5[frozenset[T_contra]]): ...
# This should generate an error because Sequence[T_co]
# is covariant and is therefore not compatible with
# a contravariant type parameter.
| Class5_Child1 |
python | conda__conda | conda/core/link.py | {
"start": 7654,
"end": 8143
} | class ____(NamedTuple):
remove_menu_action_groups: Iterable[ActionGroup]
unlink_action_groups: Iterable[ActionGroup]
unregister_action_groups: Iterable[ActionGroup]
link_action_groups: Iterable[ActionGroup]
register_action_groups: Iterable[ActionGroup]
compile_action_groups: Iterable[ActionGroup]
make_menu_action_groups: Iterable[ActionGroup]
entry_point_action_groups: Iterable[ActionGroup]
prefix_record_groups: Iterable[ActionGroup]
| PrefixActionGroup |
python | pypa__warehouse | tests/unit/manage/views/test_organizations.py | {
"start": 48614,
"end": 57984
} | class ____:
@pytest.fixture
def organization(self):
organization = OrganizationFactory.create()
OrganizationStripeCustomerFactory.create(organization=organization)
return organization
@pytest.fixture
def organization_no_customer(self):
return OrganizationFactory.create()
@pytest.fixture
def subscription(self, organization):
return StripeSubscriptionFactory.create(
stripe_customer_id=organization.customer.customer_id
)
@pytest.fixture
def organization_subscription(self, organization, subscription):
return OrganizationStripeSubscriptionFactory.create(
organization=organization, subscription=subscription
)
@pytest.fixture
def subscription_price(self):
return StripeSubscriptionPriceFactory.create()
def test_customer_id(
self,
db_request,
subscription_service,
organization,
):
billing_service = pretend.stub(
create_customer=lambda *a, **kw: {"id": organization.customer.customer_id},
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
view.billing_service = billing_service
customer_id = view.customer_id
assert customer_id == organization.customer.customer_id
def test_customer_id_local_mock(
self,
db_request,
billing_service,
subscription_service,
organization_no_customer,
):
db_request.registry.settings["site.name"] = "PyPI"
view = org_views.ManageOrganizationBillingViews(
organization_no_customer, db_request
)
customer_id = view.customer_id
assert customer_id.startswith("mockcus_")
def test_disable_organizations(
self,
db_request,
billing_service,
subscription_service,
organization,
):
db_request.organization_access = False
view = org_views.ManageOrganizationBillingViews(organization, db_request)
with pytest.raises(HTTPNotFound):
view.create_or_manage_subscription()
@pytest.mark.usefixtures("_enable_organizations")
def test_activate_subscription(
self,
db_request,
organization,
monkeypatch,
):
organization_activate_billing_form_obj = pretend.stub()
organization_activate_billing_form_cls = pretend.call_recorder(
lambda *a, **kw: organization_activate_billing_form_obj
)
monkeypatch.setattr(
org_views,
"OrganizationActivateBillingForm",
organization_activate_billing_form_cls,
)
db_request.POST = MultiDict()
view = org_views.ManageOrganizationBillingViews(organization, db_request)
result = view.activate_subscription()
assert result == {
"organization": organization,
"form": organization_activate_billing_form_obj,
}
@pytest.mark.usefixtures("_enable_organizations")
def test_post_activate_subscription_valid(
self,
db_request,
organization,
monkeypatch,
):
db_request.method = "POST"
db_request.POST = MultiDict({"terms_of_service_agreement": "1"})
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "mock-billing-url"
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
result = view.activate_subscription()
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "mock-billing-url"
@pytest.mark.usefixtures("_enable_organizations")
def test_post_activate_subscription_invalid(
self,
db_request,
organization,
monkeypatch,
):
db_request.method = "POST"
db_request.POST = MultiDict()
view = org_views.ManageOrganizationBillingViews(organization, db_request)
result = view.activate_subscription()
assert result["organization"] == organization
assert result["form"].terms_of_service_agreement.errors == [
"Terms of Service must be accepted."
]
@pytest.mark.usefixtures("_enable_organizations")
def test_create_subscription(
self,
db_request,
subscription_service,
organization,
subscription_price,
monkeypatch,
):
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "mock-session-url"
)
# Stub for billing service is not instance of MockStripeBillingService.
create_checkout_session = pretend.call_recorder(
lambda *a, **kw: {"url": "session-url"}
)
billing_service = pretend.stub(
create_checkout_session=create_checkout_session,
create_customer=lambda *a, **kw: {"id": organization.customer.customer_id},
sync_price=lambda *a, **kw: None,
sync_product=lambda *a, **kw: None,
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
view.billing_service = billing_service
result = view.create_or_manage_subscription()
assert create_checkout_session.calls == [
pretend.call(
customer_id=organization.customer.customer_id,
price_ids=[subscription_price.price_id],
success_url=view.return_url,
cancel_url=view.return_url,
),
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "session-url"
@pytest.mark.usefixtures("_enable_organizations")
def test_create_subscription_local_mock(
self,
db_request,
billing_service,
subscription_service,
organization,
subscription_price,
monkeypatch,
):
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "mock-session-url"
)
# Fixture for billing service is instance of MockStripeBillingService.
create_checkout_session = pretend.call_recorder(
lambda *a, **kw: {"url": "session-url"}
)
monkeypatch.setattr(
billing_service, "create_checkout_session", create_checkout_session
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
result = view.create_or_manage_subscription()
assert create_checkout_session.calls == [
pretend.call(
customer_id=view.customer_id,
price_ids=[subscription_price.price_id],
success_url=view.return_url,
cancel_url=view.return_url,
),
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "mock-session-url"
@pytest.mark.usefixtures("_enable_organizations")
def test_manage_subscription(
self,
db_request,
billing_service,
subscription_service,
organization,
organization_subscription,
monkeypatch,
):
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "mock-session-url"
)
# Stub for billing service is not instance of MockStripeBillingService.
create_portal_session = pretend.call_recorder(
lambda *a, **kw: {"url": "session-url"}
)
billing_service = pretend.stub(
create_portal_session=create_portal_session,
sync_price=lambda *a, **kw: None,
sync_product=lambda *a, **kw: None,
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
view.billing_service = billing_service
result = view.create_or_manage_subscription()
assert create_portal_session.calls == [
pretend.call(
customer_id=organization.customer.customer_id,
return_url=view.return_url,
),
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "session-url"
@pytest.mark.usefixtures("_enable_organizations")
def test_manage_subscription_local_mock(
self,
db_request,
billing_service,
subscription_service,
organization,
organization_subscription,
monkeypatch,
):
db_request.route_path = pretend.call_recorder(
lambda *a, **kw: "mock-session-url"
)
# Fixture for billing service is instance of MockStripeBillingService.
create_portal_session = pretend.call_recorder(
lambda *a, **kw: {"url": "session-url"}
)
monkeypatch.setattr(
billing_service, "create_portal_session", create_portal_session
)
view = org_views.ManageOrganizationBillingViews(organization, db_request)
result = view.create_or_manage_subscription()
assert create_portal_session.calls == [
pretend.call(
customer_id=organization.customer.customer_id,
return_url=view.return_url,
),
]
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "mock-session-url"
| TestManageOrganizationBillingViews |
python | sphinx-doc__sphinx | sphinx/util/docutils.py | {
"start": 9524,
"end": 12501
} | class ____(CustomReSTDispatcher):
"""Monkey-patch directive and role dispatch, so that domain-specific
markup takes precedence.
"""
def __init__(self, env: BuildEnvironment) -> None:
self.domains = env.domains
self.current_document = env.current_document
super().__init__()
def directive(
self,
directive_name: str,
language_module: ModuleType,
document: nodes.document,
) -> tuple[type[Directive] | None, list[system_message]]:
"""Lookup a directive, given its name which can include a domain."""
directive_name = directive_name.lower()
# explicit domain given?
if ':' in directive_name:
domain_name, _, name = directive_name.partition(':')
try:
domain = self.domains[domain_name]
except KeyError:
logger.warning(__('unknown directive name: %s'), directive_name)
else:
element = domain.directive(name)
if element is not None:
return element, []
# else look in the default domain
else:
name = directive_name
default_domain = self.current_document.default_domain
if default_domain is not None:
element = default_domain.directive(name)
if element is not None:
return element, []
# always look in the std domain
element = self.domains.standard_domain.directive(name)
if element is not None:
return element, []
return super().directive(directive_name, language_module, document)
def role(
self,
role_name: str,
language_module: ModuleType,
lineno: int,
reporter: Reporter,
) -> tuple[RoleFunction, list[system_message]]:
"""Lookup a role, given its name which can include a domain."""
role_name = role_name.lower()
# explicit domain given?
if ':' in role_name:
domain_name, _, name = role_name.partition(':')
try:
domain = self.domains[domain_name]
except KeyError:
logger.warning(__('unknown role name: %s'), role_name)
else:
element = domain.role(name)
if element is not None:
return element, []
# else look in the default domain
else:
name = role_name
default_domain = self.current_document.default_domain
if default_domain is not None:
element = default_domain.role(name)
if element is not None:
return element, []
# always look in the std domain
element = self.domains.standard_domain.role(name)
if element is not None:
return element, []
return super().role(role_name, language_module, lineno, reporter)
| sphinx_domains |
python | django-haystack__django-haystack | test_haystack/test_fields.py | {
"start": 12971,
"end": 13984
} | class ____(TestCase):
def test_init(self):
try:
foo = DateField(model_attr="foo")
except:
self.fail()
def test_convert(self):
pub_date = DateField()
self.assertEqual(pub_date.convert("2016-02-16"), datetime.date(2016, 2, 16))
def test_prepare(self):
mock = MockModel()
mock.pub_date = datetime.date(2009, 2, 13)
pub_date = DateField(model_attr="pub_date")
self.assertEqual(pub_date.prepare(mock), datetime.date(2009, 2, 13))
# Simulate default=datetime.date(2000, 1, 1).
mock = MockModel()
default = DateField(default=datetime.date(2000, 1, 1))
self.assertEqual(default.prepare(mock), datetime.date(2000, 1, 1))
def test_prepare_from_string(self):
mock = MockModel()
mock.pub_date = datetime.date(2016, 2, 16)
pub_date = DateField(model_attr="pub_date")
self.assertEqual(pub_date.prepare(mock), datetime.date(2016, 2, 16))
| DateFieldTestCase |
python | walkccc__LeetCode | solutions/1300. Sum of Mutated Array Closest to Target/1300.py | {
"start": 0,
"end": 273
} | class ____:
def findBestValue(self, arr: list[int], target: int) -> int:
prefix = 0
arr.sort()
for i, a in enumerate(arr):
ans = round((target - prefix) / (len(arr) - i))
if ans <= a:
return ans
prefix += a
return arr[-1]
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB118.py | {
"start": 1740,
"end": 2167
} | class ____:
@staticmethod
def add(x, y):
return x + y
# See https://github.com/astral-sh/ruff/issues/13508
op_itemgetter = lambda x: x[:, 1]
op_itemgetter = lambda x: x[1, :]
# With a slice, trivia is dropped
op_itemgetter = lambda x: x[1, :]
# Without a slice, trivia is retained
op_itemgetter = lambda x: x[1, 2]
# All methods in classes are ignored, even those defined using lambdas:
| Class |
python | apache__airflow | providers/apache/kafka/tests/unit/apache/kafka/operators/test_consume.py | {
"start": 2963,
"end": 10804
} | class ____:
"""
Test ConsumeFromTopic
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="kafka_d",
conn_type="kafka",
extra=json.dumps(
{"socket.timeout.ms": 10, "bootstrap.servers": "localhost:9092", "group.id": "test_group"}
),
)
)
def test_operator(self):
operator = ConsumeFromTopicOperator(
kafka_config_id="kafka_d",
topics=["test"],
apply_function="unit.apache.kafka.operators.test_consume._no_op",
task_id="test",
poll_timeout=0.0001,
)
# execute the operator (this is essentially a no op as the broker isn't setup)
operator.execute(context={})
def test_operator_callable(self):
operator = ConsumeFromTopicOperator(
kafka_config_id="kafka_d",
topics=["test"],
apply_function=_no_op,
task_id="test",
poll_timeout=0.0001,
)
# execute the operator (this is essentially a no op as the broker isn't setup)
operator.execute(context={})
@pytest.mark.parametrize(
("max_messages", "expected_consumed_messages"),
[
[None, 1001], # Consume all messages
[100, 1000], # max_messages < max_batch_size -> max_messages is set to default max_batch_size
[2000, 1001], # max_messages > max_batch_size
],
)
def test_operator_consume(self, max_messages, expected_consumed_messages):
# Create mock consumer with tracking of consumed messages
_, mock_get_consumer, consumed_messages = create_mock_kafka_consumer(
message_count=1001, message_content="test_messages", track_consumed_messages=True
)
# Use the mock
with mock_get_consumer:
operator = ConsumeFromTopicOperator(
kafka_config_id="kafka_d",
topics=["test"],
task_id="test",
poll_timeout=0.0001,
max_messages=max_messages,
)
# execute the operator (this is essentially a no op as we're mocking the consumer)
operator.execute(context={})
assert consumed_messages[0] == expected_consumed_messages
@pytest.mark.parametrize(
"commit_cadence",
[
# will raise AirflowException for invalid commit_cadence
("invalid_cadence"),
("end_of_operator"),
("end_of_batch"),
("never"),
],
)
def test__validate_commit_cadence_on_construct(self, commit_cadence):
operator_kwargs = {
"kafka_config_id": "kafka_d",
"topics": ["test"],
"task_id": "test",
"commit_cadence": commit_cadence,
}
# early return for invalid commit_cadence
if commit_cadence == "invalid_cadence":
with pytest.raises(
AirflowException,
match=f"commit_cadence must be one of {VALID_COMMIT_CADENCE}. Got invalid_cadence",
):
ConsumeFromTopicOperator(**operator_kwargs)
return
# should not raise AirflowException for valid commit_cadence
ConsumeFromTopicOperator(**operator_kwargs)
@pytest.mark.parametrize(
("commit_cadence", "enable_auto_commit", "expected_warning"),
[
# will not log warning if set 'enable.auto.commit' to false
("end_of_operator", "false", False),
("end_of_batch", "false", False),
("never", "false", False),
# will log warning if set 'enable.auto.commit' to true
("end_of_operator", "true", True),
("end_of_batch", "true", True),
("never", "true", True),
# will log warning if 'enable.auto.commit' is not set
("end_of_operator", None, True),
("end_of_batch", None, True),
("never", None, True),
# will not log warning if commit_cadence is None, no matter the value of 'enable.auto.commit'
(None, None, False),
(None, "true", False),
(None, "false", False),
],
)
def test__validate_commit_cadence_before_execute(
self, commit_cadence, enable_auto_commit, expected_warning
):
# mock connection and hook
mocked_hook = mock.MagicMock()
mocked_hook.get_connection.return_value.extra_dejson = (
{} if enable_auto_commit is None else {"enable.auto.commit": enable_auto_commit}
)
with (
mock.patch(
"airflow.providers.apache.kafka.operators.consume.ConsumeFromTopicOperator.hook",
new_callable=mock.PropertyMock,
return_value=mocked_hook,
),
mock.patch(
"airflow.providers.apache.kafka.operators.consume.ConsumeFromTopicOperator.log"
) as mock_log,
):
operator = ConsumeFromTopicOperator(
kafka_config_id="kafka_d",
topics=["test"],
task_id="test",
commit_cadence=commit_cadence,
)
operator._validate_commit_cadence_before_execute()
if expected_warning:
expected_warning_template = (
"To respect commit_cadence='%s', "
"'enable.auto.commit' should be set to 'false' in the Kafka connection configuration. "
"Currently, 'enable.auto.commit' is not explicitly set, so it defaults to 'true', which causes "
"the consumer to auto-commit offsets every 5 seconds. "
"See: https://kafka.apache.org/documentation/#consumerconfigs_enable.auto.commit for more information"
)
mock_log.warning.assert_called_with(expected_warning_template, commit_cadence)
else:
mock_log.warning.assert_not_called()
@pytest.mark.parametrize(
("commit_cadence", "max_messages", "expected_commit_calls"),
[
# end_of_operator: should call commit once at the end
("end_of_operator", 1500, 1),
# end_of_batch: should call commit after each batch (2 batches for 1500 messages with default batch size 1000)
# and a final commit at the end of execute (since commit_cadence is not 'never')
("end_of_batch", 1500, 3),
# never: should never call commit
("never", 1500, 0),
],
)
def test_commit_cadence_behavior(self, commit_cadence, max_messages, expected_commit_calls):
# Create mock consumer with 1500 messages (will use 1001 for the first batch)
mock_consumer, mock_get_consumer, _ = create_mock_kafka_consumer(
message_count=1001, # Only need to create 1001 messages for the first batch
)
# Use the mocks
with mock_get_consumer:
# Create and execute the operator
operator = ConsumeFromTopicOperator(
kafka_config_id="kafka_d",
topics=["test"],
task_id="test",
poll_timeout=0.0001,
max_messages=max_messages,
commit_cadence=commit_cadence,
apply_function=_no_op,
)
operator.execute(context={})
# Verify commit was called the expected number of times
assert mock_consumer.commit.call_count == expected_commit_calls
# Verify consumer was closed
mock_consumer.close.assert_called_once()
| TestConsumeFromTopic |
python | realpython__materials | python-unittest/test_weekday.py | {
"start": 82,
"end": 576
} | class ____(unittest.TestCase):
@patch("weekday.datetime")
def test_is_weekday(self, mock_datetime):
mock_datetime.date.today.return_value = datetime.date(2024, 4, 4)
self.assertTrue(weekday.is_weekday())
@patch("weekday.datetime")
def test_is_weekend(self, mock_datetime):
mock_datetime.date.today.return_value = datetime.date(2024, 4, 6)
self.assertFalse(weekday.is_weekday())
if __name__ == "__main__":
unittest.main(verbosity=2)
| TestWeekday |
python | pytorch__pytorch | torch/_inductor/codegen/pallas.py | {
"start": 10256,
"end": 43240
} | class ____(SIMDKernel):
"""
Pallas kernel for elementwise operations with support for strided/scatter access.
Strategy:
- Convert index expressions to JAX-compatible array slicing
- Load/store using indexed access: "in_ptrX[slice]" or full-array "in_ptrX[...]"
- Compute expression with Python operators (compatible with jax.numpy broadcasting)
- Generate Python code that defines a Pallas kernel and a host entrypoint.
- Use async_compile.pallas path to compile and load Python code.
For GPU (Triton backend):
- Use masked loads/stores with power-of-2 block sizes to handle non-power-of-2 shapes
"""
overrides = PallasKernelOverrides # type: ignore[assignment]
kexpr: Callable[[sympy.Expr], str] = pexpr # Use Python expression printer
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Determine device type once at initialization
device = V.graph.get_current_device_or_throw()
self.is_gpu = device.type == "cuda"
self.use_masked_ops: bool | None = None
self.tensor_masks = {} # Map tensor name to mask variable name
def check_bounds(
self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool
) -> None:
"""Check array bounds for indirect indexing."""
# For now, skip explicit bounds checking as JAX/Pallas handles this internally
# TODO: Implement explicit bounds checking with assertions if needed
def _get_index_str(self, index: sympy.Expr) -> str:
"""
Convert an index expression to a string suitable for Pallas indexing.
Pallas operates on full arrays, so we need to convert index expressions
to JAX array slicing. For example:
- x0 -> "..." (contiguous access, full array)
- 2*x0 -> "::2" (strided access with stride 2)
- 2*x0 + 1 -> "1::2" (strided access with offset 1, stride 2)
Args:
index: The indexing expression to convert
Returns:
The indexing string to use in generated code
"""
# Prepare and simplify the index
prepared_index = self.prepare_indexing(index)
# For simple single-symbol access (contiguous case), we can use [...]
# which is more efficient as it operates on the entire array at once
if isinstance(prepared_index, sympy.Symbol):
return "..."
elif prepared_index.is_Integer:
# Scalar index
return str(prepared_index)
else:
# Complex expression (strided/scatter access)
# Try to extract stride and offset for common patterns
return self._convert_to_jax_slice(prepared_index)
def _convert_to_jax_slice(self, index: sympy.Expr) -> str:
"""
Convert a sympy index expression to JAX slice notation.
Handles common patterns like:
- stride*var -> ::stride
- stride*var + offset -> offset::stride
For more complex patterns, falls back to explicit indexing.
Uses BlockPatternMatcher for robust pattern matching.
"""
# Get the iteration variables for this kernel
if not self.range_trees:
return "..."
# Simplify the index
index = V.graph.sizevars.simplify(index)
free_symbols = index.free_symbols
# Get iteration variables from range_tree_nodes
iter_vars = OrderedSet(self.range_tree_nodes.keys())
# Find which iteration variable(s) are used
used_vars = free_symbols & iter_vars
if len(used_vars) == 0:
# No iteration variables, this is a constant index
return str(index)
elif len(used_vars) == 1:
# Single iteration variable - try to extract stride and offset using BlockPatternMatcher
var = next(iter(used_vars))
# Get the subexpression involving this variable
var_expr = BlockPatternMatcher.get_subexpr_involving_symbol(index, var)
# Try to match affine pattern: stride * var
stride = BlockPatternMatcher.match_affine_block_expr(var_expr, var)
if stride is not None:
# Extract the constant offset (terms not involving var)
offset = index - var_expr
offset = V.graph.sizevars.simplify(offset)
# Generate JAX slice notation
if stride == 1 and offset == 0:
# Contiguous access
return "..."
elif offset == 0:
# Pure stride: ::stride
stride_str = self.kexpr(stride)
return f"::{stride_str}"
else:
# Offset + stride: offset::stride
offset_str = self.kexpr(offset)
stride_str = self.kexpr(stride)
return f"{offset_str}::{stride_str}"
else:
# Couldn't match affine pattern, fall back to original logic
offset = index - var_expr
offset = V.graph.sizevars.simplify(offset)
if offset == 0 and var_expr == var:
# Just the variable itself, unit stride
return "..."
elif len(used_vars) > 1:
# Multi-dimensional indexing
# For contiguous multi-dim access, all terms should have unit stride
all_unit_stride = True
for var in used_vars:
var_expr = BlockPatternMatcher.get_subexpr_involving_symbol(index, var)
stride = BlockPatternMatcher.match_affine_block_expr(var_expr, var)
if stride != 1:
all_unit_stride = False
break
if all_unit_stride:
# Contiguous multi-dimensional access
return "..."
else:
# Strided multi-dimensional access - requires advanced indexing
# For now, use ellipsis which may work for many cases
# TODO: Implement proper multi-dimensional strided indexing
return "..."
# For complex cases, raise an error
return self._generate_index_array(index)
def _generate_index_array(self, index: sympy.Expr) -> str:
"""
Generate JAX code to compute an index array for complex indexing patterns.
For very complex patterns that can't be expressed as simple slices,
we need to compute the indices explicitly. This is not yet fully implemented.
"""
# For now, raise an error for complex patterns
# TODO: Implement advanced indexing support
raise Unsupported(
f"Pallas backend does not yet support complex indexing pattern: {index}"
)
def _has_iteration_vars(self, index: sympy.Expr) -> bool:
"""Check if index expression contains iteration variables (x0, x1, etc.)."""
free_symbols = index.free_symbols
iter_vars = OrderedSet(self.range_tree_nodes.keys())
return bool(free_symbols & iter_vars)
def _has_indirect_vars(self, index: sympy.Expr) -> bool:
"""Check if index expression contains indirect variables (tmp0, tmp1, etc.)."""
free_symbols = index.free_symbols
for sym in free_symbols:
if str(sym).startswith("tmp"):
return True
return False
def _get_index_expr(self, index: sympy.Expr) -> tuple[str, bool]:
"""
Get the index expression string and whether it needs flattening.
Returns:
Tuple of (index_str, needs_flatten) where needs_flatten indicates
if the buffer should be flattened before indexing (for mixed indexing).
"""
has_indirect = self._has_indirect_vars(index)
has_iter_vars = self._has_iteration_vars(index)
if has_indirect and has_iter_vars:
return self._handle_mixed_indexing(index), True
elif has_indirect:
return self.kexpr(index), False
else:
return self._get_index_str(index), False
def _determine_masked_ops_for_kernel(self) -> bool:
"""
Determine if we should use masked ops for this entire kernel.
Masked ops with pl.ds(block_size) flatten tensors to 1D, which works when:
1. We're on GPU (CUDA backend uses Triton which requires power-of-2 sizes)
2. All tensors are already 1D (so flattening doesn't change dimensionality)
3. All tensors have the same size (so broadcasting works correctly)
With per-tensor masks, each tensor gets its own mask based on its size.
This should be called once in codegen_kernel() before generating the kernel body.
"""
if not self.is_gpu:
return False
# Get all buffer sizes
# We need ALL buffers - inputs, outputs, and intermediates
all_buffer_names = OrderedSet()
# Get input buffers from args
all_buffer_names.update(self.args.input_buffers.keys())
# Get output buffers from args
all_buffer_names.update(self.args.output_buffers.keys())
# Also get any intermediate buffers from the graph
all_buffer_names.update(V.graph.name_to_buffer.keys())
# Get shapes and sizes for all buffers
buf_info = []
for buf_name in all_buffer_names:
try:
buf = V.graph.get_buffer(buf_name)
size = buf.get_size()
shape = tuple(int(s) if hasattr(s, "__int__") else s for s in size)
# Calculate flattened size
total_size = 1
for s in size:
if hasattr(s, "__int__"):
total_size *= int(s)
else:
total_size *= s
buf_info.append((buf_name, shape, total_size))
except Exception:
pass
# Only use masked ops if:
# 1. All buffers are 1D (single-element shape tuples)
# 2. All buffers have the same size
# This ensures that pl.ds(block_size) flattening works correctly
# and masks can be properly applied without broadcasting issues.
if buf_info and len(buf_info) > 0:
# Check if all are 1D
all_1d = all(len(shape) == 1 for _, shape, _ in buf_info)
if not all_1d:
return False
# Check if all have the same size
first_size = buf_info[0][2]
all_same_size = all(size == first_size for _, _, size in buf_info)
return all_same_size
return False
def _get_or_create_mask(self, buf_name: str) -> str:
"""Get or create a unique mask variable for a buffer."""
if buf_name not in self.tensor_masks:
mask_var = f"mask_{buf_name}"
self.tensor_masks[buf_name] = mask_var
return self.tensor_masks[buf_name]
def load(self, name: str, index: sympy.Expr) -> CSEVariable: # type: ignore[override]
buf = self.args.input(name)
dtype = V.graph.get_dtype(name)
# Determine masked ops strategy on first load/store if not yet determined
if self.use_masked_ops is None:
self.use_masked_ops = self._determine_masked_ops_for_kernel()
index_str, needs_flatten = self._get_index_expr(index)
# Build load expression using string concatenation
use_masked = index_str == "..." and not needs_flatten and self.use_masked_ops
if use_masked:
# GPU masked load: flatten tensor and apply per-tensor mask
mask_var = self._get_or_create_mask(name)
load_expr = f"pltriton.load({buf}.at[pl.ds(block_size)], mask={mask_var})"
elif needs_flatten:
# Flatten then index for non-contiguous access
load_expr = f"{buf}[...].flatten()[{index_str}]"
else:
# Direct indexing for contiguous access
load_expr = f"{buf}[{index_str}]"
return self.cse.generate(
self.compute,
load_expr,
dtype=dtype,
)
def _handle_mixed_indexing(self, index: sympy.Expr) -> str:
"""
Handle indexing with both indirect variables and iteration variables.
For example, x[indices, :] generates index = i0 + stride * tmp0
where tmp0 is loaded from indices and i0 is the iteration variable.
We need to convert this to JAX advanced indexing with proper broadcasting.
"""
# Get iteration variables
iter_vars = OrderedSet(self.range_tree_nodes.keys())
free_symbols = index.free_symbols
used_iter_vars = sorted(free_symbols & iter_vars, key=str)
if len(used_iter_vars) == 0:
return self.kexpr(index)
index_str = self.kexpr(index)
indirect_vars = [str(sym) for sym in free_symbols if str(sym).startswith("tmp")]
for i, var in enumerate(used_iter_vars):
var_name = str(var)
if var in self.range_tree_nodes:
range_entry = self.range_tree_nodes[var]
range_size = range_entry.length
arange_expr = f"jnp.arange({self.kexpr(range_size)})"
if indirect_vars:
arange_expr = f"{arange_expr}[None, :]"
index_str = index_str.replace(var_name, arange_expr)
# Reshape indirect variables for proper broadcasting
for indirect_var in indirect_vars:
index_str = index_str.replace(indirect_var, f"{indirect_var}[:, None]")
return index_str
def store(
self, name: str, index: sympy.Expr, value: CSEVariable, mode: Any = None
) -> None: # type: ignore[override]
if mode is not None:
raise Unsupported("pallas store mode not supported")
out = self.args.output(name)
self.store_buffer_names.add(name)
# Determine masked ops strategy on first load/store if not yet determined
if self.use_masked_ops is None:
self.use_masked_ops = self._determine_masked_ops_for_kernel()
# Check if this is a scalar output (reduction to scalar)
# Only shape () is a true scalar, not (1,) which is a 1-element tensor
try:
buf = V.graph.get_buffer(name)
output_shape = buf.get_size()
is_scalar = len(output_shape) == 0
except Exception:
is_scalar = False
if is_scalar:
# For scalar outputs, use [...] to assign the entire scalar
store_expr = f"{out}[...] = {value}"
else:
index_str, needs_flatten = self._get_index_expr(index)
# Build store expression using string concatenation
use_masked = (
index_str == "..." and not needs_flatten and self.use_masked_ops
)
if use_masked:
# GPU masked store: flatten tensor and apply per-tensor mask
mask_var = self._get_or_create_mask(name)
store_expr = f"pltriton.store({out}.at[pl.ds(block_size)], {value}, mask={mask_var})"
else:
# Direct indexed assignment
store_expr = f"{out}[{index_str}] = {value}"
self.stores.writeline(store_expr)
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: Union[CSEVariable, tuple[CSEVariable, ...]],
) -> Union[CSEVariable, tuple[CSEVariable, ...]]: # type: ignore[override]
"""
Generate code for reduction operations in JAX/Pallas.
Reductions in Pallas work by:
1. Loading the input data into the kernel
2. Applying JAX reduction operations (jnp.sum, jnp.max, etc.)
3. Storing the reduced result
The reduction happens over the loaded block of data.
"""
assert self.inside_reduction
if isinstance(value, tuple):
raise Unsupported(
"Tuple reductions (e.g., welford_combine) not supported in Pallas backend"
)
# Check if this reduction is already cached
cache_key = (src_dtype, reduction_type, value)
if cache_key in self.cse.reduction_cache:
return self.cse.reduction_cache[cache_key]
# Map reduction types to JAX functions
reduction_ops = {
"sum": "jnp.sum",
"prod": "jnp.prod", # CPU only - not supported in Pallas GPU (Triton) backend
"max": "jnp.max",
"min": "jnp.min",
"any": "jnp.any",
}
if reduction_type == "xor_sum":
reduction_expr = f"jnp.bitwise_xor.reduce({value})"
elif reduction_type in reduction_ops:
# Apply reduction over all axes to get scalar result
reduction_expr = f"{reduction_ops[reduction_type]}({value})"
else:
raise Unsupported(
f"Reduction type '{reduction_type}' not yet supported in Pallas backend. "
f"Supported types: {list(reduction_ops.keys())}, xor_sum"
)
# Generate CSE variable for the reduction result
result = self.cse.generate(
self.compute,
reduction_expr,
dtype=dtype,
)
# Cache the result
self.cse.reduction_cache[cache_key] = result
return result
@staticmethod
def _buffer_is_contiguous(buffer_name: str) -> bool:
buf = V.graph.get_buffer(buffer_name)
layout = buf.get_layout()
return layout.is_contiguous()
def codegen_kernel(self, name: Optional[str] = None) -> str: # type: ignore[override]
"""
Generate the complete Pallas kernel code as a Python string.
This includes:
- Import statements for JAX/Pallas
- The kernel function that operates on refs
- The main wrapper function that handles PyTorch<->JAX conversions via DLPack
Args:
name: Optional kernel name (will use placeholder if not provided)
Returns:
str: Complete Python source code for the Pallas kernel
"""
# Ensure one (1) output for now
live_outs = list(self.args.live_output_buffers())
if len(live_outs) != 1:
raise Unsupported(
"Pallas backend currently supports single-output elementwise kernels only"
)
code = IndentedBuffer()
# Define the Pallas kernel: accepts refs, uses broadcasted expressions
arg_defs, _, _, _ = self.args.python_argdefs()
kernel_params = [a.name for a in arg_defs]
pure_out_params = [p for p in kernel_params if p.startswith("out_ptr")]
output_params = [
p for p in kernel_params if p.startswith(("out_ptr", "in_out_ptr"))
]
if not output_params:
raise RuntimeError("Pallas backend requires at least one output buffer")
output_buffer_lookup = {
inner: outer
for outer, inner in self.args.output_buffers.items()
if isinstance(inner, str)
}
kernel_name = name or "<KERNEL_NAME>"
interpret_is_cpu = V.graph.get_current_device_or_throw().type == "cpu"
is_tpu = torch._inductor.config._debug_cpu_to_tpu_pallas
if is_tpu:
if not torch._inductor.config.pallas_take_first_jax_device_only:
raise RuntimeError(
"Pallas backend currently only supports using the first JAX device."
)
if not has_tpu_pallas():
raise RuntimeError(
"PALLAS_TARGET_TPU is set, but no TPU device was found. "
"Please make sure that you have a TPU available and that JAX is configured correctly."
)
interpret_literal = "True" if interpret_is_cpu else "False"
# For GPU (Triton backend), import pltriton for masked loads/stores
# Import math at module level if we'll use it for masked ops
imports = (
"""
import functools
"""
+ ("import math\n " if self.use_masked_ops else "")
+ """import torch
import jax
import jax.numpy as jnp
from jax.experimental import pallas as pl
from torch._inductor.runtime.runtime_utils import torch_dtype_to_jax_runtime
"""
+ (
"\n from jax.experimental.pallas import triton as pltriton"
if not interpret_is_cpu
else ""
)
+ (
"\n from torch._inductor.runtime.runtime_utils import next_power_of_2"
if self.use_masked_ops
else ""
)
)
code.splice(imports, strip=True)
aliasable_flags: dict[str, bool] = {}
for param in pure_out_params:
buffer_name = output_buffer_lookup.get(param)
is_contiguous = buffer_name is not None and self._buffer_is_contiguous(
buffer_name
)
aliasable_flags[param] = (not interpret_is_cpu) and is_contiguous
alias_params = [
f"{param}_alias" for param in pure_out_params if aliasable_flags[param]
]
pointer_tail = [
p for p in kernel_params if p.startswith(("in_out_ptr", "in_ptr"))
]
kernel_input_params = alias_params + pointer_tail
full_kernel_params = alias_params + kernel_params
non_alias_out_set = OrderedSet(
[name for name, flag in aliasable_flags.items() if not flag]
)
copy_output_indices = [
idx for idx, name in enumerate(output_params) if name in non_alias_out_set
]
self.aliasable_out_ptrs = aliasable_flags
# For GPU with masked ops, add block_size as keyword-only parameter
kernel_signature = (
f"def {kernel_name}_kernel({', '.join(full_kernel_params)}"
+ (", *, block_size" if self.use_masked_ops else "")
+ "):"
)
code.writeline(kernel_signature)
with code.indent():
# For masked ops on GPU, generate per-tensor masks at the start
if self.use_masked_ops and self.tensor_masks:
# Create a mapping from buffer name to parameter name
buf_to_param = {}
for outer, inner in self.args.input_buffers.items():
buf_to_param[outer] = inner if isinstance(inner, str) else outer
for outer, inner in self.args.output_buffers.items():
buf_to_param[outer] = inner if isinstance(inner, str) else outer
# Generate a mask for each tensor that was accessed
for buf_name, mask_var in sorted(self.tensor_masks.items()):
param_name = buf_to_param.get(buf_name, buf_name)
# Find the corresponding parameter in kernel_params
matching_param = None
for p in kernel_params:
# Check if this parameter corresponds to the buffer
if param_name == p or buf_name in str(p):
matching_param = p
break
if matching_param:
# Calculate flattened size for this tensor
code.writeline(f"# Mask for {buf_name}")
code.writeline(f"{mask_var}_size = {matching_param}.size")
code.writeline(
f"{mask_var} = jnp.arange(block_size) < {mask_var}_size"
)
# Emit compute (CSE) and store lines; they reference *_ptr[index] directly.
# Iteration variables are implicitly handled by JAX vectorization, so
# explicit indices should be JAX-traced values.
for line in self.compute._lines:
code.writeline(str(line))
for line in self.stores._lines:
code.writeline(str(line))
jit_wrapper_name = f"{kernel_name}_jit_wrapper"
donate_indices = []
for idx, name in enumerate(kernel_input_params):
if (name in alias_params) or name.startswith("in_out_ptr"):
donate_indices.append(idx + 2)
if donate_indices:
donate_literal = "(" + ", ".join(str(x) for x in donate_indices) + ",)"
else:
donate_literal = "()"
code.writeline(
"@functools.partial("
"jax.jit, static_argnums=(0, 1), donate_argnums="
f"{donate_literal})"
)
code.writeline(
f"def {jit_wrapper_name}(out_shapes, out_dtypes, {', '.join(kernel_input_params)}):"
)
with code.indent():
code.writeline("out_specs = tuple(")
code.writeline(" jax.ShapeDtypeStruct(shape, dtype)")
code.writeline(" for shape, dtype in zip(out_shapes, out_dtypes)")
code.writeline(")")
# For masked ops, calculate block_size as next power of 2 of max flattened size
if self.use_masked_ops:
code.writeline(
"# Calculate block_size as next power of 2 for Triton backend"
)
code.writeline("# Find maximum flattened size across all tensors")
code.writeline("max_size = 0")
# Calculate size for all input tensors
for param in kernel_input_params:
code.writeline(f"max_size = max(max_size, {param}.size)")
# Also consider output shapes
code.writeline("for shape in out_shapes:")
code.writeline(
" tensor_size = shape[0] if len(shape) == 1 else math.prod(shape)"
)
code.writeline(" max_size = max(max_size, tensor_size)")
code.writeline("block_size = next_power_of_2(max_size)")
alias_pairs: list[tuple[int, int]] = []
for out_idx, name in enumerate(output_params):
if name.startswith("out_ptr"):
if aliasable_flags.get(name, False):
alias_name = f"{name}_alias"
input_idx = kernel_input_params.index(alias_name)
alias_pairs.append((input_idx, out_idx))
else:
input_idx = kernel_input_params.index(name)
alias_pairs.append((input_idx, out_idx))
alias_map_literal = ", ".join(f"{i}: {o}" for (i, o) in alias_pairs)
# For masked ops, wrap kernel with functools.partial to pass block_size
kernel_arg = (
f"functools.partial({kernel_name}_kernel, block_size=block_size),"
if self.use_masked_ops
else f"{kernel_name}_kernel,"
)
code.writeline("return pl.pallas_call(")
code.writeline(" " + kernel_arg)
code.writeline(" out_shape=out_specs,")
code.writeline(f" interpret={interpret_literal},")
code.writeline(" grid=(1,),")
code.writeline(
f" input_output_aliases={{ {alias_map_literal} }},"
if alias_pairs
else " input_output_aliases={},"
)
code.writeline(")(")
code.writeline(f" {', '.join(kernel_input_params)},")
code.writeline(")")
main_name = f"{kernel_name}_main"
code.writeline(
f"def {main_name}({', '.join(full_kernel_params)}, stream=None):"
)
with code.indent():
code.writeline("# Enable JAX x64 mode for float64/int64 support")
code.writeline("jax.config.update('jax_enable_x64', True)")
if alias_params:
code.writeline("# Convert Torch -> JAX for donated outputs")
for alias_name in alias_params:
# TODO: The `jax.device_put` path is a temporary workaround for a Mosaic compiler bug
# that occurs with DLPack. Once TorchTPU provides a direct method for placing a
# `torch.Tensor` on a TPU device, this should be reverted to use the
# `jax.dlpack.from_dlpack` path.
if is_tpu:
code.writeline(
f"{alias_name}_jax = jax.device_put({alias_name}.cpu().numpy(), device=jax.devices('tpu')[0])"
)
else:
code.writeline(
f"{alias_name}_jax = jax.dlpack.from_dlpack({alias_name})"
)
code.writeline("# Convert Torch -> JAX for in-place tensors")
for ptr in pointer_tail:
if ptr.startswith("in_out_ptr"):
if is_tpu:
code.writeline(
f"{ptr}_jax = jax.device_put({ptr}.cpu().numpy(), device=jax.devices('tpu')[0])"
)
else:
code.writeline(f"{ptr}_jax = jax.dlpack.from_dlpack({ptr})")
code.writeline("# Convert Torch -> JAX for inputs")
for ptr in pointer_tail:
if ptr.startswith("in_ptr"):
if is_tpu:
code.writeline(
f"{ptr}_jax = jax.device_put({ptr}.cpu().numpy(), device=jax.devices('tpu')[0])"
)
else:
code.writeline(
f"{ptr}_jax = jax.dlpack.from_dlpack({ptr}.contiguous())"
)
code.writeline("# Prepare output metadata from PyTorch tensor")
code.writeline(
"out_shapes = ("
+ ", ".join([f"tuple({name}.shape)" for name in output_params])
+ ",)"
)
code.writeline(
"out_dtypes = ("
+ ", ".join(
[
f"torch_dtype_to_jax_runtime({name}.dtype)"
for name in output_params
]
)
+ ",)"
)
arg_name_map: dict[str, str] = {}
for alias_name in alias_params:
arg_name_map[alias_name] = f"{alias_name}_jax"
for ptr in pointer_tail:
arg_name_map[ptr] = f"{ptr}_jax"
if kernel_input_params:
alias_args_str = ", ".join(
arg_name_map[name] for name in kernel_input_params
)
code.writeline(
f"res = {jit_wrapper_name}(out_shapes, out_dtypes, {alias_args_str})"
)
else:
code.writeline(f"res = {jit_wrapper_name}(out_shapes, out_dtypes)")
if copy_output_indices:
code.writeline(
"result_values = res if isinstance(res, tuple) else (res,)"
)
for idx in copy_output_indices:
name = output_params[idx]
if is_tpu:
code.writeline(
f"res_cpu = jax.device_get(result_values[{idx}])"
)
code.writeline(f"{name}.copy_(torch.from_dlpack(res_cpu))")
else:
code.writeline(
f"{name}.copy_(torch.from_dlpack(result_values[{idx}]))"
)
return code.getvalue()
def call_kernel(self, name: str, node: Optional[IRNode] = None) -> None: # type: ignore[override]
"""Generate the Python code that calls this Pallas kernel."""
wrapper = V.graph.wrapper_code
arg_defs, call_args, _, _ = self.args.python_argdefs()
kernel_param_names = [a.name for a in arg_defs]
pure_out_params = [p for p in kernel_param_names if p.startswith("out_ptr")]
call_arg_strs = list(map(str, call_args))
aliasable = getattr(self, "aliasable_out_ptrs", {})
alias_call_args = [
call_arg_strs[kernel_param_names.index(p)]
for p in pure_out_params
if aliasable.get(p, False)
]
# Generate kernel call: kernel_name.run(arg1, arg2, ...)
# Note: async_compile.pallas loads {name}_main function and wraps it in PallasKernelWrapper
# which exposes a run() method
kernel_call = f"{name}.run({', '.join(alias_call_args + call_arg_strs)})"
wrapper.writeline(kernel_call)
| PallasKernel |
python | getsentry__sentry | tests/sentry/replays/endpoints/test_data_export_notifications.py | {
"start": 197,
"end": 1175
} | class ____(APITestCase):
endpoint = "sentry-api-0-data-export-notifications"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
@patch("sentry.replays.endpoints.data_export_notifications.retry_transfer_job_run")
def test_simple(self, retry_transfer_job_run) -> None: # type: ignore[no-untyped-def]
retry_transfer_job_run.return_value = None
data = {
"data": base64.b64encode(
json.dumps(
{
"transferOperation": {
"status": "FAILED",
"transferJobName": "test",
"projectId": "test-project",
}
}
).encode()
).decode("utf-8")
}
self.get_success_response(method="post", **data, status_code=200)
assert retry_transfer_job_run.called
| DataExportNotificationsTestCase |
python | openai__openai-python | src/openai/types/realtime/response_create_event_param.py | {
"start": 310,
"end": 665
} | class ____(TypedDict, total=False):
type: Required[Literal["response.create"]]
"""The event type, must be `response.create`."""
event_id: str
"""Optional client-generated ID used to identify this event."""
response: RealtimeResponseCreateParamsParam
"""Create a new Realtime response with these parameters"""
| ResponseCreateEventParam |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 7270,
"end": 7814
} | class ____(StrEnum):
"""§7.4 of the 1.7 and 2.0 references."""
ASCII_HEX_DECODE = "/ASCIIHexDecode" # abbreviation: AHx
ASCII_85_DECODE = "/ASCII85Decode" # abbreviation: A85
LZW_DECODE = "/LZWDecode" # abbreviation: LZW
FLATE_DECODE = "/FlateDecode" # abbreviation: Fl
RUN_LENGTH_DECODE = "/RunLengthDecode" # abbreviation: RL
CCITT_FAX_DECODE = "/CCITTFaxDecode" # abbreviation: CCF
DCT_DECODE = "/DCTDecode" # abbreviation: DCT
JPX_DECODE = "/JPXDecode"
JBIG2_DECODE = "/JBIG2Decode"
| FilterTypes |
python | walkccc__LeetCode | solutions/1048. Longest String Chain/1048.py | {
"start": 0,
"end": 425
} | class ____:
def longestStrChain(self, words: list[str]) -> int:
wordsSet = set(words)
@functools.lru_cache(None)
def dp(s: str) -> int:
"""Returns the longest chain where s is the last word."""
ans = 1
for i in range(len(s)):
pred = s[:i] + s[i + 1:]
if pred in wordsSet:
ans = max(ans, dp(pred) + 1)
return ans
return max(dp(word) for word in words)
| Solution |
python | google__jax | jax/_src/custom_batching.py | {
"start": 6630,
"end": 15238
} | class ____:
def __init__(self, rule: Callable, debug: core.DebugInfo):
functools.update_wrapper(self, rule)
self.rule = rule
self.debug = debug
def __call__(self, axis_size, all_in_batched, *all_args):
_, args = all_args
consts_batched, in_batched = all_in_batched
assert not any(tree_util.tree_leaves(consts_batched)), consts_batched
return call_rule(self.rule, axis_size, in_batched, args)
def __str__(self):
return str(self.rule)
def ensure_list(xs):
return xs if type(xs) is list else list(xs)
def rule_name(rule):
return getattr(rule, '__name__', '<unnamed rule>')
def call_rule(rule, axis_size, in_batched, args):
return rule(axis_size, ensure_list(in_batched), *args)
def check_vmap_rule_trees(rule, original_out_tree, out_tree, out_batched_tree):
if out_tree != out_batched_tree:
raise ValueError(
'structure of output value and output batching specification returned '
f'by custom vmap rule ({rule_name(rule)}) do not match.\n'
f'Output values: {out_tree}\n'
f'Batching spec: {out_batched_tree}')
if out_tree != original_out_tree:
raise ValueError(
f'structure of output returned by custom vmap rule ({rule_name(rule)}) '
'does not match that of original custom-vmapped function.\n'
f'Original output: {original_out_tree}\n'
f'Rule output: {out_tree}')
# Like batching.bdim_at_front, but doesn't broadcast if not mapped
def maybe_bdim_at_front(x, bdim):
if bdim is not_mapped:
return x
else:
return util.moveaxis(x, bdim, 0)
# Like batching.batch except (a) not curried and (b) returns inferred output
# axes instead of accepting and matching a given spec of output axes. Assumes
# `f` is pytree-flattened
def vmap_unrestricted(f: lu.WrappedFun, *args, in_axes, axis_name, axis_size):
axis_data = batching.AxisData(axis_name, axis_size, None, None)
tag = core.TraceTag()
f, out_axes = batching.batch_subtrace(f, tag, axis_data, in_axes)
outs = f.call_wrapped(*args)
return outs, out_axes()
### custom_vmap_p rules
def custom_vmap_impl(*args, call, rule, in_tree, out_tree):
del rule, in_tree, out_tree
return core.jaxpr_as_fun(call)(*args)
def custom_vmap_batching(args_flat, dims, *, call, rule, in_tree, out_tree):
del call
axis_size, = {x.shape[d] for x, d in zip(args_flat, dims) if d is not None}
args_flat = map(maybe_bdim_at_front, args_flat, dims)
flat_in_batched = [d is not not_mapped for d in dims]
args = tree_unflatten(in_tree, args_flat)
in_batched = tree_unflatten(in_tree, flat_in_batched)
out, out_batched = call_rule(rule, axis_size, in_batched, args)
flat_outs, tree1 = tree_flatten(out)
flat_out_batched, tree2 = tree_flatten(out_batched)
check_vmap_rule_trees(rule, out_tree, tree1, tree2)
flat_out_dims = [0 if b else not_mapped for b in flat_out_batched]
return flat_outs, flat_out_dims
def custom_vmap_abstract_eval(*in_avals, call, **_):
return call.out_avals
def custom_vmap_jvp(primals, tangents, *,
                    call: core.ClosedJaxpr,
                    rule: ClosedRule,
                    in_tree: tree_util.PyTreeDef, out_tree: tree_util.PyTreeDef):
  """JVP rule for custom_vmap_p.

  Differentiates through the stored jaxpr while arranging for the *user's*
  vmap rule to still apply if the JVP'd computation is later vmapped.
  """
  def jvp_of_rule_rule(axis_size: int, in_batched, primals, tangents):
    # Vmap rule for the JVP'd primitive.  Leaves batched in both a primal
    # and its tangent go through the user rule directly; leaves batched on
    # only one side are handled by an outer vmap over ``api.jvp``.
    in_batched_ps, in_batched_ts = in_batched
    mutually_batched = tree_map(operator.and_, in_batched_ps, in_batched_ts)
    extra_batched_ps = tree_map(lambda pb, tb: 0 if pb and not tb else None,
                                in_batched_ps, in_batched_ts)
    extra_batched_ts = tree_map(lambda pb, tb: 0 if tb and not pb else None,
                                in_batched_ps, in_batched_ts)

    out_mutually_batched = lu.Store()
    flat_ps_ts, tree_ps_ts = tree_flatten((primals, tangents))
    flat_extra_batched_ps_ts, tree_ps_ts2 = tree_flatten(
        (extra_batched_ps, extra_batched_ts),
        is_leaf=lambda x: x is None)

    # TODO(frostig): assert these also equal:
    #   treedef_tuple((in_tree, in_tree))
    # once https://github.com/jax-ml/jax/issues/9066 is fixed
    assert tree_ps_ts == tree_ps_ts2
    del tree_ps_ts2

    def to_jvp(*primals):
      # Apply the user rule to the mutually-batched leaves, stashing the
      # "is batched" output flags for merging later.
      out, out_batched = call_rule(rule, axis_size, mutually_batched, primals)
      check_vmap_rule_trees(
          rule, out_tree, tree_structure(out), tree_structure(out_batched))
      out_mutually_batched.store(out_batched)
      return out
    api_util.save_wrapped_fun_debug_info(to_jvp, call.jaxpr.debug_info)

    def to_vmap_over_extra_batched_dims(primals, tangents):
      return api.jvp(to_jvp, primals, tangents)

    to_vmap_over_extra_batched_dims_flat, out_tree2 = api_util.flatten_fun_nokwargs(
        lu.wrap_init(to_vmap_over_extra_batched_dims,
                     # TODO(necula): fix the debug_info calling convention
                     debug_info=call.jaxpr.debug_info),
        tree_ps_ts)

    flat_out_ps_ts, flat_out_axes = vmap_unrestricted(
        to_vmap_over_extra_batched_dims_flat, *flat_ps_ts,
        in_axes=flat_extra_batched_ps_ts,
        axis_name=core.no_axis_name, axis_size=axis_size)

    # Flat outputs come back as primals followed by tangents; split them.
    n, ragged = divmod(len(flat_out_ps_ts), 2)
    assert not ragged
    flat_out_ps, flat_out_ts = flat_out_ps_ts[:n], flat_out_ps_ts[n:]
    flat_out_axes_p, flat_out_axes_t = flat_out_axes[:n], flat_out_axes[n:]
    flat_out_ps = map(maybe_bdim_at_front, flat_out_ps, flat_out_axes_p)
    flat_out_extra_batched_ps = [d is not not_mapped for d in flat_out_axes_p]
    flat_out_ts = map(maybe_bdim_at_front, flat_out_ts, flat_out_axes_t)
    flat_out_extra_batched_ts = [d is not not_mapped for d in flat_out_axes_t]

    out_ps, out_ts = tree_unflatten(
        out_tree2(), [*flat_out_ps, *flat_out_ts])
    out_extra_batched_ps, out_extra_batched_ts = tree_unflatten(
        out_tree2(), [*flat_out_extra_batched_ps, *flat_out_extra_batched_ts])

    # An output counts as batched if either the user rule batched it or the
    # outer vmap over the one-sided extra batch dims did.
    out_batched_ps = tree_map(
        operator.or_, out_mutually_batched.val, out_extra_batched_ps)
    out_batched_ts = tree_map(
        operator.or_, out_mutually_batched.val, out_extra_batched_ts)

    return (out_ps, out_ts), (out_batched_ps, out_batched_ts)

  tangents = map(ad.instantiate_zeros, tangents)
  # JVP the stored jaxpr and re-bind the primitive so the rule above becomes
  # the vmap rule of the differentiated computation.
  jvp_call, _ = ad.jvp_jaxpr(call, [True] * len(primals), True)
  jvp_in_tree = treedef_tuple((in_tree, in_tree))
  jvp_out_tree = treedef_tuple((out_tree, out_tree))
  outs = custom_vmap_p.bind(
      *primals, *tangents,
      call=jvp_call, rule=jvp_of_rule_rule,
      in_tree=jvp_in_tree, out_tree=jvp_out_tree)
  assert len(outs) % 2 == 0, len(outs)
  out_primals, out_tangents = util.split_list(outs, [len(outs) // 2])
  return out_primals, out_tangents
# Primitive setup: ``custom_vmap_call`` evaluates its stored jaxpr everywhere
# except under vmap, where the user-supplied rule (custom_vmap_batching) runs.
custom_vmap_p = core.Primitive('custom_vmap_call')
custom_vmap_p.multiple_results = True
custom_vmap_p.def_impl(custom_vmap_impl)
custom_vmap_p.def_abstract_eval(custom_vmap_abstract_eval)
batching.primitive_batchers[custom_vmap_p] = custom_vmap_batching
ad.primitive_jvps[custom_vmap_p] = custom_vmap_jvp
pxla.register_initial_style_primitive(custom_vmap_p)
# Lowering simply inlines the underlying jaxpr; any vmap has already been
# handled at trace time, so the plain impl is correct here.
mlir.register_lowering(custom_vmap_p, mlir.lower_fun(
    custom_vmap_impl, multiple_results=True))
# -- custom vmap applications
def tree_split(mask, tree):
  """Split ``tree`` leafwise into (selected, rest) by a boolean ``mask`` tree.

  Leaves where the mask is truthy go into the first result; the rest go
  into the second.  Unselected positions are filled with ``None``.
  """
  selected = tree_map(lambda keep, leaf: leaf if keep else None, mask, tree)
  rest = tree_map(lambda keep, leaf: None if keep else leaf, mask, tree)
  return selected, rest
def tree_merge(mask, lhs_tree, rhs_tree):
  """Inverse of ``tree_split``: pick leaves from lhs where mask is truthy, else rhs."""
  def choose(keep, left, right):
    if keep:
      return left
    return right
  return tree_map(choose, mask, lhs_tree, rhs_tree)
def sequential_vmap(f):
  """A special case of ``custom_vmap`` that uses a loop.

  A function decorated with ``sequential_vmap`` will be called sequentially
  within a loop when batched. This is useful for functions that don't natively
  support batch dimensions.

  For example:

  >>> @jax.custom_batching.sequential_vmap
  ... def f(x):
  ...   jax.debug.print("{}", x)
  ...   return x + 1
  ...
  >>> jax.vmap(f)(jnp.arange(3))
  0
  1
  2
  Array([1, 2, 3], dtype=int32)

  Where the print statements demonstrate that this :py:func:`~jax.vmap` is being
  generated using a loop.

  See the documentation for :py:class:`~jax.custom_batching.custom_vmap` for
  more details.
  """
  # Imported lazily to avoid an import cycle with the control-flow module.
  from jax._src.lax import control_flow  # pytype: disable=import-error
  f = custom_vmap(f)

  @f.def_vmap
  def rule(axis_size, in_batched, *args):
    del axis_size

    def to_map(mapped_args):
      # Recombine the per-example (mapped) leaves with the broadcast
      # (unbatched) leaves before calling the wrapped function.
      args = tree_merge(in_batched, mapped_args, bcast_args)
      return f(*args)

    # Split args into leaves carrying a batch dim and leaves that don't;
    # only the former are fed through the sequential map.
    mapped_args, bcast_args = tree_split(in_batched, list(args))
    out = control_flow.map(to_map, mapped_args)
    # Every output leaf acquires the mapped axis from ``control_flow.map``.
    out_batched = tree_map(lambda _: True, out)

    return out, out_batched

  return f
| ClosedRule |
python | astropy__astropy | astropy/table/index.py | {
"start": 32522,
"end": 36358
} | class ____:
"""
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
"""
_col_subclasses = {}
def __init__(self, table, mode):
    """Set up an index mode context.

    Parameters
    ----------
    table : Table
        The table to which the mode should be applied
    mode : str
        Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
        In 'discard_on_copy' mode,
        indices are not copied whenever columns or tables are copied.
        In 'freeze' mode, indices are not modified whenever columns are
        modified; at the exit of the context, indices refresh themselves
        based on column values. This mode is intended for scenarios in
        which one intends to make many additions or modifications on an
        indexed column.
        In 'copy_on_getitem' mode, indices are copied when taking column
        slices as well as table slices, so col[i0:i1] will preserve
        indices.
    """
    self.table = table
    self.mode = mode
    # Original column classes, restored on exit (copy_on_getitem only).
    self._orig_classes = []
    allowed_modes = ("freeze", "discard_on_copy", "copy_on_getitem")
    if mode not in allowed_modes:
        raise ValueError(
            "Expected a mode of either 'freeze', "
            "'discard_on_copy', or 'copy_on_getitem', got "
            f"'{mode}'"
        )
def __enter__(self):
    """Activate the requested indexing mode on the table."""
    mode = self.mode
    if mode == "discard_on_copy":
        self.table._copy_indices = False
        return
    if mode == "copy_on_getitem":
        # Shim each column's class so slicing also copies indices.
        for col in self.table.columns.values():
            self._orig_classes.append(col.__class__)
            col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
        return
    # 'freeze': stop indices from updating on column modification.
    for index in self.table.indices:
        index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
    """Undo whatever ``__enter__`` changed for the active mode."""
    mode = self.mode
    if mode == "discard_on_copy":
        self.table._copy_indices = True
        return
    if mode == "copy_on_getitem":
        # Restore original classes in reverse order of shimming.
        for col in reversed(self.table.columns.values()):
            col.__class__ = self._orig_classes.pop()
        return
    # 'freeze': unfreeze and rebuild each index from current column values.
    for index in self.table.indices:
        index._frozen = False
        index.reload()
def _get_copy_on_getitem_shim(self, cls):
    """
    Return (and cache) a subclass of the column's class which overrides
    ``__getitem__`` so that, when returning a slice of the column, the
    relevant indices are also copied over to the slice.

    Ideally, rather than shimming in a new ``__class__`` we would be able
    to just flip a flag that is checked by the base class's
    ``__getitem__``. Unfortunately, since the flag needs to be a Python
    variable, this slows down ``__getitem__`` too much in the more common
    case where a copy of the indices is not needed. See the docstring for
    ``astropy.table._column_mixins`` for more information on that.
    """
    cached = self._col_subclasses.get(cls)
    if cached is not None:
        return cached

    def __getitem__(self, item):
        value = cls.__getitem__(self, item)
        if type(value) is type(self):
            # Same-class result means a slice/view: carry the indices over.
            value = self.info.slice_indices(value, item, len(self))
        return value

    shim_name = f"_{cls.__name__}WithIndexCopy"
    shim_cls = type(str(shim_name), (cls,), {"__getitem__": __getitem__})
    self._col_subclasses[cls] = shim_cls
    return shim_cls
| _IndexModeContext |
python | openai__openai-python | src/openai/cli/_api/completions.py | {
"start": 3567,
"end": 4123
} | class ____(BaseModel):
model: str  # required: which model to run the completion against
stream: bool = False  # stream partial results as they arrive
prompt: Optional[str] = None
# ``omit`` (as opposed to ``None``) means "leave this field out of the API
# request entirely" when the corresponding CLI flag was not given.
n: Omittable[int] = omit
stop: Omittable[str] = omit
user: Omittable[str] = omit
echo: Omittable[bool] = omit
suffix: Omittable[str] = omit
best_of: Omittable[int] = omit
top_p: Omittable[float] = omit
logprobs: Omittable[int] = omit
max_tokens: Omittable[int] = omit
temperature: Omittable[float] = omit
presence_penalty: Omittable[float] = omit
frequency_penalty: Omittable[float] = omit
| CLICompletionCreateArgs |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/conditional_values_in_variant/package.py | {
"start": 216,
"end": 1340
} | class ____(Package):
"""Package with conditional possible values in a variant"""
homepage = "https://dev.null"
version("1.73.0")
version("1.72.0")
version("1.62.0")
version("1.60.0")
version("1.50.0")
variant(
"cxxstd",
default="98",
values=(
"98",
"11",
"14",
# C++17 is not supported by Boost < 1.63.0.
conditional("17", when="@1.63.0:"),
# C++20/2a is not supported by Boost < 1.73.0
conditional("2a", when="@1.73.0:"),
),
multi=False,
description="Use the specified C++ standard when building.",
when="@1.60.0:",
)
variant(
"staging",
values=any_combination_of(conditional("flexpath", "dataspaces", when="@1.73.0:")),
description="Enable dataspaces and/or flexpath staging transports",
)
variant(
"foo",
default="foo",
values=(conditional("foo", when=True), conditional("bar", when=False)),
description="Variant with default condition false",
)
| ConditionalValuesInVariant |
python | pypa__pip | src/pip/_vendor/rich/syntax.py | {
"start": 7242,
"end": 7621
} | class ____(NamedTuple):
"""
A range to highlight in a Syntax object.
`start` and `end` are 2-integers tuples, where the first integer is the line number
(starting from 1) and the second integer is the column index (starting from 0).
"""
style: StyleType
start: SyntaxPosition
end: SyntaxPosition
style_before: bool = False
| _SyntaxHighlightRange |
python | RaRe-Technologies__gensim | gensim/test/test_similarity_metrics.py | {
"start": 7457,
"end": 8690
} | class ____(unittest.TestCase):
def test_inputs(self):
    # Jaccard over two empty vectors has an empty union, so the metric
    # divides by zero — the exception is the expected behaviour.
    empty_a = []
    empty_b = []
    self.assertRaises(ZeroDivisionError, matutils.jaccard, empty_a, empty_b)
def test_distributions(self):
    """Jaccard distance across all supported input types: bag-of-words,
    ndarray/sparse-matrix pairs, and ndarray/list pairs."""
    # checking bag of words as inputs
    vec_1 = [(2, 1), (3, 4), (4, 1), (5, 1), (1, 1), (7, 2)]
    vec_2 = [(1, 1), (3, 8), (4, 1)]
    result = matutils.jaccard(vec_2, vec_1)
    expected = 1 - 0.3
    self.assertAlmostEqual(expected, result)

    # checking ndarray, csr_matrix as inputs
    vec_1 = np.array([[1, 3], [0, 4], [2, 3]])
    vec_2 = csr_matrix([[1, 4], [0, 2], [2, 2]])
    result = matutils.jaccard(vec_1, vec_2)
    expected = 1 - 0.388888888889
    self.assertAlmostEqual(expected, result)

    # checking ndarray, list as inputs
    vec_1 = np.array([6, 1, 2, 3])
    vec_2 = [4, 3, 2, 5]
    result = matutils.jaccard(vec_1, vec_2)
    expected = 1 - 0.333333333333
    self.assertAlmostEqual(expected, result)
if __name__ == '__main__':
    # Verbose logging makes failures easier to diagnose when run directly.
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| TestJaccard |
python | bokeh__bokeh | tests/support/util/screenshot.py | {
"start": 1473,
"end": 1514
} | class ____(TypedDict):
# Image payload returned from the browser; presumably a base64/data-URL
# string — TODO confirm against the JS screenshot helper that produces it.
data: str
| JSImage |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/iofuncs.py | {
"start": 17648,
"end": 21591
} | class ____:
def __init__(self):
    # Every table is ``None`` until ``setup()`` populates it.
    for attr in ("load_extensions", "save_extensions", "load_filters",
                 "save_filters", "load_funcs", "save_funcs"):
        setattr(self, attr, None)
def setup(self):
    """Build the filter strings and extension->handler tables from the registry."""
    iofuncs = self.get_internal_funcs()
    load_extensions = {}
    save_extensions = {}
    load_funcs = {}
    save_funcs = {}
    load_filters = []
    save_filters = []
    load_ext = []
    for ext, name, loadfunc, savefunc in iofuncs:
        # Qt-style file-dialog filter entry, e.g. 'NumPy arrays (*.npy)'.
        filter_str = str(name + " (*%s)" % ext)
        if loadfunc is not None:
            load_filters.append(filter_str)
            load_extensions[filter_str] = ext
            load_funcs[ext] = loadfunc
            load_ext.append(ext)
        if savefunc is not None:
            save_extensions[filter_str] = ext
            save_filters.append(filter_str)
            save_funcs[ext] = savefunc
    # Catch-all entry listing every loadable extension goes first; a
    # wildcard "all files" entry goes last.
    load_filters.insert(
        0, str("Supported files" + " (*" + " *".join(load_ext) + ")")
    )
    load_filters.append(str("All files (*.*)"))
    self.load_filters = "\n".join(load_filters)
    self.save_filters = "\n".join(save_filters)
    self.load_funcs = load_funcs
    self.save_funcs = save_funcs
    self.load_extensions = load_extensions
    self.save_extensions = save_extensions
def get_internal_funcs(self):
    """Registry of built-in handlers as (extension, label, loader, saver).

    ``None`` means the direction is unsupported; the 'import_wizard'
    string is a sentinel handled by the caller rather than a callable.
    """
    return [
        ('.spydata', "Spyder data files", load_dictionary, save_dictionary),
        ('.npy', "NumPy arrays", load_array, None),
        ('.npz', "NumPy zip arrays", load_array, None),
        ('.mat', "Matlab files", load_matlab, save_matlab),
        ('.csv', "CSV text files", 'import_wizard', None),
        ('.txt', "Text files", 'import_wizard', None),
        ('.jpg', "JPEG images", load_image, None),
        ('.png', "PNG images", load_image, None),
        ('.gif', "GIF images", load_image, None),
        ('.tif', "TIFF images", load_image, None),
        ('.pkl', "Pickle files", load_pickle, None),
        ('.pickle', "Pickle files", load_pickle, None),
        ('.json', "JSON files", load_json, None),
        ('.h5', "HDF5 files", load_hdf5, save_hdf5),
        ('.dcm', "DICOM images", load_dicom, None),
    ]
def save(self, data, filename):
    """Serialize ``data`` to ``filename`` using the handler registered for
    its (case-insensitive) extension.

    Returns the handler's result, or an HTML error string when the
    extension has no registered saver.
    """
    ext = osp.splitext(filename)[1].lower()
    handler = self.save_funcs.get(ext)
    if handler is None:
        return "<b>Unsupported file type '%s'</b>" % ext
    return handler(data, filename)
def load(self, filename):
    """Load ``filename`` with the handler registered for its extension.

    Returns the handler's result, or ``(None, error_html)`` when the
    extension has no registered loader.
    """
    ext = osp.splitext(filename)[1].lower()
    handler = self.load_funcs.get(ext)
    if handler is None:
        return None, "<b>Unsupported file type '%s'</b>" % ext
    return handler(filename)
# Module-level singleton through which all load/save dispatch happens.
iofunctions = IOFunctions()
iofunctions.setup()


# ---- Test
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    import datetime
    # Ad-hoc round-trip smoke test for the .spydata save/load pair, covering
    # every value kind the serializer claims to support.
    testdict = {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]}
    testdate = datetime.date(1945, 5, 8)
    example = {'str': 'kjkj kj k j j kj k jkj',
               'unicode': u'éù',
               'list': [1, 3, [4, 5, 6], 'kjkj', None],
               'tuple': ([1, testdate, testdict], 'kjkj', None),
               'dict': testdict,
               'float': 1.2233,
               'array': np.random.rand(4000, 400),
               'empty_array': np.array([]),
               'date': testdate,
               'datetime': datetime.datetime(1945, 5, 8),
               }
    import time
    t0 = time.time()
    save_dictionary(example, "test.spydata")
    print(" Data saved in %.3f seconds" % (time.time()-t0))
    t0 = time.time()
    example2, ok = load_dictionary("test.spydata")
    os.remove("test.spydata")
    print("Data loaded in %.3f seconds" % (time.time()-t0))
| IOFunctions |
python | ray-project__ray | python/ray/air/tests/execution/test_tracked_actor.py | {
"start": 689,
"end": 1037
} | class ____(RuntimeError):
pass  # marker exception used by the tests in this module


@pytest.fixture(scope="module")
def ray_start_4_cpus():
    """Start a 4-CPU local Ray cluster once per module; shut down afterwards."""
    address_info = ray.init(num_cpus=4)
    yield address_info
    ray.shutdown()


@pytest.fixture
def cleanup():
    # Garbage collect at the start
    # This ensures that all resources are freed up for the upcoming test.
    gc.collect()
    yield
python | astropy__astropy | astropy/extern/configobj/validate.py | {
"start": 12926,
"end": 13328
} | class ____(VdtValueError):
"""The value supplied was of the correct type, but was too small."""
def __init__(self, value):
"""
>>> raise VdtValueTooSmallError('0')
Traceback (most recent call last):
VdtValueTooSmallError: the value "0" is too small.
"""
ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
| VdtValueTooSmallError |
python | joke2k__faker | faker/providers/ssn/pt_PT/__init__.py | {
"start": 42,
"end": 411
} | class ____(BaseProvider):
"""
A Faker provider for the Portuguese VAT IDs
"""
vat_id_formats = ("PT#########",)
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Portuguese VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
| Provider |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 6570,
"end": 18169
} | class ____(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
The ``x, y``, ``[x,y]_mean``, and ``[x,y]_stddev`` inputs must have
compatible units or be unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(
default=0, description="Peak position (along x axis) of Gaussian"
)
y_mean = Parameter(
default=0, description="Peak position (along y axis) of Gaussian"
)
x_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along x axis)"
)
y_stddev = Parameter(
default=1, description="Standard deviation of the Gaussian (along y axis)"
)
theta = Parameter(
default=0.0,
description=(
"Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"
),
)
def __init__(
    self,
    amplitude=amplitude.default,
    x_mean=x_mean.default,
    y_mean=y_mean.default,
    x_stddev=None,
    y_stddev=None,
    theta=None,
    cov_matrix=None,
    **kwargs,
):
    # ``x_stddev``/``y_stddev``/``theta`` and ``cov_matrix`` are mutually
    # exclusive ways of specifying the Gaussian's shape.
    if cov_matrix is None:
        if x_stddev is None:
            x_stddev = self.__class__.x_stddev.default
        if y_stddev is None:
            y_stddev = self.__class__.y_stddev.default
        if theta is None:
            theta = self.__class__.theta.default
    else:
        if x_stddev is not None or y_stddev is not None or theta is not None:
            raise InputParameterError(
                "Cannot specify both cov_matrix and x/y_stddev/theta"
            )

        # Compute principle coordinate system transformation
        cov_matrix = np.array(cov_matrix)

        if cov_matrix.shape != (2, 2):
            raise ValueError("Covariance matrix must be 2x2")

        # Eigendecomposition: eigenvalues give the variances along the
        # principal axes; the first eigenvector fixes the rotation angle.
        eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
        x_stddev, y_stddev = np.sqrt(eig_vals)
        y_vec = eig_vecs[:, 0]
        theta = np.arctan2(y_vec[1], y_vec[0])

    # Ensure stddev makes sense if its bounds are not explicitly set.
    # stddev must be non-zero and positive.
    # TODO: Investigate why setting this in Parameter above causes
    # convolution tests to hang.
    kwargs.setdefault("bounds", {})
    kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None))
    kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None))

    super().__init__(
        amplitude=amplitude,
        x_mean=x_mean,
        y_mean=y_mean,
        x_stddev=x_stddev,
        y_stddev=y_stddev,
        theta=theta,
        **kwargs,
    )
@property
def x_fwhm(self):
    """Gaussian full width at half maximum in X."""
    # Conversion constant sigma -> FWHM is defined at module level.
    return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM

@property
def y_fwhm(self):
    """Gaussian full width at half maximum in Y."""
    return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
    """
    Tuple defining the default ``bounding_box`` limits in each dimension,
    ``((y_low, y_high), (x_low, x_high))``.

    The default offset from the mean is 5.5-sigma, corresponding
    to a relative error < 1e-7. The limits are adjusted for rotation.

    Parameters
    ----------
    factor : float, optional
        The multiple of `x_stddev` and `y_stddev` used to define the limits.
        The default is 5.5.

    Examples
    --------
    >>> from astropy.modeling.models import Gaussian2D
    >>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
    >>> model.bounding_box
    ModelBoundingBox(
        intervals={
            x: Interval(lower=-5.5, upper=5.5)
            y: Interval(lower=-11.0, upper=11.0)
        }
        model=Gaussian2D(inputs=('x', 'y'))
        order='C'
    )

    This range can be set directly (see: `Model.bounding_box
    <astropy.modeling.Model.bounding_box>`) or by using a different factor
    like:

    >>> model.bounding_box = model.bounding_box(factor=2)
    >>> model.bounding_box
    ModelBoundingBox(
        intervals={
            x: Interval(lower=-2.0, upper=2.0)
            y: Interval(lower=-4.0, upper=4.0)
        }
        model=Gaussian2D(inputs=('x', 'y'))
        order='C'
    )
    """
    # Semi-axes of the factor-sigma ellipse; ellipse_extent accounts for
    # the rotation by theta.
    a = factor * self.x_stddev
    b = factor * self.y_stddev
    dx, dy = ellipse_extent(a, b, self.theta)

    # y-limits first, matching the ((y_low, y_high), (x_low, x_high))
    # convention documented above.
    return (
        (self.y_mean - dy, self.y_mean + dy),
        (self.x_mean - dx, self.x_mean + dx),
    )
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
    """Evaluate the rotated two dimensional Gaussian at ``(x, y)``."""
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    sin_2t = np.sin(2.0 * theta)
    var_x = x_stddev**2
    var_y = y_stddev**2
    dx = x - x_mean
    dy = y - y_mean
    # Quadratic-form coefficients of the rotated Gaussian's exponent.
    coef_a = 0.5 * (cos_t**2 / var_x + sin_t**2 / var_y)
    coef_b = 0.5 * (sin_2t / var_x - sin_2t / var_y)
    coef_c = 0.5 * (sin_t**2 / var_x + cos_t**2 / var_y)
    exponent = coef_a * dx**2 + coef_b * dx * dy + coef_c * dy**2
    return amplitude * np.exp(-exponent)
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
    """Two dimensional Gaussian function derivative with respect to parameters."""
    # Trigonometric and width terms reused across the partials.
    cost = np.cos(theta)
    sint = np.sin(theta)
    cost2 = np.cos(theta) ** 2
    sint2 = np.sin(theta) ** 2
    cos2t = np.cos(2.0 * theta)
    sin2t = np.sin(2.0 * theta)
    xstd2 = x_stddev**2
    ystd2 = y_stddev**2
    xstd3 = x_stddev**3
    ystd3 = y_stddev**3
    xdiff = x - x_mean
    ydiff = y - y_mean
    xdiff2 = xdiff**2
    ydiff2 = ydiff**2
    # Quadratic-form coefficients (same as in ``evaluate``) and the model
    # value itself, reused in every partial via the chain rule.
    a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
    b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
    c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
    g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2)))
    # Partials of a, b, c with respect to theta and the widths.
    da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2))
    da_dx_stddev = -cost2 / xstd3
    da_dy_stddev = -sint2 / ystd3
    db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
    db_dx_stddev = -sin2t / xstd3
    db_dy_stddev = sin2t / ystd3
    dc_dtheta = -da_dtheta
    dc_dx_stddev = -sint2 / xstd3
    dc_dy_stddev = -cost2 / ystd3
    # Chain rule through the exponential for each model parameter.
    dg_dA = g / amplitude
    dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff))
    dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff))
    dg_dx_stddev = g * (
        -(
            da_dx_stddev * xdiff2
            + db_dx_stddev * xdiff * ydiff
            + dc_dx_stddev * ydiff2
        )
    )
    dg_dy_stddev = g * (
        -(
            da_dy_stddev * xdiff2
            + db_dy_stddev * xdiff * ydiff
            + dc_dy_stddev * ydiff2
        )
    )
    dg_dtheta = g * (
        -(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2)
    )
    # Order matches the Parameter declaration order on the class.
    return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta]
@property
def input_units(self):
    # Input units are inferred from the mean parameters; None = unitless.
    x_unit = self.x_mean.input_unit
    y_unit = self.y_mean.input_unit
    if x_unit is None and y_unit is None:
        return None
    return {self.inputs[0]: x_unit, self.inputs[1]: y_unit}

def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
    # Note that here we need to make sure that x and y are in the same
    # units otherwise this can lead to issues since rotation is not well
    # defined.
    if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
        raise UnitsError("Units of 'x' and 'y' inputs should match")
    return {
        "x_mean": inputs_unit[self.inputs[0]],
        "y_mean": inputs_unit[self.inputs[0]],
        "x_stddev": inputs_unit[self.inputs[0]],
        "y_stddev": inputs_unit[self.inputs[0]],
        "theta": u.rad,
        "amplitude": outputs_unit[self.outputs[0]],
    }
| Gaussian2D |
python | google__pytype | pytype/tools/annotate_ast/annotate_ast.py | {
"start": 1268,
"end": 2299
} | class ____(traces.MatchAstVisitor):
"""Traverses an AST and sets type information on its nodes.
This is modeled after ast.NodeVisitor, but doesn't inherit from it because
it is ast-module agnostic so that different AST implementations can be used.
"""
def visit_Name(self, node):
self._maybe_annotate(node)
def visit_Attribute(self, node):
self._maybe_annotate(node)
def visit_FunctionDef(self, node):
self._maybe_annotate(node)
def _maybe_annotate(self, node):
    """Annotate ``node`` with type information, when pytype traced it."""
    try:
        ops = self.match(node)
    except NotImplementedError:
        # Node kinds without a matcher are simply left unannotated.
        return
    # For lack of a better option, take the first one.
    _unused_loc, entry = next(iter(ops), (None, None))
    self._maybe_set_type(node, entry)
def _maybe_set_type(self, node, trace):
    """Record the trace's final type on the node, when a trace exists."""
    if not trace:
        return
    final_type = trace.types[-1]
    node.resolved_type = final_type
    node.resolved_annotation = _annotation_str_from_type_def(final_type)
| AnnotateAstVisitor |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/interfaces.py | {
"start": 23620,
"end": 39907
} | class ____(
FluentBaseModel,
Generic[_DataAssetT, _ExecutionEngineT],
metaclass=MetaDatasource,
):
"""
A Datasource provides a standard API for accessing and interacting with data from
a wide variety of source systems.
"""
# To subclass Datasource one needs to define:
# asset_types
# type
# assets
#
# The important part of defining `assets` is setting the Dict type correctly.
# In addition, one must define the methods in the `Abstract Methods` section below.
# If one writes a class level docstring, this will become the documenation for the
# data context method `data_context.data_sources.add_my_datasource` method.
# class attrs
asset_types: ClassVar[Sequence[Type[DataAsset]]] = []
# Not all Datasources require a DataConnector
data_connector_type: ClassVar[Optional[Type[DataConnector]]] = None
# Datasource sublcasses should update this set if the field should not be passed to the execution engine # noqa: E501 # FIXME CoP
_EXTRA_EXCLUDED_EXEC_ENG_ARGS: ClassVar[Set[str]] = set()
_type_lookup: ClassVar[TypeLookup] # This attribute is set in `MetaDatasource.__new__`
# Setting this in a Datasource subclass will override the execution engine type.
# The primary use case is to inject an execution engine for testing.
execution_engine_override: ClassVar[Optional[Type[_ExecutionEngineT]]] = None
# instance attrs
type: str
name: str
id: Optional[uuid.UUID] = Field(default=None, description="Datasource id")
assets: MutableSequence[_DataAssetT] = []
# private attrs
_data_context: Union[GXDataContext, None] = pydantic.PrivateAttr(None)
_cached_execution_engine_kwargs: Dict[str, Any] = pydantic.PrivateAttr({})
_execution_engine: Union[_ExecutionEngineT, None] = pydantic.PrivateAttr(None)
@property
def _config_provider(self) -> Union[_ConfigurationProvider, None]:
return getattr(self._data_context, "config_provider", None)
@property
def data_context(self) -> GXDataContext | None:
"""The data context that this datasource belongs to.
This method should only be used by library implementers.
"""
return self._data_context
@pydantic.validator("assets", each_item=True)
@classmethod
def _load_asset_subtype(
    cls: Type[Datasource[_DataAssetT, _ExecutionEngineT]], data_asset: DataAsset
) -> _DataAssetT:
    """
    Some `data_asset` may be loaded as a less specific asset subtype different than
    what was intended.
    If a more specific subtype is needed the `data_asset` will be converted to a
    more specific `DataAsset`.
    """
    logger.debug(f"Loading '{data_asset.name}' asset ->\n{pf(data_asset, depth=4)}")
    asset_type_name: str = data_asset.type
    asset_type: Type[_DataAssetT] = cls._type_lookup[asset_type_name]

    if asset_type is type(data_asset):
        # asset is already the intended type
        return data_asset

    # strip out asset default kwargs
    kwargs = data_asset.dict(exclude_unset=True)
    logger.debug(f"{asset_type_name} - kwargs\n{pf(kwargs)}")

    cls._update_asset_forward_refs(asset_type)

    asset_of_intended_type = asset_type(**kwargs)
    logger.debug(f"{asset_type_name} - {asset_of_intended_type!r}")
    return asset_of_intended_type

@pydantic.validator(_ASSETS_KEY, each_item=True)
def _update_batch_definitions(cls, data_asset: DataAsset) -> DataAsset:
    # Batch definitions are deserialized without a back-reference to their
    # parent asset; restore it here.
    for batch_definition in data_asset.batch_definitions:
        batch_definition.set_data_asset(data_asset)
    return data_asset

def _execution_engine_type(self) -> Type[_ExecutionEngineT]:
    """Returns the execution engine to be used"""
    return self.execution_engine_override or self.execution_engine_type
def add_batch_definition(
    self, batch_definition: BatchDefinition[PartitionerT]
) -> BatchDefinition[PartitionerT]:
    """Persist a new batch definition on this datasource and return the
    stored copy (the in-memory definition is updated with the assigned id).

    Raises:
        DataContextError: if the datasource is not attached to a context.
    """
    asset_name = batch_definition.data_asset.name
    if not self.data_context:
        raise DataContextError(  # noqa: TRY003 # FIXME CoP
            "Cannot save datasource without a data context."
        )

    loaded_datasource = self.data_context.data_sources.get(self.name)
    if loaded_datasource is not self:
        # CachedDatasourceDict will return self; only add the batch
        # definition here if this is a remote copy.
        assert isinstance(loaded_datasource, Datasource)
        loaded_asset = loaded_datasource.get_asset(asset_name)
        loaded_asset.batch_definitions.append(batch_definition)
        loaded_asset.update_batch_definition_field_set()
    updated_datasource = self.data_context.update_datasource(loaded_datasource)
    assert isinstance(updated_datasource, Datasource)

    updated_asset = updated_datasource.get_asset(asset_name)
    updated_batch_definition = updated_asset.get_batch_definition(batch_definition.name)
    if batch_definition is not updated_batch_definition:
        # update in memory copy with the new ID
        batch_definition.id = updated_batch_definition.id
    return updated_batch_definition

def delete_batch_definition(self, batch_definition: BatchDefinition[PartitionerT]) -> None:
    """Remove a batch definition from this datasource's stored copy.

    Raises:
        DataContextError: if the datasource is not attached to a context.
    """
    asset_name = batch_definition.data_asset.name
    if not self.data_context:
        raise DataContextError(  # noqa: TRY003 # FIXME CoP
            "Cannot save datasource without a data context."
        )

    loaded_datasource = self.data_context.data_sources.get(self.name)
    if loaded_datasource is not self:
        # CachedDatasourceDict will return self; only remove the batch
        # definition here if this is a remote copy.
        assert isinstance(loaded_datasource, Datasource)
        loaded_asset = loaded_datasource.get_asset(asset_name)
        loaded_asset.batch_definitions.remove(batch_definition)
        loaded_asset.update_batch_definition_field_set()
    updated_datasource = self.data_context.update_datasource(loaded_datasource)
    assert isinstance(updated_datasource, Datasource)
def get_execution_engine(self) -> _ExecutionEngineT:
    """Return a (cached) execution engine built from this datasource's config."""
    current_execution_engine_kwargs = self.dict(
        exclude=self._get_exec_engine_excludes(),
        config_provider=self._config_provider,
    )
    # Rebuild only when the config changed or no engine exists yet.
    if (
        current_execution_engine_kwargs != self._cached_execution_engine_kwargs
        or not self._execution_engine
    ):
        self._execution_engine = self._execution_engine_type()(
            **current_execution_engine_kwargs
        )
        self._cached_execution_engine_kwargs = current_execution_engine_kwargs
    return self._execution_engine

def get_batch(self, batch_request: BatchRequest) -> Batch:
    """A Batch that corresponds to the BatchRequest.

    Args:
        batch_request: A batch request for this asset. Usually obtained by calling
            build_batch_request on the asset.

    Returns:
        A Batch that matches the options specified in the batch request.
    """
    data_asset = self.get_asset(batch_request.data_asset_name)
    return data_asset.get_batch(batch_request)

def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]:
    # Delegates to the asset named in the request.
    data_asset = self.get_asset(batch_request.data_asset_name)
    return data_asset.get_batch_identifiers_list(batch_request)
def get_assets_as_dict(self) -> MutableMapping[str, _DataAssetT]:
"""Returns available DataAsset objects as dictionary, with corresponding name as key.
Returns:
Dictionary of "_DataAssetT" objects with "name" attribute serving as key.
"""
asset: _DataAssetT
assets_as_dict: MutableMapping[str, _DataAssetT] = {
asset.name: asset for asset in self.assets
}
return assets_as_dict
def get_asset_names(self) -> Set[str]:
"""Returns the set of available DataAsset names
Returns:
Set of available DataAsset names.
"""
asset: _DataAssetT
return {asset.name for asset in self.assets}
@public_api
def get_asset(self, name: str) -> _DataAssetT:
"""Returns the DataAsset referred to by asset_name
Args:
name: name of DataAsset sought.
Returns:
if named "DataAsset" object exists; otherwise, exception is raised.
"""
# This default implementation will be used if protocol is inherited
try:
asset: _DataAssetT
found_asset: _DataAssetT = list(filter(lambda asset: asset.name == name, self.assets))[
0
]
found_asset._datasource = self
return found_asset
except IndexError as exc:
raise LookupError( # noqa: TRY003 # FIXME CoP
f'"{name}" not found. Available assets are ({", ".join(self.get_asset_names())})'
) from exc
@public_api
def delete_asset(self, name: str) -> None:
"""Removes the DataAsset referred to by asset_name from internal list of available DataAsset objects.
Args:
name: name of DataAsset to be deleted.
""" # noqa: E501 # FIXME CoP
from great_expectations.data_context import CloudDataContext
asset: _DataAssetT
asset = self.get_asset(name=name)
if self._data_context and isinstance(self._data_context, CloudDataContext):
self._data_context._delete_asset(id=str(asset.id))
self.assets = list(filter(lambda asset: asset.name != name, self.assets))
self._save_context_project_config()
def _add_asset(self, asset: _DataAssetT, connect_options: dict | None = None) -> _DataAssetT:
"""Adds an asset to a datasource
Args:
asset: The DataAsset to be added to this datasource.
"""
# The setter for datasource is non-functional, so we access _datasource directly.
# See the comment in DataAsset for more information.
asset._datasource = self
if not connect_options:
connect_options = {}
self._build_data_connector(asset, **connect_options)
asset.test_connection()
asset_names: Set[str] = self.get_asset_names()
if asset.name in asset_names:
raise ValueError( # noqa: TRY003 # FIXME CoP
f'"{asset.name}" already exists (all existing assets are {", ".join(asset_names)})'
)
self.assets.append(asset)
# if asset was added to a cloud FDS, _update_fluent_datasource will return FDS fetched from cloud, # noqa: E501 # FIXME CoP
# which will contain the new asset populated with an id
if self._data_context:
updated_datasource = self._data_context._update_fluent_datasource(datasource=self)
assert isinstance(updated_datasource, Datasource)
if asset_id := updated_datasource.get_asset(name=asset.name).id:
asset.id = asset_id
return asset
def _save_context_project_config(self) -> None:
"""Check if a DataContext is available and save the project config."""
if self._data_context:
try:
self._data_context._save_project_config()
except TypeError as type_err:
warnings.warn(str(type_err), GxSerializationWarning)
def _rebuild_asset_data_connectors(self) -> None:
"""
If Datasource required a data_connector we need to build the data_connector for each asset.
A warning is raised if a data_connector cannot be built for an asset.
Not all users will have access to the needed dependencies (packages or credentials) for every asset.
Missing dependencies will stop them from using the asset but should not stop them from loading it from config.
""" # noqa: E501 # FIXME CoP
asset_build_failure_direct_cause: dict[str, Exception | BaseException] = {}
if self.data_connector_type:
for data_asset in self.assets:
try:
# check if data_connector exist before rebuilding?
connect_options = getattr(data_asset, "connect_options", {})
self._build_data_connector(data_asset, **connect_options)
except Exception as dc_build_err:
logger.info(
f"Unable to build data_connector for {self.type} {data_asset.type} {data_asset.name}", # noqa: E501 # FIXME CoP
exc_info=True,
)
# reveal direct cause instead of generic, unhelpful MyDatasourceError
asset_build_failure_direct_cause[data_asset.name] = (
dc_build_err.__cause__ or dc_build_err
)
if asset_build_failure_direct_cause:
# TODO: allow users to opt out of these warnings
names_and_error: List[str] = [
f"{name}:{type(exc).__name__}"
for (name, exc) in asset_build_failure_direct_cause.items()
]
warnings.warn(
f"data_connector build failure for {self.name} assets - {', '.join(names_and_error)}", # noqa: E501 # FIXME CoP
category=RuntimeWarning,
)
@staticmethod
def _update_asset_forward_refs(asset_type: Type[_DataAssetT]) -> None:
"""Update forward refs of an asset_type if necessary.
Note, this should be overridden in child datasource classes if forward
refs need to be updated. For example, in Spark datasources we need to
update forward refs only if the optional spark dependencies are installed
so this method is overridden. Here it is a no op.
Args:
asset_type: Asset type to update forward refs.
Returns:
None, asset refs is updated in place.
"""
pass
# Abstract Methods
@property
def execution_engine_type(self) -> Type[_ExecutionEngineT]:
"""Return the ExecutionEngine type use for this Datasource"""
raise NotImplementedError(
"""One needs to implement "execution_engine_type" on a Datasource subclass."""
)
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the Datasource.
Args:
test_assets: If assets have been passed to the Datasource, an attempt can be made to test them as well.
Raises:
TestConnectionError: If the connection test fails.
""" # noqa: E501 # FIXME CoP
raise NotImplementedError(
"""One needs to implement "test_connection" on a Datasource subclass."""
)
def _build_data_connector(self, data_asset: _DataAssetT, **kwargs) -> None:
"""Any Datasource subclass that utilizes DataConnector should overwrite this method.
Specific implementations instantiate appropriate DataConnector class and set "self._data_connector" to it.
Args:
data_asset: DataAsset using this DataConnector instance
kwargs: Extra keyword arguments allow specification of arguments used by particular DataConnector subclasses
""" # noqa: E501 # FIXME CoP
pass
@classmethod
def _get_exec_engine_excludes(cls) -> Set[str]:
"""
Return a set of field names to exclude from the execution engine.
All datasource fields are passed to the execution engine by default unless they are in this set.
Default implementation is to return the combined set of field names from `_EXTRA_EXCLUDED_EXEC_ENG_ARGS`
and `_BASE_DATASOURCE_FIELD_NAMES`.
""" # noqa: E501 # FIXME CoP
return cls._EXTRA_EXCLUDED_EXEC_ENG_ARGS.union(_BASE_DATASOURCE_FIELD_NAMES)
# End Abstract Methods
# This is used to prevent passing things like `type`, `assets` etc. to the execution engine
_BASE_DATASOURCE_FIELD_NAMES: Final[Set[str]] = {name for name in Datasource.__fields__}
@dataclasses.dataclass(frozen=True)
| Datasource |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 300785,
"end": 310619
} | class ____(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
# function ExprNode
# positional_args ExprNode Tuple of positional arguments
# keyword_args ExprNode or None Dict of keyword arguments
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception as e:
self.compile_time_value_error(e)
def calculate_constant_result(self):
if self.function.is_attribute and self.function.obj.is_literal:
method = self.function.constant_result
if inspect.isbuiltin(method) or inspect.ismethod(method):
method_name = method.__name__
# Prefer the actual builtin type over internal representations like "EncodedString".
object_type = self.function.obj.type
object_type_name = object_type.name if object_type else type(method.__self__).__name__
if Builtin.is_safe_compile_time_method(object_type_name, method_name):
args = self.positional_args.constant_result
kwargs = self.keyword_args.constant_result
self.constant_result = method(*args, **kwargs)
def explicit_args_kwds(self):
if (self.keyword_args and not self.keyword_args.is_dict_literal or
not self.positional_args.is_sequence_constructor):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if (as_type_constructor := self.analyse_as_type_constructor(env)) is not None:
return as_type_constructor
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
self.set_py_result_type(self.function)
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
Returns self to try a Python call, None to report an error
or a SimpleCallNode if the mapping succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not self.keyword_args.is_dict_literal:
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = {
arg.name for arg in declared_args[:len(pos_args)]
if arg.name
}
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = {arg.key.value: (i+len(pos_args), arg)
for i, arg in enumerate(kwargs.key_value_pairs)}
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
| GeneralCallNode |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/base.py | {
"start": 4183,
"end": 5333
} | class ____:
"""
Event that called the completer.
:param text_inserted: When True, it means that completions are requested
because of a text insert. (`Buffer.complete_while_typing`.)
:param completion_requested: When True, it means that the user explicitly
pressed the `Tab` key in order to view the completions.
These two flags can be used for instance to implement a completer that
shows some completions when ``Tab`` has been pressed, but not
automatically when the user presses a space. (Because of
`complete_while_typing`.)
"""
def __init__(
self, text_inserted: bool = False, completion_requested: bool = False
) -> None:
assert not (text_inserted and completion_requested)
#: Automatic completion while typing.
self.text_inserted = text_inserted
#: Used explicitly requested completion by pressing 'tab'.
self.completion_requested = completion_requested
def __repr__(self) -> str:
return f"{self.__class__.__name__}(text_inserted={self.text_inserted!r}, completion_requested={self.completion_requested!r})"
| CompleteEvent |
python | PyCQA__pylint | doc/data/messages/a/assigning-non-slot/bad.py | {
"start": 0,
"end": 217
} | class ____:
__slots__ = ("name",)
def __init__(self, name, surname):
self.name = name
self.surname = surname # [assigning-non-slot]
self.setup()
def setup(self):
pass
| Student |
python | pydantic__pydantic | tests/mypy/outputs/mypy-default_ini/plugin_success.py | {
"start": 843,
"end": 1140
} | class ____(BaseModel, from_attributes=True):
x: float
y: str
kwargs_model = KwargsModel(x=1, y='y')
KwargsModel(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "KwargsModel" [call-arg]
kwargs_model.x = 2
kwargs_model.model_validate(kwargs_model.__dict__)
| KwargsModel |
python | scipy__scipy | benchmarks/benchmarks/optimize_linprog.py | {
"start": 4119,
"end": 4912
} | class ____(Benchmark):
params = [
methods,
[3, 6, 9]
]
param_names = ['method', 'dimensions']
def setup(self, meth, dims):
self.c, self.A_ub, self.b_ub, self.xf, self.obj = klee_minty(dims)
self.fun = None
def time_klee_minty(self, meth, dims):
method, options = meth
res = linprog(c=self.c, A_ub=self.A_ub, b_ub=self.b_ub,
method=method, options=options)
self.fun = res.fun
self.x = res.x
def track_klee_minty(self, meth, prob):
if self.fun is None:
self.time_klee_minty(meth, prob)
self.abs_error = np.abs(self.fun - self.obj)
self.rel_error = np.abs((self.fun - self.obj)/self.obj)
return min(self.abs_error, self.rel_error)
| KleeMinty |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 71822,
"end": 80582
} | class ____(StrPrinter):
"""Printer for which only equal objects are equal in print"""
def _print_Dummy(self, expr):
return "(%s_%i)" % (expr.name, expr.dummy_index)
def __kprint(expr):
p = KeyPrinter()
return p.doprint(expr)
def _get_ordered_dummies(mul, verbose=False):
"""Returns all dummies in the mul sorted in canonical order.
Explanation
===========
The purpose of the canonical ordering is that dummies can be substituted
consistently across terms with the result that equivalent terms can be
simplified.
It is not possible to determine if two terms are equivalent based solely on
the dummy order. However, a consistent substitution guided by the ordered
dummies should lead to trivially (non-)equivalent terms, thereby revealing
the equivalence. This also means that if two terms have identical sequences of
dummies, the (non-)equivalence should already be apparent.
Strategy
--------
The canonical order is given by an arbitrary sorting rule. A sort key
is determined for each dummy as a tuple that depends on all factors where
the index is present. The dummies are thereby sorted according to the
contraction structure of the term, instead of sorting based solely on the
dummy symbol itself.
After all dummies in the term has been assigned a key, we check for identical
keys, i.e. unorderable dummies. If any are found, we call a specialized
method, _determine_ambiguous(), that will determine a unique order based
on recursive calls to _get_ordered_dummies().
Key description
---------------
A high level description of the sort key:
1. Range of the dummy index
2. Relation to external (non-dummy) indices
3. Position of the index in the first factor
4. Position of the index in the second factor
The sort key is a tuple with the following components:
1. A single character indicating the range of the dummy (above, below
or general.)
2. A list of strings with fully masked string representations of all
factors where the dummy is present. By masked, we mean that dummies
are represented by a symbol to indicate either below fermi, above or
general. No other information is displayed about the dummies at
this point. The list is sorted stringwise.
3. An integer number indicating the position of the index, in the first
factor as sorted in 2.
4. An integer number indicating the position of the index, in the second
factor as sorted in 2.
If a factor is either of type AntiSymmetricTensor or SqOperator, the index
position in items 3 and 4 is indicated as 'upper' or 'lower' only.
(Creation operators are considered upper and annihilation operators lower.)
If the masked factors are identical, the two factors cannot be ordered
unambiguously in item 2. In this case, items 3, 4 are left out. If several
indices are contracted between the unorderable factors, it will be handled by
_determine_ambiguous()
"""
# setup dicts to avoid repeated calculations in key()
args = Mul.make_args(mul)
fac_dum = { fac: fac.atoms(Dummy) for fac in args }
fac_repr = { fac: __kprint(fac) for fac in args }
all_dums = set().union(*fac_dum.values())
mask = {}
for d in all_dums:
if d.assumptions0.get('below_fermi'):
mask[d] = '0'
elif d.assumptions0.get('above_fermi'):
mask[d] = '1'
else:
mask[d] = '2'
dum_repr = {d: __kprint(d) for d in all_dums}
def _key(d):
dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ]
other_dums = set().union(*[fac_dum[fac] for fac in dumstruct])
fac = dumstruct[-1]
if other_dums is fac_dum[fac]:
other_dums = fac_dum[fac].copy()
other_dums.remove(d)
masked_facs = [ fac_repr[fac] for fac in dumstruct ]
for d2 in other_dums:
masked_facs = [ fac.replace(dum_repr[d2], mask[d2])
for fac in masked_facs ]
all_masked = [ fac.replace(dum_repr[d], mask[d])
for fac in masked_facs ]
masked_facs = dict(list(zip(dumstruct, masked_facs)))
# dummies for which the ordering cannot be determined
if has_dups(all_masked):
all_masked.sort()
return mask[d], tuple(all_masked) # positions are ambiguous
# sort factors according to fully masked strings
keydict = dict(list(zip(dumstruct, all_masked)))
dumstruct.sort(key=lambda x: keydict[x])
all_masked.sort()
pos_val = []
for fac in dumstruct:
if isinstance(fac, AntiSymmetricTensor):
if d in fac.upper:
pos_val.append('u')
if d in fac.lower:
pos_val.append('l')
elif isinstance(fac, Creator):
pos_val.append('u')
elif isinstance(fac, Annihilator):
pos_val.append('l')
elif isinstance(fac, NO):
ops = [ op for op in fac if op.has(d) ]
for op in ops:
if isinstance(op, Creator):
pos_val.append('u')
else:
pos_val.append('l')
else:
# fallback to position in string representation
facpos = -1
while 1:
facpos = masked_facs[fac].find(dum_repr[d], facpos + 1)
if facpos == -1:
break
pos_val.append(facpos)
return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1])
dumkey = dict(list(zip(all_dums, list(map(_key, all_dums)))))
result = sorted(all_dums, key=lambda x: dumkey[x])
if has_dups(iter(dumkey.values())):
# We have ambiguities
unordered = defaultdict(set)
for d, k in dumkey.items():
unordered[k].add(d)
for k in [ k for k in unordered if len(unordered[k]) < 2 ]:
del unordered[k]
unordered = [ unordered[k] for k in sorted(unordered) ]
result = _determine_ambiguous(mul, result, unordered)
return result
def _determine_ambiguous(term, ordered, ambiguous_groups):
# We encountered a term for which the dummy substitution is ambiguous.
# This happens for terms with 2 or more contractions between factors that
# cannot be uniquely ordered independent of summation indices. For
# example:
#
# Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .}
#
# Assuming that the indices represented by . are dummies with the
# same range, the factors cannot be ordered, and there is no
# way to determine a consistent ordering of p and q.
#
# The strategy employed here, is to relabel all unambiguous dummies with
# non-dummy symbols and call _get_ordered_dummies again. This procedure is
# applied to the entire term so there is a possibility that
# _determine_ambiguous() is called again from a deeper recursion level.
# break recursion if there are no ordered dummies
all_ambiguous = set()
for dummies in ambiguous_groups:
all_ambiguous |= dummies
all_ordered = set(ordered) - all_ambiguous
if not all_ordered:
# FIXME: If we arrive here, there are no ordered dummies. A method to
# handle this needs to be implemented. In order to return something
# useful nevertheless, we choose arbitrarily the first dummy and
# determine the rest from this one. This method is dependent on the
# actual dummy labels which violates an assumption for the
# canonicalization procedure. A better implementation is needed.
group = [ d for d in ordered if d in ambiguous_groups[0] ]
d = group[0]
all_ordered.add(d)
ambiguous_groups[0].remove(d)
stored_counter = _symbol_factory._counter
subslist = []
for d in [ d for d in ordered if d in all_ordered ]:
nondum = _symbol_factory._next()
subslist.append((d, nondum))
newterm = term.subs(subslist)
neworder = _get_ordered_dummies(newterm)
_symbol_factory._set_counter(stored_counter)
# update ordered list with new information
for group in ambiguous_groups:
ordered_group = [ d for d in neworder if d in group ]
ordered_group.reverse()
result = []
for d in ordered:
if d in group:
result.append(ordered_group.pop())
else:
result.append(d)
ordered = result
return ordered
| KeyPrinter |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 29483,
"end": 30398
} | class ____(AssetSelection):
"""Superclass for classes like `AndAssetSelection` and `OrAssetSelection` that operate on
a list of sub-AssetSelections.
"""
operands: Sequence[AssetSelection]
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return copy(
self,
operands=[
operand.to_serializable_asset_selection(asset_graph) for operand in self.operands
],
)
def __eq__(self, other):
if not isinstance(other, OperandListAssetSelection):
return False
num_operands = len(self.operands)
return len(other.operands) == num_operands and all(
self.operands[i] == other.operands[i] for i in range(num_operands)
)
def needs_parentheses_when_operand(self) -> bool:
return True
@whitelist_for_serdes
| OperandListAssetSelection |
python | conda__conda | conda/env/specs/requirements.py | {
"start": 580,
"end": 5123
} | class ____(EnvironmentSpecBase):
"""
Reads dependencies from requirements files (including explicit files)
and returns an Environment object from it.
"""
msg: str | None = None
extensions: ClassVar[set[str]] = {".txt"}
@deprecated.argument("24.7", "26.3", "name")
def __init__(
self, filename: str | None = None, name: str | None = None, **kwargs
) -> None:
"""Initialize the requirements specification.
:param filename: Path to the requirements file
:param name: (Deprecated) Name of the environment
:param kwargs: Additional arguments
"""
self.filename = filename
self._name = name
@property
@deprecated("25.9", "26.3", addendum="This attribute is not used anymore.")
def name(self): # type: ignore[misc]
return self._name
@name.setter # type: ignore[misc]
@deprecated("25.9", "26.3", addendum="This attribute is not used anymore.")
def name(self, value): # type: ignore[misc]
self._name = value
@deprecated("25.9", "26.3", addendum="This method is not used anymore.")
def _valid_file(self) -> bool:
"""Check if the file exists.
:return: True if the file exists, False otherwise
"""
if self.filename and os.path.exists(self.filename):
return True
else:
self.msg = "There is no requirements.txt"
return False
@deprecated("25.9", "26.3", addendum="This method is not used anymore.")
def _valid_name(self) -> bool:
"""Check if the name is valid.
:return: True if the name is valid, False otherwise
"""
if self.name is None:
self.msg = "The environment does not have a name"
return False
else:
return True
def can_handle(self) -> bool:
"""
Validates that this spec can process the environment definition.
This checks if:
* a filename was provided
* the file has a supported extension
* the file exists
* the file content is valid for this specifier type
:return: True if the file can be handled, False otherwise
"""
# Return early if no filename was provided
if self.filename is None:
return False
# Extract the file extension (e.g., '.txt' or '' if no extension)
_, file_ext = os.path.splitext(self.filename)
# Check if the file has a supported extension
if not any(spec_ext == file_ext for spec_ext in self.extensions):
self.msg = f"File {self.filename} does not have a supported extension: {', '.join(self.extensions)}"
return False
# Ensure this is not an explicit file. Requirements.txt and explicit files
# may sometimes share file extension.
dependencies_list = list(yield_lines(self.filename))
if "@EXPLICIT" in dependencies_list:
return False
return True
@property
@deprecated("26.3", "26.9", addendum="This method is not used anymore, use 'env'")
def environment(self) -> EnvironmentYaml:
"""
Build an environment from the requirements file.
This method reads the file as a generator and passes it directly to EnvironmentYaml.
:return: An Environment object containing the package specifications
:raises ValueError: If the file cannot be read
"""
if not self.filename:
raise CondaValueError("No filename provided")
# Convert generator to list since Dependencies needs to access it multiple times
dependencies_list = list(yield_lines(self.filename))
return EnvironmentYaml(
dependencies=dependencies_list,
filename=self.filename,
)
@property
def env(self) -> Environment:
"""
Build an environment from the requirements file.
:return: An Environment object containing the package specifications
:raises ValueError: If the file cannot be read
"""
if not self.filename:
raise ValueError("No filename provided")
# Convert generator to list since Dependencies needs to access it multiple times
dependencies_list = list(yield_lines(self.filename))
requested_packages = [MatchSpec(dep) for dep in dependencies_list]
return Environment(
platform=context.subdir,
requested_packages=requested_packages,
)
| RequirementsSpec |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataform.py | {
"start": 4153,
"end": 5045
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(COMPILATION_RESULT_STR)
def test_execute(self, compilation_result_mock, hook_mock):
op = DataformGetCompilationResultOperator(
task_id="get_compilation_result",
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
compilation_result_id=COMPILATION_RESULT_ID,
)
compilation_result_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.return_value.get_compilation_result.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
repository_id=REPOSITORY_ID,
compilation_result_id=COMPILATION_RESULT_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataformGetCompilationResultOperator |
python | streamlit__streamlit | lib/tests/streamlit/elements/image_test.py | {
"start": 4302,
"end": 22157
} | class ____(DeltaGeneratorTestCase):
"""Test streamlit.image."""
@parameterized.expand(
[
(IMAGES["img_32_32_3_rgb"]["np"], "png"),
(IMAGES["img_32_32_3_bgr"]["np"], "png"),
(IMAGES["img_64_64_rgb"]["np"], "jpeg"),
(IMAGES["img_32_32_3_rgba"]["np"], "jpeg"),
(IMAGES["gif_64_64"]["gif"], "gif"),
]
)
def test_marshall_images(self, data_in: AtomicImage, format: str):
"""Test streamlit.elements.lib.image_utils.marshall_images.
Need to test the following:
- if list
- if not list (is rgb vs is bgr)
- if captions is not list but image is
- if captions length doesn't match image length
- if the caption is set.
- PIL Images
- Numpy Arrays
- Url
- Path
- Bytes
"""
mimetype = f"image/{format}"
if isinstance(data_in, bytes):
file_id = _calculate_file_id(data_in, mimetype=mimetype)
else:
file_id = _calculate_file_id(
_np_array_to_bytes(data_in, output_format=format),
mimetype=mimetype,
)
st.image(data_in, output_format=format)
imglist = self.get_delta_from_queue().new_element.imgs
assert len(imglist.imgs) == 1
assert imglist.imgs[0].url.startswith(MEDIA_ENDPOINT)
assert imglist.imgs[0].url.endswith(get_extension_for_mimetype(mimetype))
assert file_id in imglist.imgs[0].url
@parameterized.expand(
[
(IMAGES["img_32_32_3_rgb"]["np"], ".jpg"),
(IMAGES["img_32_32_3_bgr"]["np"], ".jpg"),
(IMAGES["img_64_64_rgb"]["np"], ".jpg"),
(IMAGES["img_32_32_3_rgba"]["np"], ".png"),
(IMAGES["img_32_32_3_rgb"]["pil"], ".jpg"),
(IMAGES["img_32_32_3_bgr"]["pil"], ".jpg"),
(IMAGES["img_64_64_rgb"]["pil"], ".jpg"),
(IMAGES["img_32_32_3_rgba"]["pil"], ".png"),
(IMAGES["gif_64_64"]["gif"], ".gif"),
]
)
def test_marshall_images_with_auto_output_format(
self, data_in: AtomicImage, expected_extension: str
):
"""Test streamlit.elements.lib.image_utils.marshall_images.
with auto output_format
"""
st.image(data_in, output_format="auto")
imglist = self.get_delta_from_queue().new_element.imgs
assert len(imglist.imgs) == 1
assert imglist.imgs[0].url.endswith(expected_extension)
@parameterized.expand(
[
(IMAGES["img_32_32_3_rgb"]["np"], "/media/"),
(IMAGES["gif_64_64"]["gif"], "/media/"),
("https://streamlit.io/test.png", "https://streamlit.io/test.png"),
("https://streamlit.io/test.svg", "https://streamlit.io/test.svg"),
(
"<svg fake></svg>",
"data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciICBmYWtlPjwvc3ZnPg==",
),
]
)
def test_image_to_url_prefix(self, img, expected_prefix):
url = image_to_url(
img,
layout_config=LayoutConfig(width="stretch"),
clamp=False,
channels="RGB",
output_format="JPEG",
image_id="blah",
)
assert url.startswith(expected_prefix)
@parameterized.expand(
[
(IMAGES["img_32_32_3_rgb"]["np"], ".jpg"),
(IMAGES["gif_64_64"]["gif"], ".gif"),
("https://streamlit.io/test.png", ".png"),
("https://streamlit.io/test.svg", ".svg"),
]
)
def test_image_to_url_suffix(self, img, expected_suffix):
url = image_to_url(
img,
layout_config=LayoutConfig(width="stretch"),
clamp=False,
channels="RGB",
output_format="auto",
image_id="blah",
)
assert url.endswith(expected_suffix)
@parameterized.expand(
[
("foo.png", "image/png", False),
(Path("foo.png"), "image/png", False),
("path/to/foo.jpg", "image/jpeg", False),
(Path("path/to/foo.jpg"), "image/jpeg", False),
("path/to/foo.gif", "image/gif", False),
(Path("path/to/foo.gif"), "image/gif", False),
("foo.unknown_extension", "application/octet-stream", False),
(Path("foo.unknown_extension"), "application/octet-stream", False),
("foo", "application/octet-stream", False),
(Path("foo"), "application/octet-stream", False),
("https://foo.png", "image/png", True),
("https://foo.gif", "image/gif", True),
]
)
def test_image_to_url_adds_filenames_to_media_file_mgr(
self, input_string: str | Path, expected_mimetype: str, is_url: bool
):
"""if `image_to_url` is unable to open an image passed by name, it
still passes the filename to MediaFileManager. (MediaFileManager may have a
storage backend that's able to open the file, so it's up to the manager -
and not image_to_url - to throw an error.)
"""
# Mock out save_media_data to avoid polluting the cache for later tests
with (
mock.patch(
"streamlit.runtime.media_file_manager.MediaFileManager.add"
) as mock_mfm_add,
mock.patch("streamlit.runtime.caching.save_media_data"),
):
mock_mfm_add.return_value = "https://mockoutputurl.com"
result = image_to_url(
input_string,
layout_config=LayoutConfig(width="stretch"),
clamp=False,
channels="RGB",
output_format="auto",
image_id="mock_image_id",
)
if is_url:
# URLs should be returned as-is, and should not result in a call to
# MediaFileManager.add
assert input_string == result
mock_mfm_add.assert_not_called()
else:
# Other strings and Path objects should be passed to MediaFileManager.add
assert result == "https://mockoutputurl.com"
expected_input = (
str(input_string)
if isinstance(input_string, Path)
else input_string
)
mock_mfm_add.assert_called_once_with(
expected_input, expected_mimetype, "mock_image_id"
)
@parameterized.expand(
[
(
"<svg fake></svg>",
"data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciICBmYWtlPjwvc3ZnPg==",
),
(
"<svg\nfake></svg>",
"data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIApmYWtlPjwvc3ZnPg==",
),
(
"\n<svg fake></svg>",
"data:image/svg+xml;base64,CjxzdmcgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiAgZmFrZT48L3N2Zz4=",
),
(
'<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<!-- Created with Inkscape (http://www.inkscape.org/) -->\n\n<svg\n fake></svg>',
"data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+CjwhLS0gQ3JlYXRlZCB3aXRoIElua3NjYXBlIChodHRwOi8vd3d3Lmlua3NjYXBlLm9yZy8pIC0tPgoKPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIAogZmFrZT48L3N2Zz4=",
),
(
'<?xml version="1.0" encoding="utf-8"?><!-- Generator: Adobe Illustrator 17.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) --><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><svg fake></svg>',
"data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz48IS0tIEdlbmVyYXRvcjogQWRvYmUgSWxsdXN0cmF0b3IgMTcuMS4wLCBTVkcgRXhwb3J0IFBsdWctSW4gLiBTVkcgVmVyc2lvbjogNi4wMCBCdWlsZCAwKSAgLS0+PCFET0NUWVBFIHN2ZyBQVUJMSUMgIi0vL1czQy8vRFREIFNWRyAxLjEvL0VOIiAiaHR0cDovL3d3dy53My5vcmcvR3JhcGhpY3MvU1ZHLzEuMS9EVEQvc3ZnMTEuZHRkIj48c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgIGZha2U+PC9zdmc+",
),
(
'\n<?xml version="1.0" encoding="utf-8"?>\n<!-- Generator: Adobe Illustrator 17.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->\n<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n<svg fake></svg>',
"data:image/svg+xml;base64,Cjw/eG1sIHZlcnNpb249IjEuMCIgZW5jb2Rpbmc9InV0Zi04Ij8+CjwhLS0gR2VuZXJhdG9yOiBBZG9iZSBJbGx1c3RyYXRvciAxNy4xLjAsIFNWRyBFeHBvcnQgUGx1Zy1JbiAuIFNWRyBWZXJzaW9uOiA2LjAwIEJ1aWxkIDApICAtLT4KPCFET0NUWVBFIHN2ZyBQVUJMSUMgIi0vL1czQy8vRFREIFNWRyAxLjEvL0VOIiAiaHR0cDovL3d3dy53My5vcmcvR3JhcGhpY3MvU1ZHLzEuMS9EVEQvc3ZnMTEuZHRkIj4KPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciICBmYWtlPjwvc3ZnPg==",
),
]
)
def test_marshall_svg(self, image_markup: str, expected_prefix: str):
image_list_proto = ImageListProto()
marshall_images(
None,
image_markup,
None,
0,
image_list_proto,
False,
)
img = image_list_proto.imgs[0]
assert img.url.startswith(expected_prefix)
def test_BytesIO_to_bytes(self):
"""Test streamlit.image.BytesIO_to_bytes."""
pass
def test_verify_np_shape(self):
"""Test streamlit.image.verify_np_shape.
Need to test the following:
- check shape not (2, 3)
- check shape 3 but dims 1, 3, 4
- if only one channel convert to just 2 dimensions.
"""
with pytest.raises(StreamlitAPIException) as shape_exc:
st.image(np.ndarray(shape=1))
assert str(shape_exc.value) == "Numpy shape has to be of length 2 or 3."
with pytest.raises(StreamlitAPIException) as shape2_exc:
st.image(np.ndarray(shape=(1, 2, 2)))
assert (
str(shape2_exc.value)
== "Channel can only be 1, 3, or 4 got 2. Shape is (1, 2, 2)"
)
def test_clip_image(self):
"""Test streamlit.image.clip_image.
Need to test the following:
- float
- int
- float with clipping
- int with clipping
"""
pass
@parameterized.expand([("P", True), ("RGBA", True), ("LA", True), ("RGB", False)])
def test_image_may_have_alpha_channel(self, format: str, expected_alpha: bool):
img = Image.new(format, (1, 1))
assert _image_may_have_alpha_channel(img) == expected_alpha
def test_st_image_PIL_image(self):
"""Test st.image with PIL image."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, caption="some caption", width=100, output_format="PNG")
el = self.get_delta_from_queue().new_element
assert el.width_config.pixel_width == 100
assert el.imgs.imgs[0].caption == "some caption"
# locate resultant file in the file manager and check its metadata.
file_id = _calculate_file_id(_pil_to_bytes(img, format="PNG"), "image/png")
media_file = self.media_file_storage.get_file(file_id)
assert media_file is not None
assert media_file.mimetype == "image/png"
assert self.media_file_storage.get_url(file_id) == el.imgs.imgs[0].url
def test_st_image_PIL_array(self):
"""Test st.image with a PIL array."""
imgs = [
Image.new("RGB", (64, 64), color="red"),
Image.new("RGB", (64, 64), color="blue"),
Image.new("RGB", (64, 64), color="green"),
]
st.image(
imgs,
caption=["some caption"] * 3,
width=200,
use_column_width=True,
clamp=True,
output_format="PNG",
)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_stretch
# locate resultant file in the file manager and check its metadata.
for idx in range(len(imgs)):
file_id = _calculate_file_id(
_pil_to_bytes(imgs[idx], format="PNG"), "image/png"
)
assert el.imgs.imgs[idx].caption == "some caption"
media_file = self.media_file_storage.get_file(file_id)
assert media_file is not None
assert media_file.mimetype == "image/png"
assert self.media_file_storage.get_url(file_id) == el.imgs.imgs[idx].url
def test_st_image_with_single_url(self):
"""Test st.image with single url."""
url = "http://server/fake0.jpg"
st.image(url, caption="some caption", width=300)
el = self.get_delta_from_queue().new_element
assert el.width_config.pixel_width == 300
assert el.imgs.imgs[0].caption == "some caption"
assert el.imgs.imgs[0].url == url
def test_st_image_with_list_of_urls(self):
"""Test st.image with list of urls."""
urls = [
"http://server/fake0.jpg",
"http://server/fake1.png",
"http://server/fake2.gif",
]
st.image(urls, caption=["some caption"] * 3, width=300)
el = self.get_delta_from_queue().new_element
assert el.width_config.pixel_width == 300
for idx, url in enumerate(urls):
assert el.imgs.imgs[idx].caption == "some caption"
assert el.imgs.imgs[idx].url == url
def test_st_image_bad_width(self):
"""Test st.image with bad width."""
st.image(
Image.new("RGB", (64, 64), color="red"),
use_column_width=False,
width=-1234,
)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_content
def test_st_image_default_width(self):
"""Test st.image without specifying a use_container_width."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_content
def test_st_image_use_container_width_true(self):
"""Test st.image with use_container_width=True."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, use_container_width=True)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_stretch
def test_st_image_use_container_width_false(self):
"""Test st.image with use_container_width=False."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, use_container_width=False)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_content
def test_st_image_use_container_width_true_and_given_width(self):
"""Test st.image with use_container_width=True and a given width."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, width=100, use_container_width=True)
el = self.get_delta_from_queue().new_element
assert el.width_config.use_stretch
def test_st_image_use_container_width_false_and_given_width(self):
"""Test st.image with use_container_width=False and a given width."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, width=100, use_container_width=False)
el = self.get_delta_from_queue().new_element
assert el.width_config.pixel_width == 100
def test_st_image_use_container_width_and_use_column_width(self):
"""Test st.image with use_container_width and use_column_width."""
img = Image.new("RGB", (64, 64), color="red")
with pytest.raises(StreamlitAPIException) as e:
st.image(img, use_container_width=True, use_column_width=True)
assert (
"`use_container_width` and `use_column_width` cannot be set at the same time."
in str(e.value)
)
def test_st_image_width_stretch(self):
"""Test st.image with width='stretch'."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, width="stretch")
el = self.get_delta_from_queue().new_element
assert el.width_config.use_stretch
def test_st_image_width_content(self):
"""Test st.image with width='content'."""
img = Image.new("RGB", (64, 64), color="red")
st.image(img, width="content")
el = self.get_delta_from_queue().new_element
assert el.width_config.use_content
@parameterized.expand(
[
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
"",
"Invalid width value: ''. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-1,
"Invalid width value: -1. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
None,
"Invalid width value: None. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
)
def test_st_image_invalid_width(self, invalid_width, expected_error_message):
"""Test st.image with invalid width values."""
img = Image.new("RGB", (64, 64), color="red")
with pytest.raises(StreamlitAPIException) as exc_info:
st.image(img, width=invalid_width)
assert str(exc_info.value) == expected_error_message
| ImageProtoTest |
python | tensorflow__tensorflow | tensorflow/python/keras/legacy_tf_layers/core.py | {
"start": 1001,
"end": 8321
} | class ____(keras_layers.Dense, base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Args:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
_reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Args:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor the same shape as `inputs` except the last dimension is of
size `units`.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.dense` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.Dense` instead.')
layer = Dense(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
| Dense |
python | huggingface__transformers | src/transformers/models/llama4/image_processing_llama4_fast.py | {
"start": 11303,
"end": 17180
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = [0.5, 0.5, 0.5]
image_std = [0.5, 0.5, 0.5]
size = {"height": 336, "width": 336}
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
max_patches = 16
resize_to_max_canvas = False
valid_kwargs = Llama4ImageProcessorKwargs
def __init__(self, **kwargs: Unpack[Llama4ImageProcessorKwargs]):
super().__init__(**kwargs)
# Disable compilation here as conversion to bfloat16 causes differences in the output of the compiled and non-compiled versions
@torch.compiler.disable
def rescale_and_normalize(
self,
images: "torch.Tensor",
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
) -> "torch.Tensor":
"""
Rescale and normalize images.
Override to rescale and normalize the images in torch.bfloat16 as in the original implementation
"""
if do_rescale and do_normalize:
images = images.to(dtype=torch.bfloat16) * rescale_factor
images = self.normalize(images, image_mean, image_std)
elif do_rescale:
images = images * rescale_factor
elif do_normalize:
images = self.normalize(images, image_mean, image_std)
return images
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[Llama4ImageProcessorKwargs]) -> BatchFeature:
return super().preprocess(images, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
size: SizeDict,
max_patches: int,
resize_to_max_canvas: bool,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
possible_resolutions = find_supported_resolutions(max_num_chunks=max_patches, patch_size=size)
possible_resolutions = torch.tensor(possible_resolutions, device=images[0].device)
# process images by batch, grouped by shape
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
grouped_processed_images = {}
grouped_aspect_ratios = {}
for shape, stacked_images in grouped_images.items():
image_size = stacked_images.shape[-2:]
target_size = get_best_fit(image_size, possible_resolutions, resize_to_max_canvas=resize_to_max_canvas)
# If target_size requires upscaling, we might want to limit the upscaling to max_upscaling_size
max_upscaling_size = None if resize_to_max_canvas else size.height
if max_upscaling_size is not None:
new_target_height = min(max(image_size[0], max_upscaling_size), target_size[0])
new_target_width = min(max(image_size[1], max_upscaling_size), target_size[1])
target_size_without_distortion = (new_target_height, new_target_width)
# resize to target_size while preserving aspect ratio
new_size_without_distortion = get_max_res_without_distortion(image_size, target_size_without_distortion)
new_size_without_distortion = SizeDict(
height=max(new_size_without_distortion[0], 1), width=max(new_size_without_distortion[1], 1)
)
processed_images = self.resize(
stacked_images,
new_size_without_distortion,
interpolation=interpolation,
)
# pad to target_size to be able to split into tiles
processed_images = pad_to_best_fit(processed_images, target_size)
processed_images = self.rescale_and_normalize(
processed_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
ratio_h, ratio_w = (
target_size[0] // size.height,
target_size[1] // size.width,
)
# split into tiles
processed_images = split_to_tiles(processed_images, ratio_h, ratio_w)
grouped_processed_images[shape] = processed_images
grouped_aspect_ratios[shape] = torch.tensor(
[[ratio_h, ratio_w]] * stacked_images.shape[0], device=images[0].device
)
# add a global tile to the processed tile if there are more than one tile
if ratio_h * ratio_w > 1:
global_tiles = self.resize(
stacked_images,
size,
interpolation=interpolation,
)
global_tiles = self.rescale_and_normalize(
global_tiles, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
grouped_processed_images[shape] = torch.cat([processed_images, global_tiles.unsqueeze(1)], dim=1)
processed_images = reorder_images(grouped_processed_images, grouped_images_index)
aspect_ratios_list = reorder_images(grouped_aspect_ratios, grouped_images_index)
processed_images = torch.cat(processed_images, dim=0) if return_tensors else processed_images
aspect_ratios = torch.stack(aspect_ratios_list, dim=0) if return_tensors else aspect_ratios_list
return BatchFeature(
data={"pixel_values": processed_images, "aspect_ratios": aspect_ratios}, tensor_type=return_tensors
)
__all__ = ["Llama4ImageProcessorFast"]
| Llama4ImageProcessorFast |
python | getsentry__sentry | src/sentry/models/team.py | {
"start": 3843,
"end": 3971
} | class ____:
ACTIVE = 0
PENDING_DELETION = 1
DELETION_IN_PROGRESS = 2
@snowflake_id_model
@region_silo_model
| TeamStatus |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 90171,
"end": 90831
} | class ____(INT8MMTemplateConfigMixin, CPUConfigHeuristic):
"""Int8 MM template heuristic for CPU"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use int8_mm_configs
self.mm_configs = self.int8_mm_configs
# NOTE: overriding exhaustive configs here to be the same as mm_configs
# as we haven't validated exhaustive support here yet
# TODO(coconutruben): remove this once we have validated exhaustive support
# for scaled_mm
self.exhaustive_configs = self.int8_mm_configs
@register_template_heuristic(mm_plus_mm_template.uid, "cpu")
| CPUInt8MMTemplateConfigHeuristic |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 2344,
"end": 5771
} | class ____(Lexer):
"""
Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
lexer.
Just highlights ruby code between the preprocessor directives, other data
is left untouched by the lexer.
All options are also forwarded to the `RubyLexer`.
"""
name = 'ERB'
aliases = ['erb']
mimetypes = ['application/x-ruby-templating']
_block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
def __init__(self, **options):
from pygments.lexers.ruby import RubyLexer
self.ruby_lexer = RubyLexer(**options)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
def analyse_text(text):
if '<%' in text and '%>' in text:
return 0.4
| ErbLexer |
python | numba__numba | numba/np/ufunc/wrappers.py | {
"start": 26649,
"end": 27203
} | class ____(_ArrayArgLoader):
"""
Handle GUFunc argument loading where the shape signature specifies
a scalar "()" but a 1D array is used for the type of the core function.
"""
def _shape_and_strides(self, context, builder):
# Set shape and strides for a 1D size 1 array
one = context.get_constant(types.intp, 1)
zero = context.get_constant(types.intp, 0)
shape = cgutils.pack_array(builder, [one])
strides = cgutils.pack_array(builder, [zero])
return shape, strides
| _ArrayAsScalarArgLoader |
python | huggingface__transformers | src/transformers/models/idefics3/processing_idefics3.py | {
"start": 3313,
"end": 18450
} | class ____(ProcessorMixin):
r"""
Constructs a Idefics3 processor which wraps a LLama tokenizer and Idefics3 image processor into a single processor.
[`Idefics3Processor`] offers all the functionalities of [`Idefics3ImageProcessor`] and [`Idefics3TokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`Idefics3ImageProcessor`):
An instance of [`Idefics3ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 169):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(
self, image_processor, tokenizer=None, image_seq_len: int = 169, chat_template: Optional[str] = None, **kwargs
):
self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True).content
self.image_token = AddedToken("<image>", normalized=False, special=True).content
self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True).content
self.global_image_tag = "<global-img>" # https://github.com/huggingface/transformers/pull/32473/files/8063e5e17362571b693f1db95167f5443a3be1b2#r1734825341
self.image_seq_len = image_seq_len
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.fake_image_token_id = tokenizer.convert_tokens_to_ids(self.fake_image_token)
self.global_image_token_id = tokenizer.convert_tokens_to_ids(self.global_image_tag)
self.row_col_ids = [
tokenizer.convert_tokens_to_ids(f"<row_{i + 1}_col_{j + 1}>") for i in range(6) for j in range(6)
]
# This regex matches one or more occurrences of <global-img> tags (optionally surrounded by newline characters)
# or <row_x_col_y> tags (where x and y are digits, also optionally surrounded by newline characters).
self._regex_to_remove_extra_special_tokens = re.compile(r"(\n?<global-img>\n?|<row_\d+_col_\d+>\n?)+")
tokens_to_add = {
"additional_special_tokens": [
self.fake_image_token,
self.image_token,
self.end_of_utterance_token,
]
}
tokenizer.add_special_tokens(tokens_to_add)
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def _extract_images_from_prompts(self, prompts):
prompt_images = []
for prompt in prompts:
images = []
for elem in prompt:
if is_valid_image(elem):
images.append(elem)
elif is_url(elem):
images.append(load_image(elem))
prompt_images.append(images)
return prompt_images
    def __call__(
        self,
        images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]] = None,
        text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None,
        image_seq_len: Optional[int] = None,
        **kwargs: Unpack[Idefics3ProcessorKwargs],
    ) -> BatchEncoding:
        """
        Processes the input prompts and returns a BatchEncoding.
        Example:
        ```python
        >>> import requests
        >>> from transformers import Idefics3Processor
        >>> from transformers.image_utils import load_image
        >>> processor = Idefics3Processor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
        >>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
        >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
        >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
        >>> image1, image2 = load_image(url1), load_image(url2)
        >>> images = [[image1], [image2]]
        >>> text = [
        ...     "<image>In this image, we see",
        ...     "bla bla bla<image>",
        ... ]
        >>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
        >>> input_ids = outputs.input_ids
        >>> input_tokens = processor.tokenizer.batch_decode(input_ids)
        >>> print(input_tokens)
        ['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
        ```
        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
            text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
                Wherever an image token, `<image>` is encountered it is expanded to
                `<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` * <fake_token_around_image>`.
            image_seq_len (`int`, *optional*):
                The length of the image sequence. If not provided, the default value of self.image_seq_len is used.
                image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2))
            return_tensors (`Union[str, TensorType]`, *optional*):
                If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
                information.
        """
        if text is None and images is None:
            raise ValueError("You must provide either `text` or `images`.")
        # Merge call-site kwargs with the processor/tokenizer defaults, split per modality
        # into output_kwargs["text_kwargs"] / output_kwargs["images_kwargs"].
        output_kwargs = self._merge_kwargs(
            Idefics3ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
        # These two flags are consumed here, not by the tokenizer, so pop them out.
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        n_images_in_text = []
        n_images_in_images = []
        inputs = {}
        if text is not None:
            if isinstance(text, str):
                text = [text]
            # NOTE(review): with `and` this branch only fires when text is neither a list
            # nor has a string first element; a plain list of non-strings slips through —
            # looks like it was meant to be `or`, confirm upstream intent.
            elif not isinstance(text, list) and not isinstance(text[0], str):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            # Number of <image> placeholders per prompt; used to pair prompts with images.
            n_images_in_text = [sample.count(self.image_token) for sample in text]
        if images is not None:
            # Normalize `images` to one list of images per prompt (list of lists).
            if is_image_or_image_url(images):
                images = [[images]]
            elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
                if text is not None:
                    if sum(n_images_in_text) != len(images):
                        raise ValueError(
                            f"The total number of {self.image_token} tokens in the prompts should be the same as the number of images passed."
                            f" Found {sum(n_images_in_text)} {self.image_token} tokens and {len(images)} images."
                        )
                    # Reorganize the images to match the prompts
                    cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))
                    images = [
                        images[cumsum_images_in_text[i] : cumsum_images_in_text[i + 1]]
                        for i in range(len(n_images_in_text))
                    ]
                else:
                    images = [images]
            # NOTE(review): this `and`-chained guard only raises when all three checks fail
            # at once, so some malformed nested inputs pass through silently — verify.
            elif (
                not isinstance(images, (list, tuple))
                and not isinstance(images[0], (list, tuple))
                and not is_image_or_image_url(images[0][0])
            ):
                raise ValueError(
                    "Invalid input images. Please provide a single image or a list of images or a list of list of images."
                )
            n_images_in_images = [len(sample) for sample in images]
            # Load images if they are URLs
            images = [[load_image(im) if is_url(im) else im for im in sample] for sample in images]
            image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
            inputs.update(image_inputs)
            if text is not None:
                if n_images_in_images != n_images_in_text:
                    raise ValueError(
                        f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
                    )
                # Per-image split geometry reported by the image processor; all-zero
                # defaults mean "image was not split into tiles".
                image_rows = inputs.pop("rows", [[0] * n_images for n_images in n_images_in_text])
                image_cols = inputs.pop("cols", [[0] * n_images for n_images in n_images_in_text])
                fake_image_token = self.fake_image_token
                image_token = self.image_token
                global_img_token = self.global_image_tag
                prompt_strings = []
                batch_image_seq_lengths = []
                for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):
                    # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
                    image_prompt_strings = []
                    image_seq_lengths = []
                    for n_rows, n_cols in zip(sample_rows, sample_cols):
                        image_prompt_string = get_image_prompt_string(
                            n_rows,
                            n_cols,
                            image_seq_len,
                            image_token=image_token,
                            fake_token_around_image=fake_image_token,
                            global_img_token=global_img_token,
                        )
                        # Add +2 and +3 for special BOI/EOI/fake_image_wrapper tokens
                        row_length = (self.image_seq_len + 2) * n_cols + 1
                        image_seq_lengths.append((self.image_seq_len + 3) + row_length * n_rows)
                        image_prompt_strings.append(image_prompt_string)
                    batch_image_seq_lengths.append(image_seq_lengths)
                    split_sample = sample.split(image_token)
                    if len(split_sample) == 0:
                        raise ValueError("The image token should be present in the text.")
                    # Place in the image prompt strings where the image tokens are
                    sample = split_sample[0]
                    for i, image_prompt_string in enumerate(image_prompt_strings):
                        sample += image_prompt_string + split_sample[i + 1]
                    prompt_strings.append(sample)
                text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
                self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
                inputs.update(text_inputs)
        elif text is not None:
            # Text-only path: placeholders without images are an error.
            if any(n_images_in_text):
                raise ValueError(
                    f"Found {sum(n_images_in_text)} {self.image_token} tokens in the text but no images were passed."
                )
            text_inputs = self.tokenizer(text=text, **output_kwargs["text_kwargs"])
            inputs.update(text_inputs)
        if return_mm_token_type_ids:
            # Mark every token inside an expanded image span with type id 1.
            # NOTE(review): `batch_image_seq_lengths` is only bound on the images+text
            # path; requesting mm token type ids for text-only input raises NameError —
            # confirm callers never combine the two.
            array_ids = np.array(inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(array_ids)
            for i, seq_lengths in enumerate(batch_image_seq_lengths):
                image_start_positions = np.where(array_ids[i] == self.fake_image_token_id)[0]
                j = 0
                for seq_len in seq_lengths:
                    if j >= len(image_start_positions):
                        break
                    start = image_start_positions[j]
                    end = start + seq_len
                    mm_token_type_ids[i, start:end] = 1
                    # Skip fake-image markers that fall inside the span just labelled.
                    j = np.searchsorted(image_start_positions, end)
            inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
        return BatchFeature(data=inputs, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Idefics3ProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
num_image_row_cols = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
base_image_length = self.image_seq_len + 3
col_length = self.image_seq_len + 2
num_image_tokens = []
num_image_patches = []
for num_patches, num_rows, num_cols in num_image_row_cols:
row_length = col_length * num_cols + 1
num_image_tokens.append(base_image_length + (row_length * num_rows))
num_image_patches.append(num_patches)
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["Idefics3Processor"]
| Idefics3Processor |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.