language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
scipy__scipy
|
scipy/spatial/tests/test_distance.py
|
{
"start": 79562,
"end": 82528
}
|
class ____:
def test_pdist_jaccard_random(self):
eps = 1e-8
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-8
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-8
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_djaccard_random(self):
eps = 1e-8
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-8
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
assert_allclose(Y_test1, Y_right, rtol=eps)
def test_pdist_djaccard_allzeros(self):
eps = 1e-15
Y = pdist(np.zeros((5, 3)), 'jaccard')
assert_allclose(np.zeros(10), Y, rtol=eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-8
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
assert_allclose(Y_test2, Y_right, rtol=eps)
def test_pdist_djaccard_allzeros_nonC(self):
eps = 1e-15
Y = pdist(np.zeros((5, 3)), 'test_jaccard')
assert_allclose(np.zeros(10), Y, rtol=eps)
def test_pdist_jaccard_mtica1(self):
m = wjaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = wjaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wjaccard(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_non_01_input(self):
# Non-0/1 numeric input should be cast to bool before computation.
# See gh-21176.
x = np.array([-10, 2.5, 0]) # [True, True, False]
y = np.array([ 2, -5, 2]) # [True, True, True]
eps = np.finfo(float).eps
assert_allclose(jaccard(x, y), 1/3, rtol=eps)
assert_allclose(cdist([x], [y], 'jaccard'), [[1/3]])
assert_allclose(pdist([x, y], 'jaccard'), [1/3])
|
TestJaccard
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_roberta.py
|
{
"start": 24474,
"end": 33715
}
|
class ____(RobertaPreTrainedModel):
_no_split_modules = ["RobertaEmbeddings", "RobertaLayer"]
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
input_shape=input_shape,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
def _create_attention_masks(
self,
input_shape,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = self._update_full_mask(
attention_mask,
embedding_output,
)
elif attention_mask is not None and attention_mask.dim() == 3:
if "flash" in self.config._attn_implementation or self.config._attn_implementation == "flex_attention":
raise ValueError(
"Passing attention mask with a 3D/4D shape does not work with type "
f"{self.config._attn_implementation} - please use either `sdpa` or `eager` instead."
)
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(
encoder_hidden_states,
encoder_attention_mask,
embedding_output.shape[:2],
embedding_output,
)
else:
if "flash" in self.config._attn_implementation or self.config._attn_implementation == "flex_attention":
raise ValueError(
"Passing attention mask with a 3D/4D shape does not work with type "
f"{self.config._attn_implementation} - please use either `sdpa` or `eager` instead."
)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return attention_mask, encoder_attention_mask
def _update_full_mask(
self,
attention_mask: Union[torch.Tensor, None],
inputs_embeds: torch.Tensor,
):
if attention_mask is not None:
if "flash" in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == "sdpa":
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(
self,
encoder_hidden_states: Union[torch.Tensor, None],
encoder_attention_mask: Union[torch.Tensor, None],
input_shape: torch.Size,
inputs_embeds: torch.Tensor,
):
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if "flash" in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == "sdpa":
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
encoder_attention_mask,
inputs_embeds.dtype,
tgt_len=input_shape[-1],
)
elif self.config._attn_implementation == "flex_attention":
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(
encoder_attention_mask,
query_length=input_shape[-1],
is_causal=False,
)
else:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _prepare_4d_attention_mask(
encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
)
return encoder_attention_mask
|
RobertaModel
|
python
|
sympy__sympy
|
sympy/functions/combinatorial/factorials.py
|
{
"start": 16377,
"end": 22498
}
|
class ____(CombinatorialFunction):
r"""
Rising factorial (also called Pochhammer symbol [1]_) is a double valued
function arising in concrete mathematics, hypergeometric functions
and series expansions. It is defined by:
.. math:: \texttt{rf(y, k)} = (x)^k = x \cdot (x+1) \cdots (x+k-1)
where `x` can be arbitrary expression and `k` is an integer. For
more information check "Concrete mathematics" by Graham, pp. 66
or visit https://mathworld.wolfram.com/RisingFactorial.html page.
When `x` is a `~.Poly` instance of degree $\ge 1$ with a single variable,
`(x)^k = x(y) \cdot x(y+1) \cdots x(y+k-1)`, where `y` is the
variable of `x`. This is as described in [2]_.
Examples
========
>>> from sympy import rf, Poly
>>> from sympy.abc import x
>>> rf(x, 0)
1
>>> rf(1, 5)
120
>>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
True
>>> rf(Poly(x**3, x), 2)
Poly(x**6 + 3*x**5 + 3*x**4 + x**3, x, domain='ZZ')
Rewriting is complicated unless the relationship between
the arguments is known, but rising factorial can
be rewritten in terms of gamma, factorial, binomial,
and falling factorial.
>>> from sympy import Symbol, factorial, ff, binomial, gamma
>>> n = Symbol('n', integer=True, positive=True)
>>> R = rf(n, n + 2)
>>> for i in (rf, ff, factorial, binomial, gamma):
... R.rewrite(i)
...
RisingFactorial(n, n + 2)
FallingFactorial(2*n + 1, n + 2)
factorial(2*n + 1)/factorial(n - 1)
binomial(2*n + 1, n + 2)*factorial(n + 2)
gamma(2*n + 2)/gamma(n)
See Also
========
factorial, factorial2, FallingFactorial
References
==========
.. [1] https://en.wikipedia.org/wiki/Pochhammer_symbol
.. [2] Peter Paule, "Greatest Factorial Factorization and Symbolic
Summation", Journal of Symbolic Computation, vol. 20, pp. 235-268,
1995.
"""
@classmethod
def eval(cls, x, k):
x = sympify(x)
k = sympify(k)
if x is S.NaN or k is S.NaN:
return S.NaN
elif x is S.One:
return factorial(k)
elif k.is_Integer:
if k.is_zero:
return S.One
else:
if k.is_positive:
if x is S.Infinity:
return S.Infinity
elif x is S.NegativeInfinity:
if k.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return reduce(lambda r, i:
r*(x.shift(i)),
range(int(k)), 1)
else:
return reduce(lambda r, i: r*(x + i),
range(int(k)), 1)
else:
if x is S.Infinity or x is S.NegativeInfinity:
return S.Infinity
else:
if isinstance(x, Poly):
gens = x.gens
if len(gens)!= 1:
raise ValueError("rf only defined for "
"polynomials on one generator")
else:
return 1/reduce(lambda r, i:
r*(x.shift(-i)),
range(1, abs(int(k)) + 1), 1)
else:
return 1/reduce(lambda r, i:
r*(x - i),
range(1, abs(int(k)) + 1), 1)
if k.is_integer == False:
if x.is_integer and x.is_negative:
return S.Zero
def _eval_rewrite_as_gamma(self, x, k, piecewise=True, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.special.gamma_functions import gamma
if not piecewise:
if (x <= 0) == True:
return S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1)
return gamma(x + k) / gamma(x)
return Piecewise(
(gamma(x + k) / gamma(x), x > 0),
(S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1), True))
def _eval_rewrite_as_FallingFactorial(self, x, k, **kwargs):
return FallingFactorial(x + k - 1, k)
def _eval_rewrite_as_factorial(self, x, k, **kwargs):
from sympy.functions.elementary.piecewise import Piecewise
if x.is_integer and k.is_integer:
return Piecewise(
(factorial(k + x - 1)/factorial(x - 1), x > 0),
(S.NegativeOne**k*factorial(-x)/factorial(-k - x), True))
def _eval_rewrite_as_binomial(self, x, k, **kwargs):
if k.is_integer:
return factorial(k) * binomial(x + k - 1, k)
def _eval_rewrite_as_tractable(self, x, k, limitvar=None, **kwargs):
from sympy.functions.special.gamma_functions import gamma
if limitvar:
k_lim = k.subs(limitvar, S.Infinity)
if k_lim is S.Infinity:
return (gamma(x + k).rewrite('tractable', deep=True) / gamma(x))
elif k_lim is S.NegativeInfinity:
return (S.NegativeOne**k*gamma(1 - x) / gamma(-k - x + 1).rewrite('tractable', deep=True))
return self.rewrite(gamma).rewrite('tractable', deep=True)
def _eval_is_integer(self):
return fuzzy_and((self.args[0].is_integer, self.args[1].is_integer,
self.args[1].is_nonnegative))
|
RisingFactorial
|
python
|
optuna__optuna
|
optuna/_transform.py
|
{
"start": 301,
"end": 11926
}
|
class ____:
"""Transform a search space and parameter configurations to continuous space.
The search space bounds and parameter configurations are represented as ``numpy.ndarray``s and
transformed into continuous space. Bounds and parameters associated with categorical
distributions are one-hot encoded. Parameter configurations in this space can additionally be
untransformed, or mapped back to the original space. This type of
transformation/untransformation is useful for e.g. implementing samplers without having to
condition on distribution types before sampling parameter values.
Args:
search_space:
The search space. If any transformations are to be applied, parameter configurations
are assumed to hold parameter values for all of the distributions defined in this
search space. Otherwise, assertion failures will be raised.
transform_log:
If :obj:`True`, apply log/exp operations to the bounds and parameters with
corresponding distributions in log space during transformation/untransformation.
Should always be :obj:`True` if any parameters are going to be sampled from the
transformed space.
transform_step:
If :obj:`True`, offset the lower and higher bounds by a half step each, increasing the
space by one step. This allows fair sampling for values close to the bounds.
Should always be :obj:`True` if any parameters are going to be sampled from the
transformed space.
transform_0_1:
If :obj:`True`, apply a linear transformation to the bounds and parameters so that
they are in the unit cube.
Attributes:
bounds:
Constructed bounds from the given search space.
column_to_encoded_columns:
Constructed mapping from original parameter column index to encoded column indices.
encoded_column_to_column:
Constructed mapping from encoded column index to original parameter column index.
Note:
Parameter values are not scaled to the unit cube.
Note:
``transform_log`` and ``transform_step`` are useful for constructing bounds and parameters
without any actual transformations by setting those arguments to :obj:`False`. This is
needed for e.g. the hyperparameter importance assessments.
"""
def __init__(
self,
search_space: dict[str, BaseDistribution],
transform_log: bool = True,
transform_step: bool = True,
transform_0_1: bool = False,
) -> None:
bounds, column_to_encoded_columns, encoded_column_to_column = _transform_search_space(
search_space, transform_log, transform_step
)
self._raw_bounds = bounds
self._column_to_encoded_columns = column_to_encoded_columns
self._encoded_column_to_column = encoded_column_to_column
self._search_space = search_space
self._transform_log = transform_log
self._transform_0_1 = transform_0_1
@property
def bounds(self) -> np.ndarray:
if self._transform_0_1:
return np.array([[0.0, 1.0]] * self._raw_bounds.shape[0])
else:
return self._raw_bounds
@property
def column_to_encoded_columns(self) -> list[np.ndarray]:
return self._column_to_encoded_columns
@property
def encoded_column_to_column(self) -> np.ndarray:
return self._encoded_column_to_column
def transform(self, params: dict[str, Any]) -> np.ndarray:
"""Transform a parameter configuration from actual values to continuous space.
Args:
params:
A parameter configuration to transform.
Returns:
A 1-dimensional ``numpy.ndarray`` holding the transformed parameters in the
configuration.
"""
trans_params = np.zeros(self._raw_bounds.shape[0], dtype=np.float64)
bound_idx = 0
for name, distribution in self._search_space.items():
assert name in params, "Parameter configuration must contain all distributions."
param = params[name]
if isinstance(distribution, CategoricalDistribution):
choice_idx = int(distribution.to_internal_repr(param))
trans_params[bound_idx + choice_idx] = 1
bound_idx += len(distribution.choices)
else:
trans_params[bound_idx] = _transform_numerical_param(
param, distribution, self._transform_log
)
bound_idx += 1
if self._transform_0_1:
single_mask = self._raw_bounds[:, 0] == self._raw_bounds[:, 1]
trans_params[single_mask] = 0.5
trans_params[~single_mask] = (
trans_params[~single_mask] - self._raw_bounds[~single_mask, 0]
) / (self._raw_bounds[~single_mask, 1] - self._raw_bounds[~single_mask, 0])
return trans_params
def untransform(self, trans_params: np.ndarray) -> dict[str, Any]:
"""Untransform a parameter configuration from continuous space to actual values.
Args:
trans_params:
A 1-dimensional ``numpy.ndarray`` in the transformed space corresponding to a
parameter configuration.
Returns:
A dictionary of an untransformed parameter configuration. Keys are parameter names.
Values are untransformed parameter values.
"""
assert trans_params.shape == (self._raw_bounds.shape[0],)
if self._transform_0_1:
trans_params = self._raw_bounds[:, 0] + trans_params * (
self._raw_bounds[:, 1] - self._raw_bounds[:, 0]
)
params = {}
for (name, distribution), encoded_columns in zip(
self._search_space.items(), self.column_to_encoded_columns
):
trans_param = trans_params[encoded_columns]
if isinstance(distribution, CategoricalDistribution):
# Select the highest rated one-hot encoding.
param = distribution.to_external_repr(trans_param.argmax())
else:
param = _untransform_numerical_param(
trans_param.item(), distribution, self._transform_log
)
params[name] = param
return params
def _transform_search_space(
search_space: dict[str, BaseDistribution], transform_log: bool, transform_step: bool
) -> tuple[np.ndarray, list[np.ndarray], np.ndarray]:
assert len(search_space) > 0, "Cannot transform if no distributions are given."
n_bounds = sum(
len(d.choices) if isinstance(d, CategoricalDistribution) else 1
for d in search_space.values()
)
bounds = np.empty((n_bounds, 2), dtype=np.float64)
column_to_encoded_columns: list[np.ndarray] = []
encoded_column_to_column = np.empty(n_bounds, dtype=np.int64)
bound_idx = 0
for distribution in search_space.values():
d = distribution
if isinstance(d, CategoricalDistribution):
n_choices = len(d.choices)
bounds[bound_idx : bound_idx + n_choices] = (0, 1) # Broadcast across all choices.
encoded_columns = np.arange(bound_idx, bound_idx + n_choices)
encoded_column_to_column[encoded_columns] = len(column_to_encoded_columns)
column_to_encoded_columns.append(encoded_columns)
bound_idx += n_choices
elif isinstance(
d,
(
FloatDistribution,
IntDistribution,
),
):
if isinstance(d, FloatDistribution):
if d.step is not None:
half_step = 0.5 * d.step if transform_step else 0.0
bds = (
_transform_numerical_param(d.low, d, transform_log) - half_step,
_transform_numerical_param(d.high, d, transform_log) + half_step,
)
else:
bds = (
_transform_numerical_param(d.low, d, transform_log),
_transform_numerical_param(d.high, d, transform_log),
)
elif isinstance(d, IntDistribution):
half_step = 0.5 * d.step if transform_step else 0.0
if d.log:
bds = (
_transform_numerical_param(d.low - half_step, d, transform_log),
_transform_numerical_param(d.high + half_step, d, transform_log),
)
else:
bds = (
_transform_numerical_param(d.low, d, transform_log) - half_step,
_transform_numerical_param(d.high, d, transform_log) + half_step,
)
else:
assert False, "Should not reach. Unexpected distribution."
bounds[bound_idx] = bds
encoded_column = np.atleast_1d(bound_idx)
encoded_column_to_column[encoded_column] = len(column_to_encoded_columns)
column_to_encoded_columns.append(encoded_column)
bound_idx += 1
else:
assert False, "Should not reach. Unexpected distribution."
assert bound_idx == n_bounds
return bounds, column_to_encoded_columns, encoded_column_to_column
def _transform_numerical_param(
param: int | float, distribution: BaseDistribution, transform_log: bool
) -> float:
d = distribution
if isinstance(d, CategoricalDistribution):
assert False, "Should not reach. Should be one-hot encoded."
elif isinstance(d, FloatDistribution):
if d.log:
trans_param = math.log(param) if transform_log else float(param)
else:
trans_param = float(param)
elif isinstance(d, IntDistribution):
if d.log:
trans_param = math.log(param) if transform_log else float(param)
else:
trans_param = float(param)
else:
assert False, "Should not reach. Unexpected distribution."
return trans_param
def _untransform_numerical_param(
trans_param: float, distribution: BaseDistribution, transform_log: bool
) -> int | float:
d = distribution
if isinstance(d, CategoricalDistribution):
assert False, "Should not reach. Should be one-hot encoded."
elif isinstance(d, FloatDistribution):
if d.log:
param = math.exp(trans_param) if transform_log else trans_param
if d.single():
pass
else:
param = min(param, np.nextafter(d.high, d.high - 1))
elif d.step is not None:
param = float(
np.clip(np.round((trans_param - d.low) / d.step) * d.step + d.low, d.low, d.high)
)
else:
if d.single():
param = trans_param
else:
param = min(trans_param, np.nextafter(d.high, d.high - 1))
elif isinstance(d, IntDistribution):
if d.log:
if transform_log:
param = int(np.clip(np.round(math.exp(trans_param)), d.low, d.high))
else:
param = int(trans_param)
else:
param = int(
np.clip(np.round((trans_param - d.low) / d.step) * d.step + d.low, d.low, d.high)
)
else:
assert False, "Should not reach. Unexpected distribution."
return param
|
_SearchSpaceTransform
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/kernel_tests/from_tensor_slices_test.py
|
{
"start": 16021,
"end": 17520
}
|
class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_tensor_slices_dataset(self, components, options=None):
dataset = dataset_ops.Dataset.from_tensor_slices(components)
if options:
dataset = dataset.with_options(options)
return dataset
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def test(self, verify_fn, symbolic_checkpoint):
# Equal length components
components = (np.tile(np.array([[1], [2], [3], [4]]),
20), np.tile(np.array([[12], [13], [14], [15]]),
22), np.array([37.0, 38.0, 39.0, 40.0]))
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
verify_fn(
self,
lambda: self._build_tensor_slices_dataset(components, options),
num_outputs=4)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testDict(self, verify_fn):
dict_components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
verify_fn(
self,
lambda: self._build_tensor_slices_dataset(dict_components),
num_outputs=3)
|
FromTensorSlicesCheckpointTest
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-docker/prefect_docker/worker.py
|
{
"start": 2322,
"end": 14625
}
|
class ____(BaseJobConfiguration):
"""
Configuration class used by the Docker worker.
An instance of this class is passed to the Docker worker's `run` method
for each flow run. It contains all the information necessary to execute the
flow run as a Docker container.
Attributes:
name: The name to give to created Docker containers.
command: The command executed in created Docker containers to kick off
flow run execution.
env: The environment variables to set in created Docker containers.
labels: The labels to set on created Docker containers.
image: The image reference of a container image to use for created jobs.
If not set, the latest Prefect image will be used.
image_pull_policy: The image pull policy to use when pulling images.
networks: Docker networks that created containers should be connected to.
network_mode: The network mode for the created containers (e.g. host, bridge).
If 'networks' is set, this cannot be set.
auto_remove: If set, containers will be deleted on completion.
volumes: Docker volumes that should be mounted in created containers.
stream_output: If set, the output from created containers will be streamed
to local standard output.
mem_limit: Memory limit of created containers. Accepts a value
with a unit identifier (e.g. 100000b, 1000k, 128m, 1g.) If a value is
given without a unit, bytes are assumed.
memswap_limit: Total memory (memory + swap), -1 to disable swap. Should only be
set if `mem_limit` is also set. If `mem_limit` is set, this defaults to
allowing the container to use as much swap as memory. For example, if
`mem_limit` is 300m and `memswap_limit` is not set, containers can use
600m in total of memory and swap.
privileged: Give extended privileges to created containers.
container_create_kwargs: Extra args for docker py when creating container.
"""
image: str = Field(
default_factory=get_prefect_image_name,
description="The image reference of a container image to use for created jobs. "
"If not set, the latest Prefect image will be used.",
examples=["docker.io/prefecthq/prefect:3-latest"],
)
registry_credentials: Optional[DockerRegistryCredentials] = Field(
default=None,
description="Credentials for logging into a Docker registry to pull"
" images from.",
)
image_pull_policy: Optional[Literal["IfNotPresent", "Always", "Never"]] = Field(
default=None,
description="The image pull policy to use when pulling images.",
)
networks: list[str] = Field(
default_factory=list,
description="Docker networks that created containers should be connected to.",
)
network_mode: Optional[str] = Field(
default=None,
description=(
"The network mode for the created containers (e.g. host, bridge). If"
" 'networks' is set, this cannot be set."
),
)
auto_remove: bool = Field(
default=False,
description="If set, containers will be deleted on completion.",
)
volumes: list[VolumeStr] = Field(
default_factory=list,
description="A list of volume to mount into created containers.",
examples=["/my/local/path:/path/in/container"],
)
stream_output: bool = Field(
default=True,
description=(
"If set, the output from created containers will be streamed to local "
"standard output."
),
)
mem_limit: Optional[str] = Field(
default=None,
title="Memory Limit",
description=(
"Memory limit of created containers. Accepts a value "
"with a unit identifier (e.g. 100000b, 1000k, 128m, 1g.) "
"If a value is given without a unit, bytes are assumed."
),
)
memswap_limit: Optional[str] = Field(
default=None,
title="Memory Swap Limit",
description=(
"Total memory (memory + swap), -1 to disable swap. Should only be "
"set if `mem_limit` is also set. If `mem_limit` is set, this defaults to"
"allowing the container to use as much swap as memory. For example, if "
"`mem_limit` is 300m and `memswap_limit` is not set, containers can use "
"600m in total of memory and swap."
),
)
privileged: bool = Field(
default=False,
description="Give extended privileges to created container.",
)
container_create_kwargs: Optional[dict[str, Any]] = Field(
default=None,
title="Container Configuration",
description=(
"Configuration for containers created by workers. See the [`docker-py` documentation](https://docker-py.readthedocs.io/en/stable/containers.html) for accepted values."
),
)
def _convert_labels_to_docker_format(self, labels: dict[str, str]):
"""Converts labels to the format expected by Docker."""
labels = labels or {}
new_labels = {}
for name, value in labels.items():
if "/" in name:
namespace, key = name.split("/", maxsplit=1)
new_namespace = ".".join(reversed(namespace.split(".")))
new_labels[f"{new_namespace}.{key}"] = value
else:
new_labels[name] = value
return new_labels
def _slugify_container_name(self) -> Optional[str]:
"""
Generates a container name to match the configured name, ensuring it is Docker
compatible.
"""
# Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+` in the end
if not self.name:
return None
return (
slugify(
self.name,
lowercase=False,
# Docker does not limit length but URL limits apply eventually so
# limit the length for safety
max_length=250,
# Docker allows these characters for container names
regex_pattern=r"[^a-zA-Z0-9_.-]+",
).lstrip(
# Docker does not allow leading underscore, dash, or period
"_-."
)
# Docker does not allow 0 character names so cast to null if the name is
# empty after slufification
or None
)
def _base_environment(self):
"""
If the API URL has been set update the value to ensure connectivity
when using a bridge network by updating local connections to use the
docker internal host unless the network mode is "host" where localhost
is available already.
"""
base_env = super()._base_environment()
network_mode = self.get_network_mode()
if (
"PREFECT_API_URL" in base_env
and base_env["PREFECT_API_URL"] is not None
and network_mode != "host"
):
base_env["PREFECT_API_URL"] = (
base_env["PREFECT_API_URL"]
.replace("localhost", "host.docker.internal")
.replace("127.0.0.1", "host.docker.internal")
)
return base_env
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: "DeploymentResponse | None" = None,
flow: "APIFlow | None" = None,
work_pool: "WorkPool | None" = None,
worker_name: "str | None" = None,
):
"""
Prepares the flow run by setting the image, labels, and name
attributes.
"""
super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
self.image = self.image or get_prefect_image_name()
self.labels = self._convert_labels_to_docker_format(
{**self.labels, **CONTAINER_LABELS}
)
self.name = self._slugify_container_name()
def get_network_mode(self) -> Optional[str]:
"""
Returns the network mode to use for the container based on the configured
options and the platform.
"""
# User's value takes precedence; this may collide with the incompatible options
# mentioned below.
if self.network_mode:
if sys.platform != "linux" and self.network_mode == "host":
warnings.warn(
f"{self.network_mode!r} network mode is not supported on platform "
f"{sys.platform!r} and may not work as intended."
)
return self.network_mode
# Network mode is not compatible with networks or ports (we do not support ports
# yet though)
if self.networks:
return None
# Check for a local API connection
api_url = self.env.get("PREFECT_API_URL", PREFECT_API_URL.value())
if api_url:
try:
_, netloc, _, _, _, _ = urllib.parse.urlparse(api_url)
except Exception as exc:
warnings.warn(
f"Failed to parse host from API URL {api_url!r} with exception: "
f"{exc}\nThe network mode will not be inferred."
)
return None
host = netloc.split(":")[0]
# If using a locally hosted API, use a host network on linux
if sys.platform == "linux" and (host == "127.0.0.1" or host == "localhost"):
return "host"
# Default to unset
return None
def get_extra_hosts(self, docker_client: DockerClient) -> Optional[dict[str, str]]:
"""
A host.docker.internal -> host-gateway mapping is necessary for communicating
with the API on Linux machines. Docker Desktop on macOS will automatically
already have this mapping.
"""
if sys.platform == "linux" and (
# Do not warn if the user has specified a host manually that does not use
# a local address
"PREFECT_API_URL" not in self.env
or re.search(
".*(localhost)|(127.0.0.1)|(host.docker.internal).*",
self.env["PREFECT_API_URL"],
)
):
user_version = packaging.version.parse(
format_outlier_version_name(docker_client.version()["Version"])
)
required_version = packaging.version.parse("20.10.0")
if user_version < required_version:
warnings.warn(
"`host.docker.internal` could not be automatically resolved to"
" your local ip address. This feature is not supported on Docker"
f" Engine v{user_version}, upgrade to v{required_version}+ if you"
" encounter issues."
)
return {}
else:
# Compatibility for linux -- https://github.com/docker/cli/issues/2290
# Only supported by Docker v20.10.0+ which is our minimum recommend
# version
return {"host.docker.internal": "host-gateway"}
def _determine_image_pull_policy(self) -> ImagePullPolicy:
"""
Determine the appropriate image pull policy.
1. If they specified an image pull policy, use that.
2. If they did not specify an image pull policy and gave us
the "latest" tag, use ImagePullPolicy.always.
3. If they did not specify an image pull policy and did not
specify a tag, use ImagePullPolicy.always.
4. If they did not specify an image pull policy and gave us
a tag other than "latest", use ImagePullPolicy.if_not_present.
This logic matches the behavior of Kubernetes.
See:https://kubernetes.io/docs/concepts/containers/images/#imagepullpolicy-defaulting
"""
if not self.image_pull_policy:
_, tag = parse_image_tag(self.image)
if tag == "latest" or not tag:
return ImagePullPolicy.ALWAYS
return ImagePullPolicy.IF_NOT_PRESENT
return ImagePullPolicy(self.image_pull_policy)
|
DockerWorkerJobConfiguration
|
python
|
aio-libs__aiohttp
|
tests/test_test_utils.py
|
{
"start": 2638,
"end": 12649
}
|
class ____(AioHTTPTestCase):
async def get_application(self) -> web.Application:
return _create_example_app()
async def test_example_with_loop(self) -> None:
request = await self.client.request("GET", "/")
assert request.status == 200
text = await request.text()
assert _hello_world_str == text
async def test_example_without_explicit_loop(self) -> None:
request = await self.client.request("GET", "/")
assert request.status == 200
text = await request.text()
assert _hello_world_str == text
async def test_inner_example(self) -> None:
async def test_get_route() -> None:
resp = await self.client.request("GET", "/")
assert resp.status == 200
text = await resp.text()
assert _hello_world_str == text
await test_get_route()
def test_get_route(loop: asyncio.AbstractEventLoop, test_client: _TestClient) -> None:
async def test_get_route() -> None:
resp = await test_client.request("GET", "/")
assert resp.status == 200
text = await resp.text()
assert _hello_world_str == text
loop.run_until_complete(test_get_route())
async def test_client_websocket(
loop: asyncio.AbstractEventLoop, test_client: _TestClient
) -> None:
resp = await test_client.ws_connect("/websocket")
await resp.send_str("foo")
msg = await resp.receive()
assert msg.type == aiohttp.WSMsgType.TEXT
assert "foo" in msg.data
await resp.send_str("close")
msg = await resp.receive()
assert msg.type == aiohttp.WSMsgType.CLOSE
async def test_client_cookie(
loop: asyncio.AbstractEventLoop, test_client: _TestClient
) -> None:
assert not test_client.session.cookie_jar
await test_client.get("/cookie")
cookies = list(test_client.session.cookie_jar)
assert cookies[0].key == "cookie"
assert cookies[0].value == "val"
@pytest.mark.parametrize(
"method", ["get", "post", "options", "post", "put", "patch", "delete"]
)
async def test_test_client_methods(
method: str, loop: asyncio.AbstractEventLoop, test_client: _TestClient
) -> None:
resp = await getattr(test_client, method)("/")
assert resp.status == 200
text = await resp.text()
assert _hello_world_str == text
async def test_test_client_head(
loop: asyncio.AbstractEventLoop, test_client: _TestClient
) -> None:
resp = await test_client.head("/")
assert resp.status == 200
@pytest.mark.parametrize("headers", [{"token": "x"}, CIMultiDict({"token": "x"}), {}])
def test_make_mocked_request(headers: Mapping[str, str]) -> None:
req = make_mocked_request("GET", "/", headers=headers)
assert req.method == "GET"
assert req.path == "/"
assert isinstance(req, web.Request)
assert isinstance(req.headers, CIMultiDictProxy)
def test_make_mocked_request_sslcontext() -> None:
req = make_mocked_request("GET", "/")
assert req.transport is not None
assert req.transport.get_extra_info("sslcontext") is None
def test_make_mocked_request_unknown_extra_info() -> None:
req = make_mocked_request("GET", "/")
assert req.transport is not None
assert req.transport.get_extra_info("unknown_extra_info") is None
def test_make_mocked_request_app() -> None:
app = mock.Mock()
req = make_mocked_request("GET", "/", app=app)
assert req.app is app
def test_make_mocked_request_app_can_store_values() -> None:
req = make_mocked_request("GET", "/")
req.app["a_field"] = "a_value"
assert req.app["a_field"] == "a_value"
def test_make_mocked_request_app_access_non_existing() -> None:
req = make_mocked_request("GET", "/")
with pytest.raises(AttributeError):
req.app.foo # type: ignore[attr-defined]
def test_make_mocked_request_match_info() -> None:
req = make_mocked_request("GET", "/", match_info={"a": "1", "b": "2"})
assert req.match_info == {"a": "1", "b": "2"}
def test_make_mocked_request_content() -> None:
payload = mock.Mock()
req = make_mocked_request("GET", "/", payload=payload)
assert req.content is payload
async def test_make_mocked_request_empty_payload() -> None:
req = make_mocked_request("GET", "/")
assert await req.read() == b""
def test_make_mocked_request_transport() -> None:
transport = mock.Mock()
req = make_mocked_request("GET", "/", transport=transport)
assert req.transport is transport
async def test_test_client_props() -> None:
app = _create_example_app()
server = TestServer(app, scheme="http", host="127.0.0.1")
client = TestClient(server)
assert client.scheme == "http"
assert client.host == "127.0.0.1"
assert client.port == 0
async with client:
assert isinstance(client.port, int)
assert client.server is not None
if sys.version_info >= (3, 11):
assert_type(client.app, web.Application)
assert client.app is not None
assert client.port == 0
async def test_test_client_raw_server_props() -> None:
async def hello(request: web.BaseRequest) -> NoReturn:
assert False
server = RawTestServer(hello, scheme="http", host="127.0.0.1")
client = TestClient(server)
assert client.scheme == "http"
assert client.host == "127.0.0.1"
assert client.port == 0
async with client:
assert isinstance(client.port, int)
assert client.server is not None
if sys.version_info >= (3, 11):
assert_type(client.app, None)
assert client.app is None
assert client.port == 0
async def test_test_server_context_manager(loop: asyncio.AbstractEventLoop) -> None:
app = _create_example_app()
async with TestServer(app) as server:
client = aiohttp.ClientSession()
resp = await client.head(server.make_url("/"))
assert resp.status == 200
resp.close()
await client.close()
def test_client_unsupported_arg() -> None:
with pytest.raises(TypeError) as e:
TestClient("string") # type: ignore[call-overload]
assert (
str(e.value) == "server must be TestServer instance, found type: <class 'str'>"
)
async def test_server_make_url_yarl_compatibility(
loop: asyncio.AbstractEventLoop,
) -> None:
app = _create_example_app()
async with TestServer(app) as server:
make_url = server.make_url
assert make_url(URL("/foo")) == make_url("/foo")
with pytest.raises(AssertionError):
make_url("http://foo.com")
with pytest.raises(AssertionError):
make_url(URL("http://foo.com"))
@pytest.mark.xfail(reason="https://github.com/pytest-dev/pytest/issues/13546")
def test_testcase_no_app(
testdir: pytest.Testdir, loop: asyncio.AbstractEventLoop
) -> None:
testdir.makepyfile(
"""
from aiohttp.test_utils import AioHTTPTestCase
class InvalidTestCase(AioHTTPTestCase):
def test_noop(self) -> None:
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*TypeError*"])
async def test_disable_retry_persistent_connection(
aiohttp_client: AiohttpClient,
) -> None:
num_requests = 0
async def handler(request: web.Request) -> web.Response:
nonlocal num_requests
num_requests += 1
request.protocol.force_close()
return web.Response()
app = web.Application()
app.router.add_get("/", handler)
client = await aiohttp_client(app)
with pytest.raises(aiohttp.ServerDisconnectedError):
await client.get("/")
assert num_requests == 1
async def test_server_context_manager(
app: web.Application, loop: asyncio.AbstractEventLoop
) -> None:
async with TestServer(app) as server:
async with aiohttp.ClientSession() as client:
async with client.head(server.make_url("/")) as resp:
assert resp.status == 200
@pytest.mark.parametrize(
"method", ["head", "get", "post", "options", "post", "put", "patch", "delete"]
)
async def test_client_context_manager_response(
method: str, app: web.Application, loop: asyncio.AbstractEventLoop
) -> None:
async with TestClient(TestServer(app)) as client:
async with getattr(client, method)("/") as resp:
assert resp.status == 200
if method != "head":
text = await resp.text()
assert "Hello, world" in text
async def test_custom_port(
loop: asyncio.AbstractEventLoop,
app: web.Application,
unused_port_socket: socket.socket,
) -> None:
sock = unused_port_socket
port = sock.getsockname()[1]
client = TestClient(
TestServer(app, port=port, socket_factory=lambda *args, **kwargs: sock)
)
await client.start_server()
assert client.server.port == port
resp = await client.get("/")
assert resp.status == 200
text = await resp.text()
assert _hello_world_str == text
await client.close()
@pytest.mark.parametrize(
("hostname", "expected_host"),
[("127.0.0.1", "127.0.0.1"), ("localhost", "127.0.0.1"), ("::1", "::1")],
)
async def test_test_server_hostnames(
hostname: str, expected_host: str, loop: asyncio.AbstractEventLoop
) -> None:
app = _create_example_app()
server = TestServer(app, host=hostname, loop=loop)
async with server:
pass
assert server.host == expected_host
@pytest.mark.parametrize("test_server_cls", [TestServer, RawTestServer])
async def test_base_test_server_socket_factory(
test_server_cls: type, app: web.Application, loop: asyncio.AbstractEventLoop
) -> None:
factory_called = False
def factory(host: str, port: int, family: socket.AddressFamily) -> socket.socket:
nonlocal factory_called
factory_called = True
return get_port_socket(host, port, family)
server = test_server_cls(app, loop=loop, socket_factory=factory)
async with server:
pass
assert factory_called
|
TestAioHTTPTestCase
|
python
|
walkccc__LeetCode
|
solutions/2254. Design Video Sharing Platform/2254.py
|
{
"start": 0,
"end": 1704
}
|
class ____:
def __init__(self):
self.currVideoId = 0
self.usedIds = []
self.videoIdToVideo = {}
self.videoIdToViews = collections.Counter()
self.videoIdToLikes = collections.Counter()
self.videoIdToDislikes = collections.Counter()
def upload(self, video: str) -> int:
videoId = self._getVideoId()
self.videoIdToVideo[videoId] = video
return videoId
def remove(self, videoId: int) -> None:
if videoId in self.videoIdToVideo:
heapq.heappush(self.usedIds, videoId)
del self.videoIdToVideo[videoId]
del self.videoIdToViews[videoId]
del self.videoIdToLikes[videoId]
del self.videoIdToDislikes[videoId]
def watch(self, videoId: int, startMinute: int, endMinute: int) -> str:
if videoId not in self.videoIdToVideo:
return '-1'
self.videoIdToViews[videoId] += 1
video = self.videoIdToVideo[videoId]
return video[startMinute:min(endMinute + 1, len(video))]
def like(self, videoId: int) -> None:
if videoId in self.videoIdToVideo:
self.videoIdToLikes[videoId] += 1
def dislike(self, videoId: int) -> None:
if videoId in self.videoIdToVideo:
self.videoIdToDislikes[videoId] += 1
def getLikesAndDislikes(self, videoId: int) -> list[int]:
if videoId in self.videoIdToVideo:
return [self.videoIdToLikes[videoId], self.videoIdToDislikes[videoId]]
return [-1]
def getViews(self, videoId: int) -> int:
if videoId in self.videoIdToVideo:
return self.videoIdToViews[videoId]
return -1
def _getVideoId(self) -> int:
if not self.usedIds:
self.currVideoId += 1
return self.currVideoId - 1
return heapq.heappop(self.usedIds)
|
VideoSharingPlatform
|
python
|
pypa__pip
|
src/pip/_vendor/cachecontrol/cache.py
|
{
"start": 722,
"end": 1291
}
|
class ____(BaseCache):
def __init__(self, init_dict: MutableMapping[str, bytes] | None = None) -> None:
self.lock = Lock()
self.data = init_dict or {}
def get(self, key: str) -> bytes | None:
return self.data.get(key, None)
def set(
self, key: str, value: bytes, expires: int | datetime | None = None
) -> None:
with self.lock:
self.data.update({key: value})
def delete(self, key: str) -> None:
with self.lock:
if key in self.data:
self.data.pop(key)
|
DictCache
|
python
|
python-visualization__folium
|
folium/map.py
|
{
"start": 768,
"end": 2106
}
|
class ____(MacroElement):
"""The root class of the leaflet class hierarchy"""
_includes: defaultdict[str, dict] = defaultdict(dict)
@classmethod
def include(cls, **kwargs):
cls._includes[cls].update(**kwargs)
@classproperty
def includes(cls):
return cls._includes[cls]
@property
def leaflet_class_name(self):
# TODO: I did not check all Folium classes to see if
# this holds up. This breaks at least for CustomIcon.
return f"L.{self._name}"
def render(self, **kwargs):
figure = self.get_root()
assert isinstance(
figure, Figure
), "You cannot render this Element if it is not in a Figure."
if self.includes:
stmt = IncludeStatement(self.leaflet_class_name, **self.includes)
# A bit weird. I tried adding IncludeStatement directly to both
# figure and script, but failed. So we render this ourself.
figure.script.add_child(
Element(stmt._template.render(this=stmt, kwargs=self.includes)),
# make sure each class include gets rendered only once
name=self._name + "_includes",
# make sure this renders before the element itself
index=-1,
)
super().render(**kwargs)
|
Class
|
python
|
openai__openai-python
|
src/openai/types/responses/function_tool_param.py
|
{
"start": 251,
"end": 861
}
|
class ____(TypedDict, total=False):
name: Required[str]
"""The name of the function to call."""
parameters: Required[Optional[Dict[str, object]]]
"""A JSON schema object describing the parameters of the function."""
strict: Required[Optional[bool]]
"""Whether to enforce strict parameter validation. Default `true`."""
type: Required[Literal["function"]]
"""The type of the function tool. Always `function`."""
description: Optional[str]
"""A description of the function.
Used by the model to determine whether or not to call the function.
"""
|
FunctionToolParam
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 141200,
"end": 141597
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SponsorableOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
|
SponsorableOrder
|
python
|
getsentry__sentry
|
src/sentry/integrations/slack/webhooks/base.py
|
{
"start": 4726,
"end": 8981
}
|
class ____(MessagingIntegrationCommandDispatcher[Response]):
endpoint: SlackDMEndpoint
request: SlackDMRequest
# Define mapping of messages to halt reasons
@property
def TEAM_HALT_MAPPINGS(self) -> dict[str, MessageCommandHaltReason]:
from sentry.integrations.slack.webhooks.command import (
INSUFFICIENT_ROLE_MESSAGE,
LINK_FROM_CHANNEL_MESSAGE,
LINK_USER_FIRST_MESSAGE,
TEAM_NOT_LINKED_MESSAGE,
)
return {
LINK_FROM_CHANNEL_MESSAGE: MessageCommandHaltReason.LINK_FROM_CHANNEL,
LINK_USER_FIRST_MESSAGE: MessageCommandHaltReason.LINK_USER_FIRST,
INSUFFICIENT_ROLE_MESSAGE: MessageCommandHaltReason.INSUFFICIENT_ROLE,
TEAM_NOT_LINKED_MESSAGE: MessageCommandHaltReason.TEAM_NOT_LINKED,
}
@property
def integration_spec(self) -> MessagingIntegrationSpec:
return SlackMessagingSpec()
def help_handler(self, input: CommandInput) -> IntegrationResponse[Response]:
response = self.endpoint.help(slack_request=self.request, command=input.cmd_value)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
)
def link_user_handler(self, input: CommandInput) -> IntegrationResponse[Response]:
response = self.endpoint.link_user(self.request)
if ALREADY_LINKED_MESSAGE.format(username=self.request.identity_str) in str(response.data):
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
outcome_reason=str(MessageCommandHaltReason.ALREADY_LINKED),
context_data={"email": self.request.identity_str},
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
)
def unlink_user_handler(self, input: CommandInput) -> IntegrationResponse[Response]:
response = self.endpoint.unlink_user(self.request)
if NOT_LINKED_MESSAGE in str(response.data):
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
outcome_reason=str(MessageCommandHaltReason.NOT_LINKED),
context_data={"email": self.request.identity_str},
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
)
def link_team_handler(self, input: CommandInput) -> IntegrationResponse[Response]:
response = self.endpoint.link_team(self.request)
for message, reason in self.TEAM_HALT_MAPPINGS.items():
if message in str(response.data):
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.HALTED,
response=response,
outcome_reason=str(reason),
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
)
def unlink_team_handler(self, input: CommandInput) -> IntegrationResponse[Response]:
response = self.endpoint.unlink_team(self.request)
for message, reason in self.TEAM_HALT_MAPPINGS.items():
if message in str(response.data):
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.HALTED,
response=response,
outcome_reason=str(reason),
)
return IntegrationResponse(
interaction_result=EventLifecycleOutcome.SUCCESS,
response=response,
)
@property
def command_handlers(
self,
) -> Iterable[tuple[MessagingIntegrationCommand, CommandHandler[Response]]]:
yield commands.HELP, self.help_handler
yield commands.LINK_IDENTITY, self.link_user_handler
yield commands.UNLINK_IDENTITY, self.unlink_user_handler
yield commands.LINK_TEAM, self.link_team_handler
yield commands.UNLINK_TEAM, self.unlink_team_handler
|
SlackCommandDispatcher
|
python
|
tensorflow__tensorflow
|
third_party/xla/xla/tools/buffer_debug_log/checksum_mismatch_report.py
|
{
"start": 1515,
"end": 1764
}
|
class ____:
"""Thunk metadata, read from ThunkMetadataListProto.
Stored in a separate type to enable type checking.
"""
thunk_id: ThunkId
thunk_kind: str
profile_annotation: Optional[str]
@dataclasses.dataclass(frozen=True)
|
ThunkMetadata
|
python
|
kamyu104__LeetCode-Solutions
|
Python/destroying-asteroids.py
|
{
"start": 33,
"end": 386
}
|
class ____(object):
def asteroidsDestroyed(self, mass, asteroids):
"""
:type mass: int
:type asteroids: List[int]
:rtype: bool
"""
asteroids.sort()
for x in asteroids:
if x > mass:
return False
mass += min(x, asteroids[-1]-mass)
return True
|
Solution
|
python
|
spyder-ide__spyder
|
external-deps/qtconsole/qtconsole/rich_jupyter_widget.py
|
{
"start": 1279,
"end": 18210
}
|
class ____(RichIPythonWidget):
""" An JupyterWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichJupyterWidget protected class variables.
_payload_source_plot = 'ipykernel.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichJupyterWidget.
"""
kw['kind'] = 'rich'
super().__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
# Do we support jpg ?
# it seems that sometime jpg support is a plugin of QT, so try to assume
# it is not always supported.
self._jpg_supported = 'jpeg' in QtGui.QImageReader.supportedImageFormats()
#---------------------------------------------------------------------------
# 'ConsoleWidget' public interface overides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super().export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtWidgets.QMenu(self)
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super()._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
"""Append the Out[] prompt and make the output nicer
Shared code for some the following if statement
"""
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_execute_result(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = b64decode(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png',
None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = b64decode(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg',
None))
self._append_html(self.output_sep2, True)
elif 'text/latex' in data:
self._pre_image_append(msg, prompt_number)
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super()._handle_display_data(msg)
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super()._handle_execute_result(msg)
def _handle_display_data(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("display_data: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
self.log.debug("display: %s", msg.get('content', ''))
if 'image/svg+xml' in data:
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = b64decode(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
jpg = b64decode(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
elif 'text/latex' in data and latex_to_png:
try:
self._append_latex(data['text/latex'], True)
except LatexError:
return super()._handle_display_data(msg)
else:
# Default back to the plain text representation.
return super()._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichJupyterWidget' protected interface
#---------------------------------------------------------------------------
def _is_latex_math(self, latex):
"""
Determine if a Latex string is in math mode
This is the only mode supported by qtconsole
"""
basic_envs = ['math', 'displaymath']
starable_envs = ['equation', 'eqnarray' 'multline', 'gather', 'align',
'flalign', 'alignat']
star_envs = [env + '*' for env in starable_envs]
envs = basic_envs + starable_envs + star_envs
env_syntax = [r'\begin{{{0}}} \end{{{0}}}'.format(env).split() for env in envs]
math_syntax = [
(r'\[', r'\]'), (r'\(', r'\)'),
('$$', '$$'), ('$', '$'),
]
for start, end in math_syntax + env_syntax:
inner = latex[len(start):-len(end)]
if start in inner or end in inner:
return False
if latex.startswith(start) and latex.endswith(end):
return True
return False
def _get_color(self, color):
"""Get color from the current syntax style if loadable."""
try:
return get_colors(self.syntax_style)[color]
except ClassNotFound:
# The syntax_style has been sideloaded (e.g. by spyder).
# In this case the overloading class should override this method.
return get_colors('default')[color]
def _append_latex(self, latex, before_prompt=False, metadata=None):
""" Append latex data to the widget."""
png = None
if self._is_latex_math(latex):
png = latex_to_png(latex, wrap=False, backend='dvipng',
color=self._get_color('fgcolor'))
# Matplotlib only supports strings enclosed in dollar signs
if png is None and latex.startswith('$') and latex.endswith('$'):
# To avoid long and ugly errors, like the one reported in
# spyder-ide/spyder#7619
try:
png = latex_to_png(latex, wrap=False, backend='matplotlib',
color=self._get_color('fgcolor'))
except Exception:
pass
if png:
self._append_png(png, before_prompt, metadata)
else:
raise LatexError
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtWidgets.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
_ensure_dir_exists(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n', str(ba.toBase64().data().decode())))
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtWidgets.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtWidgets.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(int(width), int(height),
QtCore.Qt.IgnoreAspectRatio,
QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(int(width), QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(int(height), QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtWidgets.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
# Clobber RichIPythonWidget above:
|
RichJupyterWidget
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-document360/llama_index/readers/document360/base.py
|
{
"start": 428,
"end": 12265
}
|
class ____(BaseReader):
def __init__(
self,
api_key: str,
should_process_project_version: Optional[
Callable[[ProjectVersion], bool]
] = None,
should_process_category: Optional[
Callable[[Category, list[Category]], bool]
] = None,
should_process_article: Optional[Callable[[ArticleSlim], bool]] = None,
handle_batch_finished: Optional[Callable[[], Any]] = None,
handle_rate_limit_error: Optional[Callable[[], Any]] = None,
handle_request_http_error: Optional[
Callable[[requests.exceptions.HTTPError], Any]
] = None,
handle_category_processing_started: Optional[Callable[[Category], Any]] = None,
handle_article_processing_started: Optional[Callable[[Article], Any]] = None,
handle_article_processing_error: Optional[
Callable[[Union[Article, ArticleSlim]], Any]
] = None,
handle_load_data_error: Optional[Callable[[Exception, Article], Any]] = None,
article_to_custom_document: Optional[Callable[[Article], Document]] = None,
rate_limit_num_retries=10,
rate_limit_retry_wait_time=30,
):
self.api_key = api_key
self.processed_articles = []
self.should_process_project_version = should_process_project_version
self.should_process_category = should_process_category
self.should_process_article = should_process_article
self.handle_batch_finished = handle_batch_finished
self.handle_rate_limit_error = handle_rate_limit_error
self.handle_request_http_error = handle_request_http_error
self.handle_article_processing_error = handle_article_processing_error
self.handle_category_processing_started = handle_category_processing_started
self.handle_article_processing_started = handle_article_processing_started
self.handle_load_data_error = handle_load_data_error
self.headers = {"api_token": api_key, "Content-Type": "application/json"}
self.article_to_custom_document = article_to_custom_document
self.rate_limit_num_retries = rate_limit_num_retries
self.rate_limit_retry_wait_time = rate_limit_retry_wait_time
self._make_request = self._configure_request_retry(self._make_request)
def _configure_request_retry(self, func):
return retry(
stop=stop_after_attempt(self.rate_limit_num_retries),
wait=wait_fixed(self.rate_limit_retry_wait_time),
retry=retry_if_exception_type(RateLimitException),
)(func)
def _make_request(
self, method, url, headers=None, params=None, data=None, json=None
):
response = requests.request(
method, url, headers=headers, params=params, data=data, json=json
)
if response.status_code == 429:
self.handle_rate_limit_error and self.handle_rate_limit_error()
raise RateLimitException("Rate limit exceeded")
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
self.handle_request_http_error and self.handle_request_http_error(e)
raise
return response
def _fetch_project_versions(self):
url = f"{BASE_URL}/ProjectVersions"
response = self._make_request("GET", url, headers=self.headers)
return response.json()
def _get_categories(self, project_version_id: str):
url = f"{BASE_URL}/ProjectVersions/{project_version_id}/categories"
response = self._make_request("GET", url, headers=self.headers)
return response.json()
def _fetch_article(self, articleId):
url = f"{BASE_URL}/Articles/{articleId}"
response = self._make_request("GET", url, headers=self.headers)
return response.json()
def _get_document360_response_data(self, response):
return response["data"]
def _process_category_recursively(self, category: Category, parent_categories=[]):
if self.should_process_category and not self.should_process_category(
category, parent_categories
):
# we still might find the category of interest in the child categories
# even if the current category is not of interest
for child_category in category.child_categories:
self._process_category_recursively(
child_category,
[*parent_categories, category],
)
return
self.handle_category_processing_started and self.handle_category_processing_started(
category
)
articles = category.articles
for article_slim in articles:
article = None
try:
if self.should_process_article and not self.should_process_article(
article_slim
):
continue
article_response = self._fetch_article(article_slim.id)
_article = self._get_document360_response_data(article_response)
article = Article(
id=_article["id"],
title=_article["title"],
content=_article["content"],
html_content=_article["html_content"],
category_id=_article["category_id"],
project_version_id=_article["project_version_id"],
version_number=_article["version_number"],
public_version=_article["public_version"],
latest_version=_article["latest_version"],
enable_rtl=_article["enable_rtl"],
hidden=_article["hidden"],
status=_article["status"],
order=_article["order"],
created_by=_article["created_by"],
authors=_article["authors"],
created_at=_article["created_at"],
modified_at=_article["modified_at"],
slug=_article["slug"],
is_fall_back_content=_article["is_fall_back_content"],
description=_article["description"],
category_type=_article["category_type"],
content_type=_article["content_type"],
is_shared_article=_article["is_shared_article"],
translation_option=_article["translation_option"],
url=_article["url"],
)
self.handle_article_processing_started and self.handle_article_processing_started(
article
)
self.processed_articles.append(article)
except Exception as e:
self.handle_article_processing_error and self.handle_article_processing_error(
e, article or article_slim
)
continue
for child_category in category.child_categories:
self._process_category_recursively(
child_category,
[*parent_categories, category],
)
def _fetch_articles(self):
project_versions_response = self._fetch_project_versions()
project_versions = self._get_document360_response_data(
project_versions_response
)
for _project_version in project_versions:
project_version = ProjectVersion(
id=_project_version["id"],
version_number=_project_version["version_number"],
base_version_number=_project_version["base_version_number"],
version_code_name=_project_version["version_code_name"],
is_main_version=_project_version["is_main_version"],
is_beta=_project_version["is_beta"],
is_public=_project_version["is_public"],
is_deprecated=_project_version["is_deprecated"],
created_at=_project_version["created_at"],
modified_at=_project_version["modified_at"],
language_versions=_project_version["language_versions"],
slug=_project_version["slug"],
order=_project_version["order"],
version_type=_project_version["version_type"],
)
if (
self.should_process_project_version
and not self.should_process_project_version(project_version)
):
continue
categories_response = self._get_categories(project_version.id)
categories = self._get_document360_response_data(categories_response)
for _category in categories:
category = Category(
id=_category["id"],
name=_category["name"],
child_categories=_category["child_categories"],
articles=_category["articles"],
description=_category["description"],
project_version_id=_category["project_version_id"],
order=_category["order"],
parent_category_id=_category["parent_category_id"],
hidden=_category["hidden"],
icon=_category["icon"],
slug=_category["slug"],
language_code=_category["language_code"],
category_type=_category["category_type"],
created_at=_category["created_at"],
modified_at=_category["modified_at"],
status=_category["status"],
content_type=_category["content_type"],
)
self._process_category_recursively(category)
self.handle_batch_finished and self.handle_batch_finished()
articles_collected = self.processed_articles.copy()
self.processed_articles = []
return articles_collected
def article_to_document(self, article: Article):
return Document(
doc_id=article.id,
text=article.html_content,
extra_info={
"title": article.title,
"content": article.content,
"category_id": article.category_id,
"project_version_id": article.project_version_id,
"version_number": article.version_number,
"public_version": article.public_version,
"latest_version": article.latest_version,
"enable_rtl": article.enable_rtl,
"hidden": article.hidden,
"status": article.status,
"order": article.order,
"created_by": article.created_by,
"authors": article.authors,
"created_at": article.created_at,
"modified_at": article.modified_at,
"slug": article.slug,
"is_fall_back_content": article.is_fall_back_content,
"description": article.description,
"category_type": article.category_type,
"content_type": article.content_type,
"is_shared_article": article.is_shared_article,
"translation_option": article.translation_option,
"url": article.url,
},
)
def load_data(self):
try:
articles = self._fetch_articles()
return list(
map(
self.article_to_custom_document or self.article_to_document,
articles,
)
)
except Exception as e:
if not self.handle_load_data_error:
raise
self.handle_load_data_error(e, self.processed_articles)
|
Document360Reader
|
python
|
redis__redis-py
|
redis/maint_notifications.py
|
{
"start": 13539,
"end": 18568
}
|
class ____:
"""
Configuration class for maintenance notifications handling behaviour. Notifications are received through
push notifications.
This class defines how the Redis client should react to different push notifications
such as node moving, migrations, etc. in a Redis cluster.
"""
def __init__(
self,
enabled: Union[bool, Literal["auto"]] = "auto",
proactive_reconnect: bool = True,
relaxed_timeout: Optional[Number] = 10,
endpoint_type: Optional[EndpointType] = None,
):
"""
Initialize a new MaintNotificationsConfig.
Args:
enabled (bool | "auto"): Controls maintenance notifications handling behavior.
- True: The CLIENT MAINT_NOTIFICATIONS command must succeed during connection setup,
otherwise a ResponseError is raised.
- "auto": The CLIENT MAINT_NOTIFICATIONS command is attempted but failures are
gracefully handled - a warning is logged and normal operation continues.
- False: Maintenance notifications are completely disabled.
Defaults to "auto".
proactive_reconnect (bool): Whether to proactively reconnect when a node is replaced.
Defaults to True.
relaxed_timeout (Number): The relaxed timeout to use for the connection during maintenance.
If -1 is provided - the relaxed timeout is disabled. Defaults to 20.
endpoint_type (Optional[EndpointType]): Override for the endpoint type to use in CLIENT MAINT_NOTIFICATIONS.
If None, the endpoint type will be automatically determined based on the host and TLS configuration.
Defaults to None.
Raises:
ValueError: If endpoint_type is provided but is not a valid endpoint type.
"""
self.enabled = enabled
self.relaxed_timeout = relaxed_timeout
self.proactive_reconnect = proactive_reconnect
self.endpoint_type = endpoint_type
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"enabled={self.enabled}, "
f"proactive_reconnect={self.proactive_reconnect}, "
f"relaxed_timeout={self.relaxed_timeout}, "
f"endpoint_type={self.endpoint_type!r}"
f")"
)
def is_relaxed_timeouts_enabled(self) -> bool:
"""
Check if the relaxed_timeout is enabled. The '-1' value is used to disable the relaxed_timeout.
If relaxed_timeout is set to None, it will make the operation blocking
and waiting until any response is received.
Returns:
True if the relaxed_timeout is enabled, False otherwise.
"""
return self.relaxed_timeout != -1
def get_endpoint_type(
self, host: str, connection: "MaintNotificationsAbstractConnection"
) -> EndpointType:
"""
Determine the appropriate endpoint type for CLIENT MAINT_NOTIFICATIONS command.
Logic:
1. If endpoint_type is explicitly set, use it
2. Otherwise, check the original host from connection.host:
- If host is an IP address, use it directly to determine internal-ip vs external-ip
- If host is an FQDN, get the resolved IP to determine internal-fqdn vs external-fqdn
Args:
host: User provided hostname to analyze
connection: The connection object to analyze for endpoint type determination
Returns:
"""
# If endpoint_type is explicitly set, use it
if self.endpoint_type is not None:
return self.endpoint_type
# Check if the host is an IP address
try:
ip_addr = ipaddress.ip_address(host)
# Host is an IP address - use it directly
is_private = ip_addr.is_private
return EndpointType.INTERNAL_IP if is_private else EndpointType.EXTERNAL_IP
except ValueError:
# Host is an FQDN - need to check resolved IP to determine internal vs external
pass
# Host is an FQDN, get the resolved IP to determine if it's internal or external
resolved_ip = connection.get_resolved_ip()
if resolved_ip:
try:
ip_addr = ipaddress.ip_address(resolved_ip)
is_private = ip_addr.is_private
# Use FQDN types since the original host was an FQDN
return (
EndpointType.INTERNAL_FQDN
if is_private
else EndpointType.EXTERNAL_FQDN
)
except ValueError:
# This shouldn't happen since we got the IP from the socket, but fallback
pass
# Final fallback: use heuristics on the FQDN itself
is_private = _is_private_fqdn(host)
return EndpointType.INTERNAL_FQDN if is_private else EndpointType.EXTERNAL_FQDN
|
MaintNotificationsConfig
|
python
|
spyder-ide__spyder
|
spyder/api/plugins/tests.py
|
{
"start": 654,
"end": 3676
}
|
class ____(QMainWindow):
"""QMainWindow mock for plugin tests."""
def __init__(self):
# This avoids using the cli options passed to pytest
sys_argv = [sys.argv[0]]
self._cli_options = get_options(sys_argv)[0]
super().__init__()
PLUGIN_REGISTRY.set_main(self)
def register_plugin(self, plugin_class: type[SpyderPluginClass]):
plugin = PLUGIN_REGISTRY.register_plugin(self, plugin_class)
plugin._register()
return plugin
@staticmethod
def unregister_plugin(plugin: SpyderPluginClass):
assert PLUGIN_REGISTRY.delete_plugin(
plugin.NAME
), f"{plugin.NAME} not deleted"
plugin._unregister()
@staticmethod
def get_plugin(plugin_name, error=False):
return PLUGIN_REGISTRY.get_plugin(plugin_name)
@staticmethod
def is_plugin_available(plugin_name):
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
@pytest.fixture(scope="session")
def main_window_mock(qapp):
"""Create a QMainWindow mock for plugin tests."""
window = MainWindowMock()
try:
yield window
finally:
CONF.reset_manager()
PLUGIN_REGISTRY.reset()
del window
gc.collect()
@pytest.fixture(scope="session")
def plugins_cls() -> typing.Generator[
typing.Iterable[typing.Tuple[str, type[SpyderPluginClass]]], None, None
]:
"""Fixture that yields the plugin's classes to be tested.
before the yield statement, it will be run at startup.
after the yield statement, it will be run at teardown.
Yields:
List[Tuple[str, type[SpyderPluginClass]]]: A list of fixture's names
and plugin's classes to
create a fixture for.
Raises:
NotImplementedError: This fixture must be implemented by the test.
"""
raise NotImplementedError("This fixture must be implemented by the test.")
@pytest.fixture(scope="session", autouse=True)
def register_fixture(request: SubRequest, plugins_cls):
"""
Dynamically adds fixture for registering plugins.
"""
for fixture_name, plugin_cls in plugins_cls:
# Create a factory function so each fixture gets its own function
def register_plugin_factory(plugin_cls):
def register_plugin(main_window_mock):
plugin = main_window_mock.register_plugin(plugin_cls)
try:
yield plugin
finally:
main_window_mock.unregister_plugin(plugin)
return register_plugin
request._fixturemanager._arg2fixturedefs[fixture_name] = [
FixtureDef(
argname=fixture_name,
func=register_plugin_factory(plugin_cls),
scope="session",
fixturemanager=request._fixturemanager,
baseid=request.node.nodeid,
params=None,
)
]
|
MainWindowMock
|
python
|
walkccc__LeetCode
|
solutions/913. Cat and Mouse/913.py
|
{
"start": 92,
"end": 1987
}
|
class ____:
def catMouseGame(self, graph: list[list[int]]) -> int:
n = len(graph)
# result of (cat, mouse, move)
# move := 0 (mouse) // 1 (cat)
states = [[[0] * 2 for _ in range(n)] for _ in range(n)]
outDegree = [[[0] * 2 for _ in range(n)] for _ in range(n)]
q = collections.deque() # (cat, mouse, move, state)
for cat in range(n):
for mouse in range(n):
outDegree[cat][mouse][0] = len(graph[mouse])
outDegree[cat][mouse][1] = len(graph[cat]) - graph[cat].count(0)
# Start from the states s.t. the winner can be determined.
for cat in range(1, n):
for move in range(2):
# Mouse is in the hole.
states[cat][0][move] = int(State.MOUSE_WIN)
q.append((cat, 0, move, int(State.MOUSE_WIN)))
# Cat catches mouse.
states[cat][cat][move] = int(State.CAT_WIN)
q.append((cat, cat, move, int(State.CAT_WIN)))
while q:
cat, mouse, move, state = q.popleft()
if cat == 2 and mouse == 1 and move == 0:
return state
prevMove = move ^ 1
for prev in graph[cat if prevMove else mouse]:
prevCat = prev if prevMove else cat
if prevCat == 0: # invalid
continue
prevMouse = mouse if prevMove else prev
# The state has been determined.
if states[prevCat][prevMouse][prevMove]:
continue
if (prevMove == 0 and state == int(State.MOUSE_WIN) or
prevMove == 1 and state == int(State.CAT_WIN)):
states[prevCat][prevMouse][prevMove] = state
q.append((prevCat, prevMouse, prevMove, state))
else:
outDegree[prevCat][prevMouse][prevMove] -= 1
if outDegree[prevCat][prevMouse][prevMove] == 0:
states[prevCat][prevMouse][prevMove] = state
q.append((prevCat, prevMouse, prevMove, state))
return states[2][1][0]
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1597593,
"end": 1597742
}
|
class ____(sgqlc.types.Union):
"""Any referencable object"""
__schema__ = github_schema
__types__ = (Issue, PullRequest)
|
ReferencedSubject
|
python
|
tiangolo__fastapi
|
docs_src/query_param_models/tutorial001.py
|
{
"start": 155,
"end": 457
}
|
class ____(BaseModel):
limit: int = Field(100, gt=0, le=100)
offset: int = Field(0, ge=0)
order_by: Literal["created_at", "updated_at"] = "created_at"
tags: List[str] = []
@app.get("/items/")
async def read_items(filter_query: FilterParams = Query()):
return filter_query
|
FilterParams
|
python
|
pallets__werkzeug
|
src/werkzeug/middleware/lint.py
|
{
"start": 1223,
"end": 3035
}
|
class ____:
def __init__(self, stream: t.IO[bytes]) -> None:
self._stream = stream
def read(self, *args: t.Any) -> bytes:
if len(args) == 0:
warn(
"WSGI does not guarantee an EOF marker on the input stream, thus making"
" calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
" return from this call.",
WSGIWarning,
stacklevel=2,
)
elif len(args) != 1:
warn(
"Too many parameters passed to 'wsgi.input.read()'.",
WSGIWarning,
stacklevel=2,
)
return self._stream.read(*args)
def readline(self, *args: t.Any) -> bytes:
if len(args) == 0:
warn(
"Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
" 'wsgi.input.read()' instead.",
WSGIWarning,
stacklevel=2,
)
elif len(args) == 1:
warn(
"'wsgi.input.readline()' was called with a size hint. WSGI does not"
" support this, although it's available on all major servers.",
WSGIWarning,
stacklevel=2,
)
else:
raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
return self._stream.readline(*args)
def __iter__(self) -> t.Iterator[bytes]:
try:
return iter(self._stream)
except TypeError:
warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
return iter(())
def close(self) -> None:
warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
self._stream.close()
|
InputStream
|
python
|
celery__celery
|
t/unit/utils/test_collections.py
|
{
"start": 12898,
"end": 13188
}
|
class ____:
def test_observers_not_shared(self):
a = ChainMap()
b = ChainMap()
callback = Mock()
a.bind_to(callback)
b.update(x=1)
callback.assert_not_called()
a.update(x=1)
callback.assert_called_once_with(x=1)
|
test_ChainMap
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/error/syntax_error.py
|
{
"start": 112,
"end": 1132
}
|
class ____(GraphQLError):
def __init__(self, source, position, description):
location = get_location(source, position)
super(GraphQLSyntaxError, self).__init__(
message=u'Syntax Error {} ({}:{}) {}\n\n{}'.format(
source.name,
location.line,
location.column,
description,
highlight_source_at_location(source, location),
),
source=source,
positions=[position],
)
def highlight_source_at_location(source, location):
line = location.line
lines = source.body.splitlines()
pad_len = len(str(line + 1))
result = u''
format = (u'{:>' + str(pad_len) + '}: {}\n').format
if line >= 2:
result += format(line - 1, lines[line - 2])
result += format(line, lines[line - 1])
result += ' ' * (1 + pad_len + location.column) + '^\n'
if line < len(lines):
result += format(line + 1, lines[line])
return result
|
GraphQLSyntaxError
|
python
|
openai__openai-python
|
examples/responses/background_streaming.py
|
{
"start": 192,
"end": 1002
}
|
class ____(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
id = None
with client.responses.stream(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
text_format=MathResponse,
background=True,
) as stream:
for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
with client.responses.stream(
response_id=id,
starting_after=10,
text_format=MathResponse,
) as stream:
for event in stream:
if "output_text" in event.type:
rich.print(event)
rich.print(stream.get_final_response())
|
MathResponse
|
python
|
huggingface__transformers
|
src/transformers/models/flava/configuration_flava.py
|
{
"start": 10798,
"end": 14809
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
an FLAVA model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
[facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
use_cls_token (`bool`, *optional*, defaults to `True`):
Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.
Example:
```python
>>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel
>>> # Initializing a FlavaMultimodalModel with style configuration
>>> configuration = FlavaMultimodalConfig()
>>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
>>> model = FlavaMultimodalModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "flava_multimodal_model"
base_config_key = "multimodal_config"
def __init__(
self,
hidden_size: int = 768,
num_hidden_layers: int = 6,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: int = "gelu",
hidden_dropout_prob: int = 0.0,
attention_probs_dropout_prob: int = 0.0,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
qkv_bias: bool = True,
use_cls_token: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.use_cls_token = use_cls_token
|
FlavaMultimodalConfig
|
python
|
pytorch__pytorch
|
torch/distributed/remote_device.py
|
{
"start": 43,
"end": 4706
}
|
class ____:
"""
Represents a device on a remote worker.
Args:
remote_device (str or torch.device): Represents a device on a remote worker.
The string format should be one of the following:
1. "<workername>/<device>", where the device field can be parsed as torch.device type.
E.g., "trainer0/cpu", "trainer0", "ps0/cuda:0".
In addition, the device field can be optional and the default value is "cpu".
2. "rank:<rank>/<device>", where <rank> is the rank of the
process and device can be parsed as torch.device type.
E.g., "rank:0/cpu", "rank:0", "rank:0/cuda:0"
3. <workername> and <rank> are optional and formats like "cpu"
and "cuda:1", just represent local devices.
"""
def __init__(self, remote_device: str | torch.device):
PARSE_ERROR = (
f"Could not parse remote_device: {remote_device}. The valid format is "
"'<workername>/<device>' or 'rank:<rank>/<device>' or '<device>'"
)
self._worker_name = None
self._rank = None
self._device: str | int | torch.device | None = None
if isinstance(remote_device, torch.device):
self._device = remote_device
elif isinstance(remote_device, str):
fields = remote_device.split("/")
if len(fields) == 2:
# pyrefly: ignore [bad-assignment]
self._worker_name, self._device = fields
elif len(fields) == 1:
# Check if this is a valid device.
if _remote_device._is_valid_local_device(fields[0]):
self._device = fields[0]
else:
# pyrefly: ignore [bad-assignment]
self._worker_name = fields[0]
self._device = "cpu"
else:
raise ValueError(PARSE_ERROR)
else:
raise TypeError(f"Invalid type for remote_device: {type(remote_device)}")
# Do some basic sanity check (no empty string)
if self._worker_name is not None and not self._worker_name:
raise ValueError(PARSE_ERROR)
# Validate the device.
self._device = torch.device(self._device)
# Check for rank based format.
if self._worker_name is not None:
fields = self._worker_name.split(":")
if len(fields) == 2:
# rank:<rank>/device format, extract rank
if fields[0] == "rank" and fields[1].isdigit():
self._rank = int(fields[1]) # type: ignore[assignment]
# pyrefly: ignore [bad-assignment]
self._worker_name = None
else:
raise ValueError(PARSE_ERROR)
elif len(fields) > 2:
raise ValueError(PARSE_ERROR)
@staticmethod
def _is_valid_local_device(device):
# Check for torch.device
try:
torch.device(device)
return True
except Exception:
return False
def worker_name(self) -> str | None:
"""Return the name of remote worker representing the remote device and ``None`` if no worker name is available."""
return self._worker_name
def rank(self) -> int | None:
"""
Returns the rank of remote worker representing the remote device.
Returns ``None`` if no rank is available.
"""
return self._rank
def device(self) -> torch.device:
"""Return the local device on the remote worker."""
return self._device # type: ignore[return-value]
def __repr__(self):
if self._device is not None:
if self._worker_name is not None:
return f"{self._worker_name}/{self._device}"
elif self._rank is not None:
return f"rank:{self._rank}/{self._device}"
else:
return str(self._device)
else:
if self._worker_name is not None:
return f"{self._worker_name}"
elif self._rank is not None:
return f"{self._rank}"
else:
raise RuntimeError("Invalid state!")
def __eq__(self, other):
return isinstance(other, _remote_device) and (
self._worker_name == other._worker_name
and self._device == other._device
and self._rank == other._rank
)
def __hash__(self):
return hash(self._worker_name) ^ hash(self._device) ^ hash(self._rank)
|
_remote_device
|
python
|
getsentry__sentry
|
src/sentry/metrics/middleware.py
|
{
"start": 743,
"end": 3529
}
|
class ____(RuntimeError):
pass
def _filter_tags(key: str, tags: MutableTags) -> MutableTags:
"""Removes unwanted tags from the tag mapping and returns a filtered one."""
if key in _METRICS_THAT_CAN_HAVE_BAD_TAGS:
return tags
discarded = frozenset(
key
for key in tags
if key not in _NOT_BAD_TAGS and (key.endswith("_id") or key in _BAD_TAGS)
)
if not discarded:
return tags
if settings.SENTRY_METRICS_DISALLOW_BAD_TAGS:
raise BadMetricTags(
f"discarded illegal metric tags: {sorted(discarded)} for metric {key!r}"
)
return {k: v for k, v in tags.items() if k not in discarded}
_THREAD_LOCAL_TAGS = local()
_GLOBAL_TAGS: list[Tags] = []
def _add_global_tags(
all_threads: bool = False, set_sentry_tags: bool = False, tags: Tags | None = None
) -> list[Tags]:
if all_threads:
stack = _GLOBAL_TAGS
else:
if not hasattr(_THREAD_LOCAL_TAGS, "stack"):
stack = _THREAD_LOCAL_TAGS.stack = []
else:
stack = _THREAD_LOCAL_TAGS.stack
if tags is None:
tags = {}
stack.append(tags)
if set_sentry_tags:
sentry_sdk.set_tags(tags)
return stack
def add_global_tags(
all_threads: bool = False, set_sentry_tags: bool = False, tags: Tags | None = None
) -> None:
"""
Set multiple metric tags onto the global or thread-local stack which then
apply to all metrics.
If `set_sentry_tags` is True, also sets the given tags in sentry_sdk.
When used in combination with the `global_tags` context manager,
`add_global_tags` is reverted in any wrapping invocation of `global_tags`.
However, tags set in the current sentry_sdk instance will remain set there.
For example::
with global_tags(tags={"tag_a": 123}):
add_global_tags(tags={"tag_b": 123})
# tag_b is no longer visible
"""
_add_global_tags(all_threads=all_threads, set_sentry_tags=set_sentry_tags, tags=tags)
@contextmanager
def global_tags(
all_threads: bool = False, set_sentry_tags: bool = False, tags: Tags | None = None
) -> Generator[None]:
"""
The context manager version of `add_global_tags` that reverts all tag
changes upon exit.
See docstring of `add_global_tags` for how those two methods interact.
"""
stack = _add_global_tags(all_threads=all_threads, set_sentry_tags=set_sentry_tags, tags=tags)
old_len = len(stack) - 1
try:
yield
finally:
del stack[old_len:]
def get_current_global_tags() -> MutableTags:
rv: MutableTags = {}
for tags in _GLOBAL_TAGS:
rv.update(tags)
for tags in getattr(_THREAD_LOCAL_TAGS, "stack", None) or ():
rv.update(tags)
return rv
|
BadMetricTags
|
python
|
doocs__leetcode
|
solution/2000-2099/2022.Convert 1D Array Into 2D Array/Solution.py
|
{
"start": 0,
"end": 226
}
|
class ____:
def construct2DArray(self, original: List[int], m: int, n: int) -> List[List[int]]:
if m * n != len(original):
return []
return [original[i : i + n] for i in range(0, m * n, n)]
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-ways-to-form-a-target-string-given-a-dictionary.py
|
{
"start": 781,
"end": 1436
}
|
class ____(object):
    def numWays(self, words, target):
        """
        Count the ways to form `target` by picking characters from strictly
        increasing column indices of equal-length `words`, modulo 10^9 + 7.

        :type words: List[str]
        :type target: str
        :rtype: int
        """
        MOD = 10**9+7
        # dp[i+1][j+1]: number of ways of target[0..j] using count[0..i].
        # Rolling array: only two rows are kept, alternating via (i % 2),
        # so space is O(len(target)) instead of O(columns * len(target)).
        dp = [[0]*(len(target)+1) for _ in xrange(2)]
        for i in xrange(len(dp)):
            # The empty target prefix can always be formed in exactly one way.
            dp[i][0] = 1
        for i in xrange(len(words[0])):
            # count[c]: how many words have character c in column i.
            count = collections.Counter(w[i] for w in words)
            for j in reversed(xrange(len(target))):
                # Either skip column i (first term) or use it to match
                # target[j] (second term, weighted by the column's counts).
                # NOTE(review): due to operator precedence, `% MOD` binds only
                # to the product; the running sum is reduced only at the final
                # return. Correct under Python's arbitrary-precision ints, but
                # intermediate values can grow large.
                dp[(i+1)%2][j+1] = dp[i%2][j+1]+dp[i%2][j]*count[target[j]] % MOD
        return dp[(len(words[0]))%2][-1] % MOD
|
Solution2
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/tests/cli/test_ecs_worker.py
|
{
"start": 16949,
"end": 29570
}
|
class ____:
    """Test work pool defaults update functionality.

    Exercises ``update_work_pool_defaults`` directly and via the
    ``ecs-worker deploy-service`` CLI command, against a real Prefect client
    backed by a temporary work pool created per test.
    """

    @pytest.fixture
    async def test_work_pool(self):
        """Create a test work pool with ECS base template."""
        async with prefect.get_client() as client:
            work_pool = await client.create_work_pool(
                WorkPoolCreate(
                    # Random suffix keeps concurrently-running tests isolated.
                    name=f"test-ecs-pool-{uuid.uuid4()}",
                    base_job_template=ECSWorker.get_default_base_job_template(),
                )
            )
            try:
                yield work_pool
            finally:
                # Always remove the pool, even when the test body raised.
                await client.delete_work_pool(work_pool.name)

    async def test_update_work_pool_defaults_success(self, test_work_pool):
        """Test successful work pool defaults update."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        stack_outputs = {
            "VpcId": "vpc-123456",
            "ClusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster",
            "PrefectApiKeySecretArnOutput": "arn:aws:secretsmanager:us-east-1:123456789012:secret:test-key",
            "TaskExecutionRoleArn": "arn:aws:iam::123456789012:role/test-execution-role",
            "SubnetIds": "subnet-1,subnet-2",
        }

        # Update the work pool defaults
        update_work_pool_defaults(test_work_pool.name, stack_outputs)

        # Verify the updates were applied
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            assert properties["vpc_id"]["default"] == "vpc-123456"
            assert (
                properties["cluster"]["default"]
                == "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
            )
            assert (
                properties["prefect_api_key_secret_arn"]["default"]
                == "arn:aws:secretsmanager:us-east-1:123456789012:secret:test-key"
            )
            assert (
                properties["execution_role_arn"]["default"]
                == "arn:aws:iam::123456789012:role/test-execution-role"
            )
            # Check network configuration subnets
            network_config = properties["network_configuration"]["properties"]
            subnets = network_config["awsvpcConfiguration"]["properties"]["subnets"]
            # SubnetIds is a comma-separated string; it should be split into a list.
            assert subnets["default"] == ["subnet-1", "subnet-2"]

    async def test_update_work_pool_defaults_preserves_existing(self, test_work_pool):
        """Test that existing defaults are preserved."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        # First, set some existing defaults
        async with prefect.get_client() as client:
            work_pool = await client.read_work_pool(test_work_pool.name)
            base_template = work_pool.base_job_template
            base_template["variables"]["properties"]["vpc_id"]["default"] = (
                "existing-vpc"
            )
            base_template["variables"]["properties"]["execution_role_arn"][
                "default"
            ] = "existing-role"
            from prefect.client.schemas.actions import WorkPoolUpdate

            await client.update_work_pool(
                test_work_pool.name, WorkPoolUpdate(base_job_template=base_template)
            )

        stack_outputs = {
            "VpcId": "vpc-123456",
            "ClusterArn": "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster",
            "TaskExecutionRoleArn": "arn:aws:iam::123456789012:role/test-execution-role",
        }

        # Update with new stack outputs
        update_work_pool_defaults(test_work_pool.name, stack_outputs)

        # Verify existing defaults were preserved and only empty ones updated
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            # Existing defaults should be preserved
            assert properties["vpc_id"]["default"] == "existing-vpc"
            assert properties["execution_role_arn"]["default"] == "existing-role"
            # Empty defaults should be updated
            assert (
                properties["cluster"]["default"]
                == "arn:aws:ecs:us-east-1:123456789012:cluster/test-cluster"
            )

    async def test_update_work_pool_defaults_handles_missing_outputs(
        self, test_work_pool
    ):
        """Test handling of missing stack outputs."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        # Only provide some outputs
        stack_outputs = {
            "VpcId": "vpc-123456",
            # Missing ClusterArn, TaskExecutionRoleArn, etc.
        }

        update_work_pool_defaults(test_work_pool.name, stack_outputs)

        # Should still update available outputs
        async with prefect.get_client() as client:
            updated_work_pool = await client.read_work_pool(test_work_pool.name)
            properties = updated_work_pool.base_job_template["variables"]["properties"]
            assert properties["vpc_id"]["default"] == "vpc-123456"
            # cluster should still have its original default since ClusterArn wasn't provided
            assert properties["cluster"]["default"] is None

    async def test_update_work_pool_defaults_handles_nonexistent_pool(self):
        """Test that errors for nonexistent work pools are handled gracefully."""
        from prefect_aws._cli.utils import update_work_pool_defaults

        stack_outputs = {"VpcId": "vpc-123456"}

        # Should not raise an exception for nonexistent work pool
        update_work_pool_defaults("nonexistent-pool", stack_outputs)

    async def test_deploy_service_updates_work_pool_when_wait_true(
        self, test_work_pool
    ):
        """Test that deploy-service updates work pool defaults when --wait is True."""

        def mock_deploy_and_get_status(cf_client, stack_name, **kwargs):
            """Mock deploy_stack that simulates successful deployment"""
            pass

        def mock_get_status(cf_client, stack_name):
            """Mock get_stack_status that returns outputs"""
            return {
                "StackName": stack_name,
                "StackStatus": "CREATE_COMPLETE",
                "Outputs": [
                    {"OutputKey": "VpcId", "OutputValue": "vpc-deployed"},
                    {
                        "OutputKey": "ClusterArn",
                        "OutputValue": "arn:aws:ecs:us-east-1:123456789012:cluster/deployed-cluster",
                    },
                    {
                        "OutputKey": "TaskExecutionRoleArn",
                        "OutputValue": "arn:aws:iam::123456789012:role/deployed-role",
                    },
                    {
                        "OutputKey": "SubnetIds",
                        "OutputValue": "subnet-deployed-1,subnet-deployed-2",
                    },
                ],
            }

        # Patch out all AWS interactions so only the CLI wiring is exercised.
        with (
            patch(
                "prefect_aws._cli.ecs_worker.deploy_stack",
                side_effect=mock_deploy_and_get_status,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.get_stack_status",
                side_effect=mock_get_status,
            ),
            patch("prefect_aws._cli.ecs_worker.load_template") as mock_load_template,
            patch(
                "prefect_aws._cli.ecs_worker.validate_aws_credentials",
                return_value=True,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_ecs_cluster", return_value=True
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_vpc_and_subnets",
                return_value=(True, ""),
            ),
        ):
            mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
            runner = CliRunner()
            result = runner.invoke(
                app,
                [
                    "ecs-worker",
                    "deploy-service",
                    "--work-pool-name",
                    test_work_pool.name,
                    "--stack-name",
                    "test-stack",
                    "--prefect-api-url",
                    "http://localhost:4200/api",
                    "--existing-cluster-identifier",
                    "test-cluster",
                    "--existing-vpc-id",
                    "vpc-12345",
                    "--existing-subnet-ids",
                    "subnet-1,subnet-2",
                    "--region",
                    "us-east-1",
                    "--prefect-auth-string",
                    "this-is-a-test-auth-string",
                    "--wait",  # This should trigger work pool update
                ],
            )

            assert result.exit_code == 0, result.stdout

            # Verify the work pool was updated with deployment values
            async with prefect.get_client() as client:
                updated_work_pool = await client.read_work_pool(test_work_pool.name)
                properties = updated_work_pool.base_job_template["variables"]["properties"]
                assert properties["vpc_id"]["default"] == "vpc-deployed"
                assert (
                    properties["cluster"]["default"]
                    == "arn:aws:ecs:us-east-1:123456789012:cluster/deployed-cluster"
                )
                assert (
                    properties["execution_role_arn"]["default"]
                    == "arn:aws:iam::123456789012:role/deployed-role"
                )
                # Check subnets
                network_config = properties["network_configuration"]["properties"]
                subnets = network_config["awsvpcConfiguration"]["properties"]["subnets"]
                assert subnets["default"] == ["subnet-deployed-1", "subnet-deployed-2"]

    async def test_deploy_service_skips_work_pool_update_when_no_wait(
        self, test_work_pool
    ):
        """Test that deploy-service skips work pool update when --no-wait is used."""

        def mock_deploy_no_wait(cf_client, stack_name, **kwargs):
            # Should not call get_stack_status when wait=False
            pass

        with (
            patch(
                "prefect_aws._cli.ecs_worker.deploy_stack",
                side_effect=mock_deploy_no_wait,
            ),
            patch("prefect_aws._cli.ecs_worker.load_template") as mock_load_template,
            patch(
                "prefect_aws._cli.ecs_worker.validate_aws_credentials",
                return_value=True,
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_ecs_cluster", return_value=True
            ),
            patch(
                "prefect_aws._cli.ecs_worker.validate_vpc_and_subnets",
                return_value=(True, ""),
            ),
        ):
            mock_load_template.return_value = {"AWSTemplateFormatVersion": "2010-09-09"}
            runner = CliRunner()
            result = runner.invoke(
                app,
                [
                    "ecs-worker",
                    "deploy-service",
                    "--work-pool-name",
                    test_work_pool.name,
                    "--stack-name",
                    "test-stack",
                    "--prefect-api-url",
                    "http://localhost:4200/api",
                    "--existing-cluster-identifier",
                    "test-cluster",
                    "--existing-vpc-id",
                    "vpc-12345",
                    "--existing-subnet-ids",
                    "subnet-1,subnet-2",
                    "--region",
                    "us-east-1",
                    "--prefect-auth-string",
                    "this-is-a-test-auth-string",
                    "--no-wait",  # This should skip work pool update
                ],
            )

            assert result.exit_code == 0, result.stdout

            # Verify the work pool was NOT updated (should still have original defaults)
            async with prefect.get_client() as client:
                work_pool = await client.read_work_pool(test_work_pool.name)
                properties = work_pool.base_job_template["variables"]["properties"]
                # Should still have original None defaults, not updated values
                assert properties["vpc_id"]["default"] is None
                assert properties["cluster"]["default"] is None
                assert properties["execution_role_arn"]["default"] is None
|
TestWorkPoolDefaults
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/models.py
|
{
"start": 104340,
"end": 104577
}
|
class ____(Response):
    """
    Response of models.move endpoint.
    """

    # Routing metadata: service name, action, and API version this response
    # class corresponds to on the backend.
    _service = "models"
    _action = "move"
    _version = "2.13"
    # The endpoint returns a free-form JSON object: no declared properties,
    # and additional properties are accepted.
    _schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
|
MoveResponse
|
python
|
gevent__gevent
|
src/greentest/3.9/test_threading.py
|
{
"start": 51253,
"end": 52413
}
|
class ____(unittest.TestCase):
    """Tests for _thread.interrupt_main() delivery of KeyboardInterrupt."""

    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        # The join above may be aborted by the KeyboardInterrupt; join again
        # outside the assertRaises block to ensure the thread has finished.
        t.join()

    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()

    def test_interrupt_main_noerror(self):
        # Save the current SIGINT handler so it can be restored afterwards.
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise.
            # With SIGINT ignored, interrupt_main must be a no-op...
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()
            # ...and with the default (non-Python) handler it must not raise
            # KeyboardInterrupt either.
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
|
InterruptMainTests
|
python
|
python-poetry__poetry
|
src/poetry/console/commands/build.py
|
{
"start": 1171,
"end": 5021
}
|
class ____:
    """Orchestrates building project distributions (sdist/wheel).

    Chooses between an in-process build with the bundled poetry-core backend
    and an isolated PEP 517 build, based on the project's build-system
    declaration.
    """

    def __init__(self, poetry: Poetry, env: Env, io: IO) -> None:
        self.poetry = poetry
        self.env = env
        self.io = io

    def _build(
        self,
        fmt: DistributionType,
        executable: Path,
        target_dir: Path,
        config_settings: dict[str, Any],
    ) -> None:
        # In-process build: look up the builder class for the requested
        # distribution format and run it directly against target_dir.
        builder = BUILD_FORMATS[fmt]
        builder(
            self.poetry,
            executable=executable,
            config_settings=config_settings,
        ).build(target_dir)

    def _isolated_build(
        self,
        fmt: DistributionType,
        executable: Path,
        target_dir: Path,
        config_settings: dict[str, Any],
    ) -> None:
        # Isolated build: delegate to a PEP 517 builder running in a separate
        # environment created from the project's build-system requirements.
        with isolated_builder(
            source=self.poetry.file.path.parent,
            distribution=fmt,
            python_executable=executable,
        ) as builder:
            builder.build(fmt, target_dir, config_settings=config_settings)

    def _requires_isolated_build(self) -> bool:
        """
        Determines if an isolated build is required.

        An isolated build is required if:
        - The package has a build script.
        - There are multiple build system dependencies.
        - The build dependency is not `poetry-core`.
        - The installed `poetry-core` version does not satisfy the build dependency constraints.
        - The build dependency has a source type (e.g. is a VcsDependency).

        :returns: True if an isolated build is required, False otherwise.
        """
        if not self._has_build_backend_defined():
            # No build-backend declared: warn and fall back to the bundled
            # poetry-core (non-isolated) path.
            self.io.write_error_line(
                "<warning><b>WARNING</>: No build backend defined. Please define one in the <c1>pyproject.toml</>.\n"
                "Falling back to using the built-in `poetry-core` version.\n"
                "In a future release Poetry will fallback to `setuptools` as defined by PEP 517.\n"
                "More details can be found at https://python-poetry.org/docs/libraries/#packaging</>"
            )
            return False

        if (
            self.poetry.package.build_script
            or len(self.poetry.build_system_dependencies) != 1
        ):
            return True

        build_dependency = self.poetry.build_system_dependencies[0]
        if build_dependency.name != "poetry-core":
            return True

        # Compare the installed poetry-core against the declared constraint.
        poetry_core_version = Version.parse(metadata.version("poetry-core"))

        return bool(
            not build_dependency.constraint.allows(poetry_core_version)
            or build_dependency.source_type
        )

    def _get_builder(self) -> Callable[..., None]:
        # Select the build strategy; both callables share the same signature.
        if self._requires_isolated_build():
            return self._isolated_build
        return self._build

    def _has_build_backend_defined(self) -> bool:
        # True when pyproject.toml declares [build-system].build-backend.
        return "build-backend" in self.poetry.pyproject.data.get("build-system", {})

    def build(self, options: BuildOptions) -> int:
        """Build all requested distribution formats.

        :param options: output directory, formats, clean flag, and PEP 517
            config settings.
        :returns: process exit code (0 on success, 1 in non-package mode).
        """
        if not self.poetry.is_package_mode:
            self.io.write_error_line(
                "Building a package is not possible in non-package mode."
            )
            return 1

        dist_dir = Path(options.output)
        package = self.poetry.package
        self.io.write_line(
            f"Building <c1>{package.pretty_name}</c1> (<c2>{package.version}</c2>)"
        )

        # Relative output paths are resolved against the project root.
        if not dist_dir.is_absolute():
            dist_dir = self.poetry.pyproject_path.parent / dist_dir

        if options.clean:
            remove_directory(path=dist_dir, force=True)

        build = self._get_builder()

        for fmt in options.formats:
            self.io.write_line(f"Building <info>{fmt}</info>")
            build(
                fmt,
                executable=self.env.python,
                target_dir=dist_dir,
                config_settings=options.config_settings,
            )

        return 0
|
BuildHandler
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_validation_rule.py
|
{
"start": 383,
"end": 22970
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'field_path': 'str',
'message': 'str',
'message_expression': 'str',
'optional_old_self': 'bool',
'reason': 'str',
'rule': 'str'
}
attribute_map = {
'field_path': 'fieldPath',
'message': 'message',
'message_expression': 'messageExpression',
'optional_old_self': 'optionalOldSelf',
'reason': 'reason',
'rule': 'rule'
}
def __init__(self, field_path=None, message=None, message_expression=None, optional_old_self=None, reason=None, rule=None, local_vars_configuration=None): # noqa: E501
"""V1ValidationRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._field_path = None
self._message = None
self._message_expression = None
self._optional_old_self = None
self._reason = None
self._rule = None
self.discriminator = None
if field_path is not None:
self.field_path = field_path
if message is not None:
self.message = message
if message_expression is not None:
self.message_expression = message_expression
if optional_old_self is not None:
self.optional_old_self = optional_old_self
if reason is not None:
self.reason = reason
self.rule = rule
@property
def field_path(self):
"""Gets the field_path of this V1ValidationRule. # noqa: E501
fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` # noqa: E501
:return: The field_path of this V1ValidationRule. # noqa: E501
:rtype: str
"""
return self._field_path
@field_path.setter
def field_path(self, field_path):
"""Sets the field_path of this V1ValidationRule.
fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when validation checks if a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo` If the validation checks two lists must have unique attributes, the fieldPath could be set to either of the list: e.g. `.testList` It does not support list numeric index. It supports child operation to refer to an existing field currently. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. Numeric index of array is not supported. For field name which contains special characters, use `['specialName']` to refer the field name. e.g. for attribute `foo.34$` appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']` # noqa: E501
:param field_path: The field_path of this V1ValidationRule. # noqa: E501
:type: str
"""
self._field_path = field_path
@property
def message(self):
"""Gets the message of this V1ValidationRule. # noqa: E501
Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" # noqa: E501
:return: The message of this V1ValidationRule. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1ValidationRule.
Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" # noqa: E501
:param message: The message of this V1ValidationRule. # noqa: E501
:type: str
"""
self._message = message
@property
def message_expression(self):
"""Gets the message_expression of this V1ValidationRule. # noqa: E501
MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\" # noqa: E501
:return: The message_expression of this V1ValidationRule. # noqa: E501
:rtype: str
"""
return self._message_expression
@message_expression.setter
def message_expression(self, message_expression):
"""Sets the message_expression of this V1ValidationRule.
MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\" # noqa: E501
:param message_expression: The message_expression of this V1ValidationRule. # noqa: E501
:type: str
"""
self._message_expression = message_expression
@property
def optional_old_self(self):
"""Gets the optional_old_self of this V1ValidationRule. # noqa: E501
optionalOldSelf is used to opt a transition rule into evaluation even when the object is first created, or if the old object is missing the value. When enabled `oldSelf` will be a CEL optional whose value will be `None` if there is no old value, or when the object is initially created. You may check for presence of oldSelf using `oldSelf.hasValue()` and unwrap it after checking using `oldSelf.value()`. Check the CEL documentation for Optional types for more information: https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes May not be set unless `oldSelf` is used in `rule`. # noqa: E501
:return: The optional_old_self of this V1ValidationRule. # noqa: E501
:rtype: bool
"""
return self._optional_old_self
@optional_old_self.setter
def optional_old_self(self, optional_old_self):
"""Sets the optional_old_self of this V1ValidationRule.
optionalOldSelf is used to opt a transition rule into evaluation even when the object is first created, or if the old object is missing the value. When enabled `oldSelf` will be a CEL optional whose value will be `None` if there is no old value, or when the object is initially created. You may check for presence of oldSelf using `oldSelf.hasValue()` and unwrap it after checking using `oldSelf.value()`. Check the CEL documentation for Optional types for more information: https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes May not be set unless `oldSelf` is used in `rule`. # noqa: E501
:param optional_old_self: The optional_old_self of this V1ValidationRule. # noqa: E501
:type: bool
"""
self._optional_old_self = optional_old_self
@property
def reason(self):
"""Gets the reason of this V1ValidationRule. # noqa: E501
reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid. # noqa: E501
:return: The reason of this V1ValidationRule. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1ValidationRule.
reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid. # noqa: E501
:param reason: The reason of this V1ValidationRule. # noqa: E501
:type: str
"""
self._reason = reason
@property
def rule(self):
"""Gets the rule of this V1ValidationRule. # noqa: E501
Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual <= self.spec.maxDesired\"} If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority < 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value >= 0 && value < 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"} The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". 
An \"unknown type\" is recursively defined as: - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - An array where the items schema is of an \"unknown type\" - An object where the additionalProperties schema is of an \"unknown type\" Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ > 0\"} - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop > 0\"} - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d > 0\"} Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. If `rule` makes use of the `oldSelf` variable it is implicitly a `transition rule`. By default, the `oldSelf` variable is the same type as `self`. 
When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional variable whose value() is the same type as `self`. See the documentation for the `optionalOldSelf` field for details. Transition rules by default are applied only on UPDATE requests and are skipped if an old value could not be found. You can opt a transition rule into unconditional evaluation by setting `optionalOldSelf` to true. # noqa: E501
:return: The rule of this V1ValidationRule. # noqa: E501
:rtype: str
"""
return self._rule
@rule.setter
def rule(self, rule):
"""Sets the rule of this V1ValidationRule.
Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual <= self.spec.maxDesired\"} If the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority < 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value >= 0 && value < 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"} The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible. Unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". 
An \"unknown type\" is recursively defined as: - A schema with no type and x-kubernetes-preserve-unknown-fields set to true - An array where the items schema is of an \"unknown type\" - An object where the additionalProperties schema is of an \"unknown type\" Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\", \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\". Examples: - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ > 0\"} - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop > 0\"} - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d > 0\"} Equality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and non-intersecting elements in `Y` are appended, retaining their partial order. - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with non-intersecting keys are appended, retaining their partial order. If `rule` makes use of the `oldSelf` variable it is implicitly a `transition rule`. By default, the `oldSelf` variable is the same type as `self`. 
When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional variable whose value() is the same type as `self`. See the documentation for the `optionalOldSelf` field for details. Transition rules by default are applied only on UPDATE requests and are skipped if an old value could not be found. You can opt a transition rule into unconditional evaluation by setting `optionalOldSelf` to true. # noqa: E501
:param rule: The rule of this V1ValidationRule. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and rule is None: # noqa: E501
raise ValueError("Invalid value for `rule`, must not be `None`") # noqa: E501
self._rule = rule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ValidationRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ValidationRule):
return True
return self.to_dict() != other.to_dict()
|
V1ValidationRule
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/ast_util.py
|
{
"start": 2319,
"end": 4372
}
|
class ____(gast.NodeTransformer):
"""Transformer that can rename symbols to a simple names."""
def __init__(self, name_map):
self.name_map = name_map
def _process_name_node(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
new_node = gast.Name(
str(self.name_map[qn]),
ctx=node.ctx,
annotation=None,
type_comment=None)
# All annotations get carried over.
for k in anno.keys(node):
anno.copyanno(node, new_node, k)
return new_node
return self.generic_visit(node)
def _process_list_of_strings(self, names):
for i in range(len(names)):
qn = qual_names.QN(names[i])
if qn in self.name_map:
names[i] = str(self.name_map[qn])
return names
def visit_Nonlocal(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Global(self, node):
node.names = self._process_list_of_strings(node.names)
return node
def visit_Name(self, node):
return self._process_name_node(node)
def visit_Attribute(self, node):
if anno.hasanno(node, anno.Basic.QN):
return self._process_name_node(node)
# Renaming attributes is not supported.
return self.generic_visit(node)
def visit_FunctionDef(self, node):
qn = qual_names.QN(node.name)
if qn in self.name_map:
node.name = str(self.name_map[qn])
return self.generic_visit(node)
def rename_symbols(node, name_map):
"""Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
elif isinstance(node, tuple):
return tuple(renamer.visit(n) for n in node)
return renamer.visit(node)
def keywords_to_dict(keywords):
"""Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
keys.append(gast.Constant(kw.arg, kind=None))
values.append(kw.value)
return gast.Dict(keys=keys, values=values)
|
SymbolRenamer
|
python
|
doocs__leetcode
|
solution/0500-0599/0561.Array Partition/Solution.py
|
{
"start": 0,
"end": 118
}
|
class ____:
def arrayPairSum(self, nums: List[int]) -> int:
nums.sort()
return sum(nums[::2])
|
Solution
|
python
|
astropy__astropy
|
astropy/coordinates/tests/test_representation.py
|
{
"start": 26656,
"end": 38310
}
|
class ____:
def test_name(self):
assert PhysicsSphericalRepresentation.name == "physicsspherical"
assert PhysicsSphericalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
assert s3.phi == 8.0 * u.hourangle
assert s3.theta == 5.0 * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(
Angle(8, u.hour), Angle(5, u.deg), Distance(10, u.kpc)
)
assert s2.phi == 8.0 * u.hourangle
assert s2.theta == 5.0 * u.deg
assert s2.r == 10.0 * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8.0 * u.hourangle)
assert_allclose_quantity(s2.theta, 5.0 * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=10 * u.kpc
)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters phi, theta, and r cannot be broadcast"
):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9, 10] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[10, 20] * u.kpc
)
with pytest.raises(AttributeError):
s1.phi = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.theta = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.r = 1.0 * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(
phi=np.arange(10) * u.deg, theta=np.arange(5, 15) * u.deg, r=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg, theta=2 * u.deg, r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = PhysicsSphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(SphericalRepresentation, SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(CylindricalRepresentation, CylindricalDifferential)
assert np.may_share_memory(sph.phi, got.phi)
expected = BaseRepresentation.represent_as(
sph, CylindricalRepresentation, CylindricalDifferential
)
assert_allclose_quantity(got.rho, expected.rho, atol=5e-17 * u.kpc)
assert_allclose_quantity(got.phi, expected.phi, atol=3e-16 * u.deg)
assert_array_equal(got.z, expected.z)
got = sph.represent_as(RadialRepresentation, RadialDifferential)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, RadialRepresentation, RadialDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_to_cylindrical_at_the_origin(self):
"""Test that the transformation to cylindrical at the origin preserves phi."""
sph = PhysicsSphericalRepresentation(
phi=270 * u.deg, theta=45 * u.deg, r=0 * u.kpc
)
cyl = sph.represent_as(CylindricalRepresentation)
assert cyl.rho == 0.0 * u.kpc
assert cyl.z == 0.0 * u.kpc
assert cyl.phi == 270 * u.deg # phi is preserved exactly
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation(
[1.0, np.nan] * u.deg, [np.nan, 2.0] * u.deg, [3.0, np.nan] * u.m
)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dthruC = thruC.differentials["s"]
# s3 should not propagate Nan.
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
# ds3 does b/c currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
# through Cartesian does
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
|
TestPhysicsSphericalRepresentation
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/triggers/kubernetes_engine.py
|
{
"start": 1780,
"end": 6962
}
|
class ____(KubernetesPodTrigger):
"""
Trigger for checking pod status until it finishes its job.
:param pod_name: The name of the pod.
:param pod_namespace: The namespace of the pod.
:param cluster_url: The URL pointed to the cluster.
:param ssl_ca_cert: SSL certificate that is used for authentication to the pod.
:param cluster_context: Context that points to kubernetes cluster.
:param poll_interval: Polling period in seconds to check for the status.
:param trigger_start_time: time in Datetime format when the trigger was started
:param in_cluster: run kubernetes client with in_cluster configuration.
:param get_logs: get the stdout of the container as logs of the tasks.
:param startup_timeout: timeout in seconds to start up the pod.
:param base_container_name: The name of the base container in the pod. This container's logs
will appear as part of this task's logs if get_logs is True. Defaults to None. If None,
will consult the class variable BASE_CONTAINER_NAME (which defaults to "base") for the base
container name to use.
:param on_finish_action: What to do when the pod reaches its final state, or the execution is interrupted.
If "delete_pod", the pod will be deleted regardless its state; if "delete_succeeded_pod",
only succeeded pod will be deleted. You can set to "keep_pod" to keep the pod.
:param should_delete_pod: What to do when the pod reaches its final
state, or the execution is interrupted. If True (default), delete the
pod; if False, leave the pod.
Deprecated - use `on_finish_action` instead.
"""
def __init__(
self,
pod_name: str,
pod_namespace: str,
cluster_url: str,
ssl_ca_cert: str,
base_container_name: str,
trigger_start_time: datetime,
cluster_context: str | None = None,
poll_interval: float = 2,
in_cluster: bool | None = None,
get_logs: bool = True,
startup_timeout: int = 120,
on_finish_action: str = "delete_pod",
should_delete_pod: bool | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(
pod_name,
pod_namespace,
trigger_start_time,
base_container_name,
*args,
**kwargs,
)
self.pod_name = pod_name
self.pod_namespace = pod_namespace
self.trigger_start_time = trigger_start_time
self.base_container_name = base_container_name
self.poll_interval = poll_interval
self.cluster_context = cluster_context
self.in_cluster = in_cluster
self.get_logs = get_logs
self.startup_timeout = startup_timeout
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
if should_delete_pod is not None:
warnings.warn(
"`should_delete_pod` parameter is deprecated, please use `on_finish_action`",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
self.on_finish_action = (
OnFinishAction.DELETE_POD if should_delete_pod else OnFinishAction.KEEP_POD
)
self.should_delete_pod = should_delete_pod
else:
self.on_finish_action = OnFinishAction(on_finish_action)
self.should_delete_pod = self.on_finish_action == OnFinishAction.DELETE_POD
self._cluster_url = cluster_url
self._ssl_ca_cert = ssl_ca_cert
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.google.cloud.triggers.kubernetes_engine.GKEStartPodTrigger",
{
"pod_name": self.pod_name,
"pod_namespace": self.pod_namespace,
"cluster_url": self._cluster_url,
"ssl_ca_cert": self._ssl_ca_cert,
"poll_interval": self.poll_interval,
"cluster_context": self.cluster_context,
"in_cluster": self.in_cluster,
"get_logs": self.get_logs,
"startup_timeout": self.startup_timeout,
"trigger_start_time": self.trigger_start_time,
"base_container_name": self.base_container_name,
"should_delete_pod": self.should_delete_pod,
"on_finish_action": self.on_finish_action.value,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"logging_interval": self.logging_interval,
"last_log_time": self.last_log_time,
},
)
@cached_property
def hook(self) -> GKEKubernetesAsyncHook:
return GKEKubernetesAsyncHook(
cluster_url=self._cluster_url,
ssl_ca_cert=self._ssl_ca_cert,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
enable_tcp_keepalive=True,
)
|
GKEStartPodTrigger
|
python
|
joke2k__faker
|
tests/providers/test_bank.py
|
{
"start": 8366,
"end": 9634
}
|
class ____:
"""Test es_MX bank provider"""
def test_bank(self, faker, num_samples):
for _ in range(num_samples):
assert faker.bank() in EsMxBankProvider.banks
@pytest.mark.parametrize(
"clabe,validity",
[
("002864631170560203", True),
("002864631170560202", False),
("00286463117056020", False),
("0028646311705602030", False),
("00286463117056020A", False),
],
ids=[
"valid",
"bad_control_digit",
"too_short",
"too_long",
"non_numeric_characters",
],
)
def test_clabe_validation(self, clabe, validity):
assert is_valid_clabe(clabe) is validity
def test_clabe(self, faker, num_samples):
for _ in range(num_samples):
clabe = faker.clabe()
assert is_valid_clabe(clabe)
assert int(clabe[:3].lstrip("0")) in EsMxBankProvider.bank_codes
def test_clabe_bank_code(self, faker, num_samples):
bank_code = 133
for _ in range(num_samples):
clabe = faker.clabe(bank_code=bank_code)
assert is_valid_clabe(clabe)
assert int(clabe[:3].lstrip("0")) == bank_code
|
TestEsMx
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_template_detail.py
|
{
"start": 2689,
"end": 5198
}
|
class ____(ProjectTemplateAPIBase):
endpoint = "sentry-api-0-organization-project-template-detail"
method = "put"
def test_put__no_feature(self) -> None:
response = self.get_error_response(
self.organization.id, self.project_template.id, status_code=404
)
assert response.status_code == 404
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_put__only_name(self) -> None:
response = self.get_success_response(
self.organization.id,
self.project_template.id,
name="Updated",
)
assert response.data["name"] == "Updated"
# validate db is updated
self.project_template.refresh_from_db()
assert self.project_template.name == "Updated"
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_put__only_options(self) -> None:
options = {"sentry:release_track": "test"}
response = self.get_success_response(
self.organization.id, self.project_template.id, options=options
)
assert response.data["name"] == self.project_template.name
assert response.data["options"] == options
# validate db is updated
self.project_template.refresh_from_db()
assert options == {
option.key: option.value for option in self.project_template.options.all()
}
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_put__name_and_options(self) -> None:
options = {"sentry:release_track": "test"}
response = self.get_success_response(
self.organization.id,
self.project_template.id,
name="Updated",
options=options,
)
assert response.data["name"] == "Updated"
assert response.data["options"] == options
# validate db is updated
self.project_template.refresh_from_db()
assert self.project_template.name == "Updated"
assert options == {
option.key: option.value for option in self.project_template.options.all()
}
@with_feature(PROJECT_TEMPLATE_FEATURE_FLAG)
def test_put__not_found(self) -> None:
response = self.get_error_response(self.organization.id, 100, status_code=404)
assert response.status_code == 404
assert response.data == {
"detail": ErrorDetail(
string="No ProjectTemplate matches the given query.", code="not_found"
)
}
|
ProjectTemplateUpdateTest
|
python
|
pydantic__pydantic
|
pydantic-core/tests/serializers/test_union.py
|
{
"start": 29225,
"end": 29325
}
|
class ____:
def __init__(self, type_: Literal['cat']) -> None:
self.type_ = 'cat'
|
ModelCat
|
python
|
huggingface__transformers
|
tests/models/lfm2_moe/test_modeling_lfm2_moe.py
|
{
"start": 1250,
"end": 1641
}
|
class ____(CausalLMModelTester):
if is_torch_available():
config_class = Lfm2MoeConfig
base_model_class = Lfm2MoeModel
causal_lm_class = Lfm2MoeForCausalLM
def __init__(
self,
parent,
layer_types=["full_attention", "conv"],
):
super().__init__(parent)
self.layer_types = layer_types
@require_torch
|
Lfm2MoeModelTester
|
python
|
pytorch__pytorch
|
test/dynamo/test_dicts.py
|
{
"start": 53637,
"end": 54801
}
|
class ____(torch._dynamo.test_case.TestCase):
def setUp(self):
self._prev_trace_unittest = torch._dynamo.config.enable_trace_unittest
torch._dynamo.config.enable_trace_unittest = True
super().setUp()
def tearDown(self):
torch._dynamo.config.enable_trace_unittest = self._prev_trace_unittest
return super().tearDown()
def assertEqual(self, x, y):
self.assertTrue(x == y, f"Expected {x} to be equal to {y}")
def assertNotEqual(self, x, y):
self.assertFalse(x == y, f"Expected {x} to not be equal to {y}")
class OrderedDictSubclass(OrderedDict):
def get(self, key, default=None, /):
return default
def move_to_end(self, key, last=True, /):
# change the behavior to something else
self.pop(key)
thetype = OrderedDictSubclass
@make_dynamo_test
def test_move_to_end(self):
p = self.thetype({"a": 1, "b": 2, "c": 3})
p.move_to_end("a")
self.assertEqual(list(p.keys()), list("bc"))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
OrderedDictSubclassOverload
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/triton.py
|
{
"start": 10338,
"end": 20578
}
|
class ____:
"""
This is a base class that describes a block descriptor used in Triton kernels.
It can be used to create either a tensor descriptor (with TensorDescriptorOptions)
or a block pointer (with BlockPtrOptions).
"""
params: BlockParameters
constant_offset: sympy.Expr
order: list[int]
mask_vars: OrderedSet[str]
broadcast_shape: Sequence[sympy.Expr]
broadcasting_dims: list[bool]
final_shape: Sequence[sympy.Expr]
_boundary_check: Optional[list[int]] = None
# Can we safely lift the constructor
# to the top of the kernel?
can_lift: bool = False
@property
def shape(self) -> list[sympy.Expr]:
return self.params.shape
@property
def block_shape(self) -> list[sympy.Expr]:
return self.params.block_shape
@property
def strides(self) -> list[sympy.Expr]:
return self.params.strides
@property
def offsets(self) -> list[sympy.Expr]:
return self.params.offsets
@classmethod
def create(
cls,
*,
params: BlockParameters,
constant_offset: sympy.Expr,
range_trees: list[IterationRangesRoot],
mask_vars: OrderedSet[str],
get_max_block: Callable[[str], int],
can_lift=False,
transpose_contiguous=False,
) -> BlockDescriptorOptions:
"""Helper to create a BlockDescriptorOptions instance"""
sizevars = V.graph.sizevars
def lookup_size(exprs: Iterable[sympy.Expr]) -> list[sympy.Expr]:
return [sizevars.lookup_precomputed_size(expr) for expr in exprs]
# Look up precomputed sizes
params.shape = lookup_size(params.shape)
params.strides = lookup_size(params.strides)
# Strip out dimensions of stride 0.
# These will be restored with tl.broadcast_to.
broadcasting_dims = [
sizevars.statically_known_equals(stride, 0) for stride in params.strides
]
# Strip out dimensions of size 1.
# These will be restored by tl.reshape.
singleton_dims = [
sizevars.statically_known_equals(dim, 1) for dim in params.block_shape
]
if all(singleton_dims):
# Handle a pure singletons, e.g. [1, 1]
singleton_dims[-1] = False
# Record the post-broadcast shape before broadcasting dims are removed.
# The pre-broadcast shape is identical to this, except broadcasting dims are
# replaced with 1.
broadcast_shape = [
dim
for dim, is_singleton in zip(params.block_shape, singleton_dims)
if not is_singleton
]
# Combine all removable dims.
removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)]
# Remove singleton_dims from broadcasting_dims so that
# broadcast_shape and broadcasting_dims have the same length
broadcasting_dims = [
dim
for dim, is_singleton in zip(broadcasting_dims, singleton_dims)
if not is_singleton
]
def remove_dims(it):
"""Removes any broadcasting or singleton dims from a given sequence"""
return [
item
for item, is_removable in zip(it, removable_dims)
if not is_removable
]
# Drop removable dimensions from the input.
params = BlockParameters(
**{
key: remove_dims(val) for key, val in dataclasses.asdict(params).items()
},
)
# TODO: Generalize to ND tensors.
transpose = transpose_contiguous and params.strides[-1] != 1
if transpose:
params = params.transpose()
# Compute the final shape, adjusting for special kernel types.
final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees]
if V.kernel.no_x_dim:
assert range_trees[0].prefix == "x"
final_shape.pop(0)
# Check for when BlockParams have been transposed.
order = list(reversed(range(len(params.shape))))
if transpose:
final_shape.reverse()
order.reverse()
reduction_ndim = V.kernel.num_reduction_dims
if (
not V.kernel.inside_reduction
and len(params.strides) == len(V.kernel.numels) - reduction_ndim
and V.kernel.features.is_reduction()
):
# Need to expand rank to match the rank used inside the reduction loop
final_shape += [sympy.S.One] * reduction_ndim
result = cls(
params=params,
constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset),
order=order,
mask_vars=mask_vars,
final_shape=final_shape,
broadcast_shape=broadcast_shape,
broadcasting_dims=broadcasting_dims,
can_lift=can_lift,
)
result.compute_boundary_check(get_max_block, range_trees)
return result
def replace_offset(
self, expr: sympy.Expr, replacement: sympy.Expr, symt: SymT
) -> sympy.Expr:
"""
Replaces instances of {symt}_offset with the new expression.
"""
roffset = TritonSymbols.block_offsets[symt]
return sympy_subs(expr, {roffset: replacement})
def remove_roffsets(self, expr: sympy.Expr) -> sympy.Expr:
for symt in TritonSymbols.reduction_types:
expr = self.replace_offset(expr, sympy.Integer(0), symt)
return expr
def compute_boundary_check(
self,
get_max_block: Callable[[str], int],
range_trees: list[IterationRangesRoot],
) -> None:
"""List of indices to pass to tl.load(boundary_check=...)"""
sizevars = V.graph.sizevars
# Substitute maximum block sizes in shape expressions.
# This works in multiple_of checks because block sizes are powers of 2.
block_to_max: dict[sympy.Expr, Any] = {
TritonSymbols.block_sizes[t.symt]: get_max_block(prefix_str[t.symt])
for t in range_trees
}
# Also see Note: Constant mask optimisation
# if ynumel / YBLOCK > max_ygrid, then the z dimension is used to handle
# the remaining programs that cannot fit into the y dimension. This means
# it's possible that more than the required number of programs are launched,
# possibly leading to out-of-bounds accesses. So even if ynumel divides YBLOCK,
# boundary checking is required in the dimensions that are based on YBLOCK
# e.g. for [YBLOCK // 16, YBLOCK, XBLOCK] dimensions 0 and 1 need boundary
# checks when max_ygrid is exceeded.
needs_overflow_grid = any(map(V.kernel.needs_yz_grid_overflow, range_trees))
self._boundary_check = [
idx
for idx in range(len(self.shape))
if (
not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero)
and (
(
needs_overflow_grid
and TritonSymbols.block_sizes[SymT.YBLOCK]
in self.block_shape[idx].free_symbols
)
or (
not sizevars.statically_known_multiple_of(
self.shape[idx], self.block_shape[idx]
)
and not sizevars.statically_known_multiple_of(
self.shape[idx],
sympy_subs(self.block_shape[idx], block_to_max),
)
)
)
and not (
V.kernel.no_x_dim
and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK]
)
)
]
def boundary_check(self) -> list[int]:
assert self._boundary_check is not None
return self._boundary_check
def has_indirect(self) -> bool:
return False # block_ptr can't do indirect indexing
def has_rindex(self) -> bool:
return any(
free_symbol_is_type(expr, TritonSymbols.reduction_types)
for expr in self.block_shape
)
def has_rmask(self) -> bool:
return self.has_rindex()
def has_tmpmask(self) -> bool:
return False # block_ptr can't do indirect indexing
def has_mask(self) -> bool:
return bool(self.boundary_check())
def codegen_broadcast_and_reshape(
self,
value: str,
initial_shape: Sequence[sympy.Expr],
final_shape: Sequence[sympy.Expr],
allow_implicit: bool,
) -> str:
"""
Generate a broadcast and a reshape for the block descriptor.
This restores stride-0 dimensions which were removed from the block descriptor.
"""
# Reshape to add singletons.
pre_broadcast_shape = [
sympy.S.One if is_broadcasting else dim
for dim, is_broadcasting in zip(
self.broadcast_shape, self.broadcasting_dims
)
]
value = triton_reshape(value, initial_shape, pre_broadcast_shape)
# Broadcast singletons.
# For loads, we can often implicitly broadcast singleton dimensions.
# We need an explicit broadcast for stores, or if the final reshape does more
# than add singletons.
sizevars = V.graph.sizevars
supports_implicit_broadcast = allow_implicit and (
len(pre_broadcast_shape) == len(final_shape)
and all(
sizevars.statically_known_equals(pre_dim, 1)
or sizevars.statically_known_equals(pre_dim, post_dim)
for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape)
)
)
if any(self.broadcasting_dims) and not supports_implicit_broadcast:
value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})"
# Reshape to the final shape.
value = triton_reshape(value, self.broadcast_shape, final_shape)
return value
@dataclasses.dataclass
|
BlockDescriptorOptions
|
python
|
PyCQA__pydocstyle
|
src/tests/test_cases/test.py
|
{
"start": 3963,
"end": 11888
}
|
class ____:
"""Leading and trailing space missing."""
pass
@expect('D205: 1 blank line required between summary line and description '
'(found 0)')
@expect('D213: Multi-line docstring summary should start at the second line')
def multi_line_zero_separating_blanks():
"""Summary.
Description.
"""
@expect('D205: 1 blank line required between summary line and description '
'(found 2)')
@expect('D213: Multi-line docstring summary should start at the second line')
def multi_line_two_separating_blanks():
"""Summary.
Description.
"""
@expect('D213: Multi-line docstring summary should start at the second line')
def multi_line_one_separating_blanks():
"""Summary.
Description.
"""
@expect('D207: Docstring is under-indented')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfsdf():
"""Summary.
Description.
"""
@expect('D207: Docstring is under-indented')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdsdfsdffsdf():
"""Summary.
Description.
"""
@expect('D208: Docstring is over-indented')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfsdsdf24():
"""Summary.
Description.
"""
@expect('D208: Docstring is over-indented')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfsdsdfsdf24():
"""Summary.
Description.
"""
@expect('D208: Docstring is over-indented')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfsdfsdsdsdfsdf24():
"""Summary.
Description.
"""
@expect('D209: Multi-line docstring closing quotes should be on a separate '
'line')
@expect('D213: Multi-line docstring summary should start at the second line')
def asdfljdf24():
"""Summary.
Description."""
@expect('D210: No whitespaces allowed surrounding docstring text')
def endswith():
"""Whitespace at the end. """
@expect('D210: No whitespaces allowed surrounding docstring text')
def around():
""" Whitespace at everywhere. """
@expect('D210: No whitespaces allowed surrounding docstring text')
@expect('D213: Multi-line docstring summary should start at the second line')
def multiline():
""" Whitespace at the beginning.
This is the end.
"""
@expect('D300: Use """triple double quotes""" (found \'\'\'-quotes)')
def triple_single_quotes_raw():
r'''Summary.'''
@expect('D300: Use """triple double quotes""" (found \'\'\'-quotes)')
def triple_single_quotes_raw_uppercase():
R'''Summary.'''
@expect('D300: Use """triple double quotes""" (found \'-quotes)')
def single_quotes_raw():
r'Summary.'
@expect('D300: Use """triple double quotes""" (found \'-quotes)')
def single_quotes_raw_uppercase():
R'Summary.'
@expect('D300: Use """triple double quotes""" (found \'-quotes)')
@expect('D301: Use r""" if any backslashes in a docstring')
def single_quotes_raw_uppercase_backslash():
R'Sum\mary.'
@expect('D301: Use r""" if any backslashes in a docstring')
def double_quotes_backslash():
"""Sum\\mary."""
@expect('D301: Use r""" if any backslashes in a docstring')
def double_quotes_backslash_uppercase():
R"""Sum\\mary."""
@expect('D213: Multi-line docstring summary should start at the second line')
def exceptions_of_D301():
"""Exclude some backslashes from D301.
In particular, line continuations \
and unicode literals \u0394 and \N{GREEK CAPITAL LETTER DELTA}.
They are considered to be intentionally unescaped.
"""
@expect("D400: First line should end with a period (not 'y')")
@expect("D415: First line should end with a period, question mark, "
"or exclamation point (not 'y')")
def lwnlkjl():
"""Summary"""
@expect("D401: First line should be in imperative mood "
"(perhaps 'Return', not 'Returns')")
def liouiwnlkjl():
"""Returns foo."""
@expect("D401: First line should be in imperative mood; try rephrasing "
"(found 'Constructor')")
def sdgfsdg23245():
"""Constructor for a foo."""
@expect("D401: First line should be in imperative mood; try rephrasing "
"(found 'Constructor')")
def sdgfsdg23245777():
"""Constructor."""
@expect('D402: First line should not be the function\'s "signature"')
def foobar():
"""Signature: foobar()."""
@expect('D213: Multi-line docstring summary should start at the second line')
def new_209():
"""First line.
More lines.
"""
pass
@expect('D213: Multi-line docstring summary should start at the second line')
def old_209():
"""One liner.
Multi-line comments. OK to have extra blank line
"""
@expect("D103: Missing docstring in public function")
def oneliner_d102(): return
@expect("D400: First line should end with a period (not 'r')")
@expect("D415: First line should end with a period, question mark,"
" or exclamation point (not 'r')")
def oneliner_withdoc(): """One liner"""
def ignored_decorator(func): # noqa: D400,D401,D415
"""Runs something"""
func()
pass
def decorator_for_test(func): # noqa: D400,D401,D415
"""Runs something"""
func()
pass
@ignored_decorator
def oneliner_ignored_decorator(): """One liner"""
@decorator_for_test
@expect("D400: First line should end with a period (not 'r')")
@expect("D415: First line should end with a period, question mark,"
" or exclamation point (not 'r')")
def oneliner_with_decorator_expecting_errors(): """One liner"""
@decorator_for_test
def valid_oneliner_with_decorator(): """One liner."""
@expect("D207: Docstring is under-indented")
@expect('D213: Multi-line docstring summary should start at the second line')
def docstring_start_in_same_line(): """First Line.
Second Line
"""
def function_with_lambda_arg(x=lambda y: y):
"""Wrap the given lambda."""
@expect('D213: Multi-line docstring summary should start at the second line')
def a_following_valid_function(x=None):
"""Check for a bug where the previous function caused an assertion.
The assertion was caused in the next function, so this one is necessary.
"""
def outer_function():
"""Do something."""
def inner_function():
"""Do inner something."""
return 0
@expect("D400: First line should end with a period (not 'g')")
@expect("D401: First line should be in imperative mood "
"(perhaps 'Run', not 'Runs')")
@expect("D415: First line should end with a period, question mark, "
"or exclamation point (not 'g')")
def docstring_bad():
"""Runs something"""
pass
def docstring_bad_ignore_all(): # noqa
"""Runs something"""
pass
def docstring_bad_ignore_one(): # noqa: D400,D401,D415
"""Runs something"""
pass
@expect("D401: First line should be in imperative mood "
"(perhaps 'Run', not 'Runs')")
def docstring_ignore_some_violations_but_catch_D401(): # noqa: E501,D400,D415
"""Runs something"""
pass
@expect(
"D401: First line should be in imperative mood "
"(perhaps 'Initiate', not 'Initiates')"
)
def docstring_initiates():
"""Initiates the process."""
@expect(
"D401: First line should be in imperative mood "
"(perhaps 'Initialize', not 'Initializes')"
)
def docstring_initializes():
"""Initializes the process."""
@wraps(docstring_bad_ignore_one)
def bad_decorated_function():
"""Bad (E501) but decorated"""
pass
def valid_google_string(): # noqa: D400
"""Test a valid something!"""
@expect("D415: First line should end with a period, question mark, "
"or exclamation point (not 'g')")
def bad_google_string(): # noqa: D400
"""Test a valid something"""
# This is reproducing a bug where AttributeError is raised when parsing class
# parameters as functions for Google / Numpy conventions.
|
LeadingAndTrailingSpaceMissing
|
python
|
modin-project__modin
|
modin/pandas/indexing.py
|
{
"start": 7338,
"end": 22404
}
|
class ____(QueryCompilerCaster, ClassLogger):
"""
Base class for location indexer like loc and iloc.
Parameters
----------
modin_df : Union[DataFrame, Series]
DataFrame to operate on.
"""
df: Union[DataFrame, Series]
qc: BaseQueryCompiler
_extensions: EXTENSION_DICT_TYPE = EXTENSION_DICT_TYPE(dict)
def is_backend_pinned(self) -> bool:
"""
Get whether this object's data is pinned to a particular backend.
Returns
-------
bool
True if the data is pinned.
"""
return self.df.is_backend_pinned()
def _set_backend_pinned(self, pinned: bool, inplace: bool = False):
"""
Update whether this object's data is pinned to a particular backend.
Parameters
----------
pinned : bool
Whether the data is pinned.
inplace : bool, default: False
Whether to update the object in place.
Returns
-------
Optional[Self]
The object with the new pin state, if `inplace` is False. Otherwise, None.
"""
change = (self.is_backend_pinned() and not pinned) or (
not self.is_backend_pinned() and pinned
)
if not change:
return None if inplace else self
result = type(self)(self.df._set_backend_pinned(pinned))
if inplace:
result._copy_into(self)
return None
return result
@disable_logging
@_inherit_docstrings(QueryCompilerCaster.set_backend)
def set_backend(
self, backend, inplace: bool = False, *, switch_operation: Optional[str] = None
) -> Optional[Self]:
result = type(self)(
self.df.set_backend(backend, switch_operation=switch_operation)
)
if inplace:
result._copy_into(self)
return None
return result
@disable_logging
@_inherit_docstrings(QueryCompilerCaster._get_query_compiler)
def _get_query_compiler(self):
return getattr(self, "qc", None)
@disable_logging
@_inherit_docstrings(QueryCompilerCaster.get_backend)
def get_backend(self):
return self.qc.get_backend()
@disable_logging
@_inherit_docstrings(QueryCompilerCaster._copy_into)
def _copy_into(self, other: Series):
other.qc = self.df._query_compiler
other.df._update_inplace(new_query_compiler=self.df._query_compiler)
other.df._set_backend_pinned(self.is_backend_pinned())
return None
def __init__(self, modin_df: Union[DataFrame, Series]):
# TODO(https://github.com/modin-project/modin/issues/7513): Do not keep
# both `df` and `qc`.
self.df = modin_df
self.qc = modin_df._query_compiler
def _validate_key_length(self, key: tuple) -> tuple: # noqa: GL08
# Implementation copied from pandas.
if len(key) > self.df.ndim:
if key[0] is Ellipsis:
# e.g. Series.iloc[..., 3] reduces to just Series.iloc[3]
key = key[1:]
if Ellipsis in key:
raise IndexingError(_one_ellipsis_message)
return self._validate_key_length(key)
raise IndexingError("Too many indexers")
return key
def __getitem__(self, key): # pragma: no cover
"""
Retrieve dataset according to `key`.
Parameters
----------
key : callable, scalar, or tuple
The global row index to retrieve data from.
Returns
-------
modin.pandas.DataFrame or modin.pandas.Series
Located dataset.
See Also
--------
pandas.DataFrame.loc
"""
raise NotImplementedError("Implemented by subclasses")
def __setitem__(self, key, item): # pragma: no cover
"""
Assign `item` value to dataset located by `key`.
Parameters
----------
key : callable or tuple
The global row numbers to assign data to.
item : modin.pandas.DataFrame, modin.pandas.Series or scalar
Value that should be assigned to located dataset.
See Also
--------
pandas.DataFrame.iloc
"""
raise NotImplementedError("Implemented by subclasses")
def _get_pandas_object_from_qc_view(
self,
qc_view,
row_multiindex_full_lookup: bool,
col_multiindex_full_lookup: bool,
row_scalar: bool,
col_scalar: bool,
ndim: int,
):
"""
Convert the query compiler view to the appropriate pandas object.
Parameters
----------
qc_view : BaseQueryCompiler
Query compiler to convert.
row_multiindex_full_lookup : bool
See _multiindex_possibly_contains_key.__doc__.
col_multiindex_full_lookup : bool
See _multiindex_possibly_contains_key.__doc__.
row_scalar : bool
Whether indexer for rows is scalar.
col_scalar : bool
Whether indexer for columns is scalar.
ndim : {0, 1, 2}
Number of dimensions in dataset to be retrieved.
Returns
-------
modin.pandas.DataFrame or modin.pandas.Series
The pandas object with the data from the query compiler view.
Notes
-----
Usage of `slice(None)` as a lookup is a hack to pass information about
full-axis grab without computing actual indices that triggers lazy computations.
Ideally, this API should get rid of using slices as indexers and either use a
common ``Indexer`` object or range and ``np.ndarray`` only.
"""
if ndim == 2:
return self.df.__constructor__(query_compiler=qc_view)
if isinstance(self.df, Series) and not row_scalar:
return self.df.__constructor__(query_compiler=qc_view)
if isinstance(self.df, Series):
axis = 0
elif ndim == 0:
axis = None
else:
# We are in the case where ndim == 1
# The axis we squeeze on depends on whether we are looking for an exact
# value or a subset of rows and columns. Knowing if we have a full MultiIndex
# lookup or scalar lookup can help us figure out whether we need to squeeze
# on the row or column index.
axis = (
None
if (col_scalar and row_scalar)
or (row_multiindex_full_lookup and col_multiindex_full_lookup)
else 1 if col_scalar or col_multiindex_full_lookup else 0
)
res_df = self.df.__constructor__(query_compiler=qc_view)
return res_df.squeeze(axis=axis)
def _setitem_positional(self, row_lookup, col_lookup, item, axis=None):
"""
Assign `item` value to located dataset.
Parameters
----------
row_lookup : slice or scalar
The global row index to write item to.
col_lookup : slice or scalar
The global col index to write item to.
item : DataFrame, Series or scalar
The new item needs to be set. It can be any shape that's
broadcast-able to the product of the lookup tables.
axis : {None, 0, 1}, default: None
If not None, it means that whole axis is used to assign a value.
0 means assign to whole column, 1 means assign to whole row.
If None, it means that partial assignment is done on both axes.
"""
# Convert slices to indices for the purposes of application.
# TODO (devin-petersohn): Apply to slice without conversion to list
if isinstance(row_lookup, slice):
row_lookup = range(len(self.qc.index))[row_lookup]
if isinstance(col_lookup, slice):
col_lookup = range(len(self.qc.columns))[col_lookup]
# This is True when we dealing with assignment of a full column. This case
# should be handled in a fastpath with `df[col] = item`.
if axis == 0:
assert len(col_lookup) == 1
self.df[self.df.columns[col_lookup][0]] = item
# This is True when we are assigning to a full row. We want to reuse the setitem
# mechanism to operate along only one axis for performance reasons.
elif axis == 1:
if hasattr(item, "_query_compiler"):
if isinstance(item, DataFrame):
item = item.squeeze(axis=0)
item = item._query_compiler
assert len(row_lookup) == 1
new_qc = self.qc.setitem(1, self.qc.index[row_lookup[0]], item)
self.df._create_or_update_from_compiler(new_qc, inplace=True)
# Assignment to both axes.
else:
new_qc = self.qc.write_items(row_lookup, col_lookup, item)
self.df._create_or_update_from_compiler(new_qc, inplace=True)
self.qc = self.df._query_compiler
def _determine_setitem_axis(self, row_lookup, col_lookup, row_scalar, col_scalar):
"""
Determine an axis along which we should do an assignment.
Parameters
----------
row_lookup : slice or list
Indexer for rows.
col_lookup : slice or list
Indexer for columns.
row_scalar : bool
Whether indexer for rows is scalar or not.
col_scalar : bool
Whether indexer for columns is scalar or not.
Returns
-------
int or None
None if this will be a both axis assignment, number of axis to assign in other cases.
Notes
-----
axis = 0: column assignment df[col] = item
axis = 1: row assignment df.loc[row] = item
axis = None: assignment along both axes
"""
if self.df.shape == (1, 1):
return None if not (row_scalar ^ col_scalar) else 1 if row_scalar else 0
def get_axis(axis):
return self.qc.index if axis == 0 else self.qc.columns
row_lookup_len, col_lookup_len = [
(
len(lookup)
if not isinstance(lookup, slice)
else compute_sliced_len(lookup, len(get_axis(i)))
)
for i, lookup in enumerate([row_lookup, col_lookup])
]
if col_lookup_len == 1 and row_lookup_len == 1:
axis = None
elif (
row_lookup_len == len(self.qc.index)
and col_lookup_len == 1
and isinstance(self.df, DataFrame)
):
axis = 0
elif col_lookup_len == len(self.qc.columns) and row_lookup_len == 1:
axis = 1
else:
axis = None
return axis
def _parse_row_and_column_locators(self, tup):
"""
Unpack the user input for getitem and setitem and compute ndim.
loc[a] -> ([a], :), 1D
loc[[a,b]] -> ([a,b], :),
loc[a,b] -> ([a], [b]), 0D
Parameters
----------
tup : tuple
User input to unpack.
Returns
-------
row_loc : scalar or list
Row locator(s) as a scalar or List.
col_list : scalar or list
Column locator(s) as a scalar or List.
ndim : {0, 1, 2}
Number of dimensions of located dataset.
"""
row_loc, col_loc = slice(None), slice(None)
if is_tuple(tup):
row_loc = tup[0]
if len(tup) == 2:
col_loc = tup[1]
if len(tup) > 2:
raise IndexingError("Too many indexers")
else:
row_loc = tup
row_loc = row_loc(self.df) if callable(row_loc) else row_loc
col_loc = col_loc(self.df) if callable(col_loc) else col_loc
return row_loc, col_loc, _compute_ndim(row_loc, col_loc)
# HACK: This method bypasses regular ``loc/iloc.__getitem__`` flow in order to ensure better
# performance in the case of boolean masking. The only purpose of this method is to compensate
# for a lack of backend's indexing API, there is no Query Compiler method allowing masking
# along both axis when any of the indexers is a boolean. That's why rows and columns masking
# phases are separate in this case.
# TODO: Remove this method and handle this case naturally via ``loc/iloc.__getitem__`` flow
# when QC API would support both-axis masking with boolean indexers.
def _handle_boolean_masking(self, row_loc, col_loc):
"""
Retrieve dataset according to the boolean mask for rows and an indexer for columns.
In comparison with the regular ``loc/iloc.__getitem__`` flow this method efficiently
masks rows with a Modin Series boolean mask without materializing it (if the selected
execution implements such masking).
Parameters
----------
row_loc : modin.pandas.Series of bool dtype
Boolean mask to index rows with.
col_loc : object
An indexer along column axis.
Returns
-------
modin.pandas.DataFrame or modin.pandas.Series
Located dataset.
"""
ErrorMessage.catch_bugs_and_request_email(
failure_condition=not isinstance(row_loc, Series),
extra_log=f"Only ``modin.pandas.Series`` boolean masks are acceptable, got: {type(row_loc)}",
)
masked_df = self.df.__constructor__(
query_compiler=self.qc.getitem_array(row_loc._query_compiler)
)
if isinstance(masked_df, Series):
assert col_loc == slice(None)
return masked_df
# Passing `slice(None)` as a row indexer since we've just applied it
return type(self)(masked_df)[(slice(None), col_loc)]
def _multiindex_possibly_contains_key(self, axis, key):
"""
Determine if a MultiIndex row/column possibly contains a key.
Check to see if the current DataFrame has a MultiIndex row/column and if it does,
check to see if the key is potentially a full key-lookup such that the number of
levels match up with the length of the tuple key.
Parameters
----------
axis : {0, 1}
0 for row, 1 for column.
key : Any
Lookup key for MultiIndex row/column.
Returns
-------
bool
If the MultiIndex possibly contains the given key.
Notes
-----
This function only returns False if we have a partial key lookup. It's
possible that this function returns True for a key that does NOT exist
since we only check the length of the `key` tuple to match the number
of levels in the MultiIndex row/colunmn.
"""
if not self.qc.has_multiindex(axis=axis):
return False
multiindex = self.df.index if axis == 0 else self.df.columns
return isinstance(key, tuple) and len(key) == len(multiindex.levels)
|
_LocationIndexerBase
|
python
|
django__django
|
tests/queries/models.py
|
{
"start": 4451,
"end": 4571
}
|
class ____(models.Model):
x = models.ForeignKey(LoopX, models.CASCADE)
class Meta:
ordering = ["x"]
|
LoopY
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 964056,
"end": 964354
}
|
class ____(sgqlc.types.Type):
"""An individual package version"""
__schema__ = github_schema
__field_names__ = ("identifier",)
identifier = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="identifier")
"""The package name or version"""
|
SecurityAdvisoryPackageVersion
|
python
|
rapidsai__cudf
|
python/cudf/cudf/pandas/fast_slow_proxy.py
|
{
"start": 23285,
"end": 26268
}
|
class ____(_FastSlowProxy):
"""
Proxy type for a pair of "intermediate" types that appear as
intermediate values when invoking operations on "final" types.
The conversion between fast and slow types is done by keeping
track of the sequence of operations that created the wrapped
object, and "playing back" that sequence starting from the "slow"
version of the originating _FinalProxy.
Do not attempt to use this class directly. Instead, use
`make_intermediate_proxy_type` to create subtypes.
"""
_method_chain: tuple[Callable, tuple, dict]
@classmethod
def _fsproxy_wrap(
cls,
obj: Any,
method_chain: tuple[Callable, tuple, dict],
):
"""
Parameters
----------
obj: The object to wrap
method_chain: A tuple of the form (func, args, kwargs) where
`func` is the function that was called to create `obj`,
and `args` and `kwargs` are the arguments that were passed
to `func`.
"""
proxy = object.__new__(cls)
proxy._fsproxy_wrapped = obj
proxy._method_chain = method_chain
return proxy
@nvtx.annotate(
"COPY_SLOW_TO_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_SLOW_TO_FAST"],
domain="cudf_pandas",
)
def _fsproxy_slow_to_fast(self) -> Any:
func, args, kwargs = self._method_chain
args, kwargs = _fast_arg(args), _fast_arg(kwargs)
return func(*args, **kwargs)
@nvtx.annotate(
"COPY_FAST_TO_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_FAST_TO_SLOW"],
domain="cudf_pandas",
)
def _fsproxy_fast_to_slow(self) -> Any:
func, args, kwargs = self._method_chain
args, kwargs = _slow_arg(args), _slow_arg(kwargs)
return func(*args, **kwargs)
def __reduce__(self):
"""
In conjunction with `__proxy_setstate__`, this effectively enables
proxy types to be pickled and unpickled by pickling and unpickling
the underlying wrapped types.
"""
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
pickled_wrapped_obj = pickle.dumps(self._fsproxy_wrapped)
pickled_method_chain = pickle.dumps(self._method_chain)
return (
_PickleConstructor(type(self)),
(),
(pickled_wrapped_obj, pickled_method_chain),
)
def __setstate__(self, state):
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
unpickled_wrapped_obj = pickle.loads(state[0])
unpickled_method_chain = pickle.loads(state[1])
self._fsproxy_wrapped = unpickled_wrapped_obj
self._method_chain = unpickled_method_chain
|
_IntermediateProxy
|
python
|
huggingface__transformers
|
src/transformers/models/cohere2/modeling_cohere2.py
|
{
"start": 16879,
"end": 20324
}
|
class ____(Cohere2PreTrainedModel):
def __init__(self, config: Cohere2Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Cohere2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Cohere2LayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
self.rotary_emb = Cohere2RotaryEmbedding(config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None and not self.training:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
if not isinstance(causal_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
|
Cohere2Model
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/project_overview.py
|
{
"start": 769,
"end": 913
}
|
class ____(ProjectPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
}
|
RelaxedProjectPermission
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/base_scenario.py
|
{
"start": 3842,
"end": 31145
}
|
class ____(
NamedTuple(
"_AssetReconciliationScenario",
[
("unevaluated_runs", Sequence[RunSpec]),
("assets", Optional[Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]]]),
("asset_checks", Optional[Sequence[dg.AssetChecksDefinition]]),
("between_runs_delta", Optional[datetime.timedelta]),
("evaluation_delta", Optional[datetime.timedelta]),
("cursor_from", Optional["AssetReconciliationScenario"]),
("current_time", Optional[datetime.datetime]),
("asset_selection", Optional[dg.AssetSelection]),
(
"active_backfill_targets",
Optional[
Sequence[Union[Mapping[dg.AssetKey, PartitionsSubset], Sequence[dg.AssetKey]]]
],
),
("dagster_runs", Optional[Sequence[dg.DagsterRun]]),
("event_log_entries", Optional[Sequence[dg.EventLogEntry]]),
("expected_run_requests", Optional[Sequence[dg.RunRequest]]),
(
"code_locations",
Optional[Mapping[str, Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]]]],
),
("expected_evaluations", Optional[Sequence[AssetEvaluationSpec]]),
("requires_respect_materialization_data_versions", bool),
("supports_with_remote_asset_graph", bool),
("expected_error_message", Optional[str]),
],
)
):
def __new__(
cls,
unevaluated_runs: Sequence[RunSpec],
assets: Optional[Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]]],
asset_checks: Optional[Sequence[dg.AssetChecksDefinition]] = None,
between_runs_delta: Optional[datetime.timedelta] = None,
evaluation_delta: Optional[datetime.timedelta] = None,
cursor_from: Optional["AssetReconciliationScenario"] = None,
current_time: Optional[datetime.datetime] = None,
asset_selection: Optional[dg.AssetSelection] = None,
active_backfill_targets: Optional[
Sequence[Union[Mapping[dg.AssetKey, PartitionsSubset], Sequence[dg.AssetKey]]]
] = None,
dagster_runs: Optional[Sequence[dg.DagsterRun]] = None,
event_log_entries: Optional[Sequence[dg.EventLogEntry]] = None,
expected_run_requests: Optional[Sequence[dg.RunRequest]] = None,
code_locations: Optional[
Mapping[str, Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]]]
] = None,
expected_evaluations: Optional[Sequence[AssetEvaluationSpec]] = None,
requires_respect_materialization_data_versions: bool = False,
supports_with_remote_asset_graph: bool = True,
expected_error_message: Optional[str] = None,
) -> "AssetReconciliationScenario":
# For scenarios with no auto-materialize policies, we infer auto-materialize policies
# and add them to the assets.
assets_with_implicit_policies = assets
if assets and all(
(
isinstance(a, dg.AssetsDefinition)
and all(spec.auto_materialize_policy is None for spec in a.specs)
)
or isinstance(a, dg.SourceAsset)
for a in assets
):
asset_graph = AssetGraph.from_assets([*assets, *(asset_checks or [])])
auto_materialize_asset_keys = (
asset_selection.resolve(asset_graph)
if asset_selection is not None
else asset_graph.materializable_asset_keys
)
assets_with_implicit_policies = with_implicit_auto_materialize_policies(
assets, asset_graph, auto_materialize_asset_keys
)
return super().__new__(
cls,
unevaluated_runs=unevaluated_runs,
assets=assets_with_implicit_policies,
asset_checks=asset_checks,
between_runs_delta=between_runs_delta,
evaluation_delta=evaluation_delta,
cursor_from=cursor_from,
current_time=current_time,
asset_selection=asset_selection,
active_backfill_targets=active_backfill_targets,
dagster_runs=dagster_runs,
event_log_entries=event_log_entries,
expected_run_requests=expected_run_requests,
code_locations=code_locations,
expected_evaluations=expected_evaluations,
requires_respect_materialization_data_versions=requires_respect_materialization_data_versions,
supports_with_remote_asset_graph=supports_with_remote_asset_graph,
expected_error_message=expected_error_message,
)
def _get_code_location_origin(
self, scenario_name, location_name=None
) -> InProcessCodeLocationOrigin:
"""scenarios.py puts all the scenarios in its namespace under different 'hacky_daemon_repo_...' names."""
return InProcessCodeLocationOrigin(
loadable_target_origin=LoadableTargetOrigin(
executable_path=sys.executable,
module_name=(
"dagster_tests.declarative_automation_tests.legacy_tests.scenarios.scenarios"
),
working_directory=os.getcwd(),
attribute="hacky_daemon_repo_"
+ scenario_name
+ (f"_{location_name}" if location_name else ""),
),
location_name=location_name or "test_location",
)
    def do_sensor_scenario(
        self,
        instance,
        scenario_name=None,
        with_remote_asset_graph=False,
        respect_materialization_data_versions=False,
    ):
        """Execute this scenario via the sensor/tick-evaluation code path.

        Seeds *instance* with the scenario's runs, event log entries, and
        backfills, replays any ``cursor_from`` scenario first, executes the
        unevaluated runs under a frozen clock, then evaluates a single
        automation tick and returns ``(run_requests, cursor, evaluations)``.
        """
        if (
            self.requires_respect_materialization_data_versions
            and not respect_materialization_data_versions
        ):
            pytest.skip("requires respect_materialization_data_versions to be True")
        assert not self.code_locations, "setting code_locations not supported for sensor tests"
        # Freeze the clock at the scenario's start time (or "now").
        test_time = self.current_time or get_current_datetime()
        with freeze_time(test_time):
            @dg.repository  # pyright: ignore[reportArgumentType]
            def repo():
                return self.assets
            # add any runs to the instance
            for dagster_run in self.dagster_runs or []:
                instance.add_run(dagster_run)
                # make sure to log the planned events
                for asset_key in dagster_run.asset_selection:  # pyright: ignore[reportOptionalIterable]
                    event = dg.DagsterEvent(
                        event_type_value=DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value,
                        job_name=dagster_run.job_name,
                        event_specific_data=AssetMaterializationPlannedData(
                            asset_key, partition=(dagster_run.tags or {}).get("dagster/partition")
                        ),
                    )
                    instance.report_dagster_event(event, dagster_run.run_id, logging.DEBUG)
            # add any events to the instance
            for event_log_entry in self.event_log_entries or []:
                instance.store_event(event_log_entry)
            # add any backfills to the instance
            for i, target in enumerate(self.active_backfill_targets or []):
                # A Mapping target specifies per-asset partition subsets;
                # otherwise the target is a plain list of unpartitioned keys.
                if isinstance(target, Mapping):
                    target_subset = AssetGraphSubset(
                        partitions_subsets_by_asset_key=target,
                        non_partitioned_asset_keys=set(),
                    )
                else:
                    target_subset = AssetGraphSubset(
                        partitions_subsets_by_asset_key={},
                        non_partitioned_asset_keys=target,  # pyright: ignore[reportArgumentType]
                    )
                empty_subset = AssetGraphSubset(
                    partitions_subsets_by_asset_key={},
                    non_partitioned_asset_keys=set(),
                )
                asset_backfill_data = AssetBackfillData(
                    latest_storage_id=0,
                    target_subset=target_subset,
                    requested_runs_for_target_roots=False,
                    materialized_subset=empty_subset,
                    requested_subset=empty_subset,
                    failed_and_downstream_subset=empty_subset,
                    backfill_start_time=TimestampWithTimezone(test_time.timestamp(), "UTC"),
                )
                backfill = PartitionBackfill(
                    backfill_id=f"backfill{i}",
                    status=BulkActionStatus.REQUESTED,
                    from_failure=False,
                    tags={},
                    backfill_timestamp=test_time.timestamp(),
                    serialized_asset_backfill_data=asset_backfill_data.serialize(
                        dynamic_partitions_store=instance, asset_graph=repo.asset_graph
                    ),
                )
                instance.add_backfill(backfill)
            # Replay the prior scenario (if any) to obtain the starting cursor,
            # and create runs for each run request it produced.
            if self.cursor_from is not None:
                @dg.repository  # pyright: ignore[reportArgumentType]
                def prior_repo():
                    return self.cursor_from.assets  # pyright: ignore[reportOptionalMemberAccess]
                (
                    run_requests,
                    cursor,
                    evaluations,
                ) = self.cursor_from.do_sensor_scenario(
                    instance,
                    scenario_name=scenario_name,
                    with_remote_asset_graph=with_remote_asset_graph,
                )
                for run_request in run_requests:
                    asset_selection = check.not_none(run_request.asset_selection)
                    instance.create_run_for_job(
                        prior_repo.get_implicit_job_def_for_assets(asset_selection),
                        asset_selection=set(asset_selection),
                        tags=run_request.tags,
                    )
            else:
                cursor = AssetDaemonCursor.empty()
            # time.time is patched so wall-clock progress is measured relative
            # to the (possibly advanced) frozen test_time.
            start = datetime.datetime.now()
            def test_time_fn():
                return (test_time + (datetime.datetime.now() - start)).timestamp()
            for run in self.unevaluated_runs:
                if self.between_runs_delta is not None:
                    test_time += self.between_runs_delta
                with freeze_time(test_time), mock.patch("time.time", new=test_time_fn):
                    if run.is_observation:
                        observe(
                            instance=instance,
                            assets=[
                                a
                                for a in self.assets  # pyright: ignore[reportOptionalIterable]
                                if isinstance(a, dg.SourceAsset) and a.key in run.asset_keys
                            ],
                        )
                    else:
                        do_run(
                            asset_keys=run.asset_keys,
                            partition_key=run.partition_key,
                            all_assets=self.assets,  # pyright: ignore[reportArgumentType]
                            instance=instance,
                            failed_asset_keys=run.failed_asset_keys,
                        )
            if self.evaluation_delta is not None:
                test_time += self.evaluation_delta
            with freeze_time(test_time):
                # get asset_graph
                if not with_remote_asset_graph:
                    asset_graph = repo.asset_graph
                else:
                    assert scenario_name is not None, "scenario_name must be provided for daemon runs"
                    with create_test_daemon_workspace_context(
                        workspace_load_target=InProcessTestWorkspaceLoadTarget(
                            self._get_code_location_origin(scenario_name)
                        ),
                        instance=instance,
                    ) as workspace_context:
                        workspace = workspace_context.create_request_context()
                        assert workspace.get_code_location_error("test_location") is None, (
                            workspace.get_code_location_error("test_location")
                        )
                        asset_graph = workspace.asset_graph
                with mock.patch.object(
                    dg.DagsterInstance,
                    "auto_materialize_respect_materialization_data_versions",
                    new=lambda: self.respect_materialization_data_versions,  # pyright: ignore[reportAttributeAccessIssue]
                ):
                    # Evaluate exactly one automation tick.
                    run_requests, cursor, evaluations = AutomationTickEvaluationContext(
                        evaluation_id=cursor.evaluation_id + 1,
                        asset_graph=asset_graph,
                        asset_selection=self.asset_selection or AssetSelection.all(),
                        instance=instance,
                        materialize_run_tags={},
                        observe_run_tags={},
                        cursor=cursor,
                        emit_backfills=False,
                        auto_observe_asset_keys={
                            key
                            for key in asset_graph.observable_asset_keys
                            if asset_graph.get(key).auto_observe_interval_minutes is not None
                        },
                        logger=logging.getLogger("dagster.amp"),
                    ).evaluate()
                # Sanity-check that every requested selection maps to a job.
                for run_request in run_requests:
                    base_job = repo.get_implicit_job_def_for_assets(run_request.asset_selection)  # pyright: ignore[reportArgumentType]
                    assert base_job is not None
                return run_requests, cursor, evaluations
def do_daemon_scenario(
self,
instance,
scenario_name,
debug_crash_flags: Optional[SingleInstigatorDebugCrashFlags] = None,
):
assert bool(self.assets) != bool(self.code_locations), (
"Must specify either assets or code_locations"
)
assert not self.active_backfill_targets, (
"setting active_backfill_targets not supported for daemon tests"
)
test_time = self.current_time or get_current_datetime()
with freeze_time(test_time) if self.current_time else contextlib.nullcontext():
if self.cursor_from is not None:
self.cursor_from.do_daemon_scenario(
instance,
scenario_name=scenario_name,
)
start = datetime.datetime.now()
def test_time_fn():
return (test_time + (datetime.datetime.now() - start)).timestamp()
for run in self.unevaluated_runs:
if self.between_runs_delta is not None:
test_time += self.between_runs_delta
with freeze_time(test_time), mock.patch("time.time", new=test_time_fn):
assert not run.is_observation, "Observations not supported for daemon tests"
if self.assets:
do_run(
asset_keys=run.asset_keys,
partition_key=run.partition_key,
all_assets=self.assets,
instance=instance,
failed_asset_keys=run.failed_asset_keys,
)
else:
all_assets = [
a for assets in check.not_none(self.code_locations).values() for a in assets
]
do_run(
asset_keys=run.asset_keys,
partition_key=run.partition_key,
all_assets=all_assets, # This isn't quite right, it should be filtered to just the assets for the location
instance=instance,
failed_asset_keys=run.failed_asset_keys,
)
if self.evaluation_delta is not None:
test_time += self.evaluation_delta
with freeze_time(test_time):
assert scenario_name is not None, "scenario_name must be provided for daemon runs"
if self.code_locations:
target = InProcessTestWorkspaceLoadTarget(
[
self._get_code_location_origin(scenario_name, location_name)
for location_name in self.code_locations.keys()
]
)
else:
target = InProcessTestWorkspaceLoadTarget(
self._get_code_location_origin(scenario_name)
)
with create_test_daemon_workspace_context(
workspace_load_target=target,
instance=instance,
) as workspace_context:
workspace = workspace_context.create_request_context()
assert workspace.get_code_location_error("test_location") is None, (
workspace.get_code_location_error("test_location")
)
try:
AssetDaemon( # noqa: SLF001
settings=instance.get_auto_materialize_settings(),
pre_sensor_interval_seconds=42,
)._run_iteration_impl(
workspace_context,
threadpool_executor=None,
amp_tick_futures={},
debug_crash_flags=(debug_crash_flags or {}),
)
if self.expected_error_message:
raise Exception(
f"Failed to raise expected error {self.expected_error_message}"
)
except Exception:
if not self.expected_error_message:
raise
assert self.expected_error_message in str(sys.exc_info())
def do_run(
    asset_keys: Sequence[dg.AssetKey],
    partition_key: Optional[str],
    all_assets: Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]],
    instance: DagsterInstance,
    failed_asset_keys: Optional[Sequence[dg.AssetKey]] = None,
    tags: Optional[Mapping[str, str]] = None,
) -> None:
    """Materialize *asset_keys* (a subset of *all_assets*) in memory on *instance*.

    Assets outside the selection are included as source assets so the run can
    still resolve upstream dependencies. Keys in *failed_asset_keys* are run
    with config ``{"fail": True}``, which makes the test asset ops raise
    (errors are suppressed via ``raise_on_error=False``).
    """
    assets_in_run: list[Union[dg.SourceAsset, dg.AssetsDefinition]] = []
    asset_keys_set = set(asset_keys)
    for a in all_assets:
        if isinstance(a, dg.SourceAsset):
            assets_in_run.append(a)
        else:
            selected_keys = asset_keys_set.intersection(a.keys)
            if selected_keys == a.keys:
                # Fully selected: materialize the whole definition.
                assets_in_run.append(a)
            elif not selected_keys:
                # Not selected: include only as upstream source assets.
                assets_in_run.extend(a.to_source_assets())
            else:
                # Partially selected: materialize the selected subset and
                # expose the unselected remainder as source assets.
                assets_in_run.append(a.subset_for(asset_keys_set, selected_asset_check_keys=None))
                assets_in_run.extend(
                    a.subset_for(
                        a.keys - selected_keys, selected_asset_check_keys=None
                    ).to_source_assets()
                )
    dg.materialize_to_memory(
        instance=instance,
        partition_key=partition_key,
        assets=assets_in_run,
        run_config={
            "ops": {
                failed_asset_key.path[-1]: {"config": {"fail": True}}
                for failed_asset_key in (failed_asset_keys or [])
            }
        },
        raise_on_error=False,
        tags=tags,
    )
def single_asset_run(asset_key: str, partition_key: Optional[str] = None) -> RunSpec:
    """Build a RunSpec that materializes exactly one asset key."""
    key = AssetKey.from_coercible(asset_key)
    return RunSpec(asset_keys=[key], partition_key=partition_key)
def run(
    asset_keys: Iterable[str],
    partition_key: Optional[str] = None,
    failed_asset_keys: Optional[Iterable[str]] = None,
    is_observation: bool = False,
):
    """Build a RunSpec covering *asset_keys* plus any *failed_asset_keys*.

    Failed keys are appended to the run's asset list and also recorded
    separately so the runner knows which assets to fail.
    """
    succeeded = [AssetKey.from_coercible(key) for key in asset_keys]
    failed = [AssetKey.from_coercible(key) for key in (failed_asset_keys or [])]
    return RunSpec(
        asset_keys=succeeded + failed,
        failed_asset_keys=failed,
        partition_key=partition_key,
        is_observation=is_observation,
    )
# Run tag under which run_request() serializes (as JSON) the asset keys that
# should fail in the launched run.
FAIL_TAG = "test/fail"
def run_request(
    asset_keys: Union[dg.AssetKey, Sequence[CoercibleToAssetKey]],
    partition_key: Optional[str] = None,
    fail_keys: Optional[Sequence[str]] = None,
    tags: Optional[Mapping[str, str]] = None,
) -> dg.RunRequest:
    """Build a RunRequest targeting *asset_keys*.

    When *fail_keys* is given, it is serialized as JSON under FAIL_TAG so the
    launched run knows which assets should fail.
    """
    if isinstance(asset_keys, dg.AssetKey):
        selection = [asset_keys]
    else:
        selection = [AssetKey.from_coercible(key) for key in asset_keys]
    merged_tags = dict(tags or {})
    if fail_keys:
        merged_tags[FAIL_TAG] = json.dumps(fail_keys)
    return dg.RunRequest(
        asset_selection=selection,
        partition_key=partition_key,
        tags=merged_tags,
    )
def asset_def(
    key: str,
    deps: Optional[Union[list[str], Mapping[str, Optional[dg.PartitionMapping]]]] = None,
    partitions_def: Optional[dg.PartitionsDefinition] = None,
    legacy_freshness_policy: Optional[dg.LegacyFreshnessPolicy] = None,
    auto_materialize_policy: Optional[dg.AutoMaterializePolicy] = None,
    code_version: Optional[str] = None,
    config_schema: Optional[Mapping[str, dg.Field]] = None,
    **asset_def_kwargs,
) -> dg.AssetsDefinition:
    """Build a single test asset named *key*.

    *deps* may be a plain list of upstream keys (passed as non-argument deps)
    or a mapping of upstream key -> PartitionMapping (passed as AssetIns).
    The asset's op raises ValueError when run with config ``{"fail": True}``.
    """
    if deps is None:
        non_argument_deps = None
        ins = None
    elif isinstance(deps, list):
        non_argument_deps = deps
        ins = None
    else:
        # Mapping form: each upstream becomes an AssetIn carrying its partition
        # mapping; dagster_type=Nothing because the value is never consumed.
        non_argument_deps = None
        ins = {
            dep: dg.AssetIn(partition_mapping=partition_mapping, dagster_type=Nothing)
            for dep, partition_mapping in deps.items()
        }
    @dg.asset(
        name=key,
        partitions_def=partitions_def,
        deps=non_argument_deps,
        ins=ins,
        config_schema=config_schema or {"fail": dg.Field(bool, default_value=False)},
        legacy_freshness_policy=legacy_freshness_policy,
        auto_materialize_policy=auto_materialize_policy,
        code_version=code_version,
        **asset_def_kwargs,
    )
    def _asset(context, **kwargs):
        del kwargs
        # Fail on demand so tests can simulate materialization failures.
        if context.op_execution_context.op_config["fail"]:
            raise ValueError("")
    return _asset  # type: ignore
def multi_asset_def(
    keys: list[str],
    deps: Optional[Union[list[str], Mapping[str, set[str]]]] = None,
    can_subset: bool = False,
    legacy_freshness_policies: Optional[Mapping[str, dg.LegacyFreshnessPolicy]] = None,
) -> dg.AssetsDefinition:
    """Build a test multi-asset that yields every key in *keys*.

    *deps* may be a list (external upstreams shared by all outputs) or a
    mapping of output key -> set of upstream keys (internal_asset_deps), in
    which case upstreams that are not themselves outputs become op-level deps.
    """
    if deps is None:
        non_argument_deps = None
        internal_asset_deps = None
    elif isinstance(deps, list):
        non_argument_deps = deps
        internal_asset_deps = None
    else:
        # External deps = everything referenced that is not produced here.
        non_argument_deps = list(set().union(*deps.values()) - set(deps.keys()))
        internal_asset_deps = {k: {dg.AssetKey(vv) for vv in v} for k, v in deps.items()}
    @dg.multi_asset(
        outs={
            key: dg.AssetOut(
                is_required=not can_subset,
                legacy_freshness_policy=legacy_freshness_policies.get(key)
                if legacy_freshness_policies
                else None,
            )
            for key in keys
        },
        name="_".join(keys),
        deps=non_argument_deps,
        internal_asset_deps=internal_asset_deps,
        can_subset=can_subset,
    )
    def _assets(context):
        # Emit only the outputs selected for this run (supports subsetting).
        for output in keys:
            if output in context.op_execution_context.selected_output_names:
                yield dg.Output(output, output)
    return _assets
def observable_source_asset_def(
    key: str, partitions_def: Optional[dg.PartitionsDefinition] = None, minutes_to_change: int = 0
):
    """Build an observable source asset named *key*.

    When ``minutes_to_change > 0`` the reported DataVersion is derived from the
    current minute divided by that interval, so it only changes every
    *minutes_to_change* minutes; otherwise every observation reports a fresh
    random version.
    """
    def _data_version() -> dg.DataVersion:
        return (
            dg.DataVersion(str(get_current_datetime().minute // minutes_to_change))
            if minutes_to_change
            else dg.DataVersion(str(random.random()))
        )
    @dg.observable_source_asset(name=key, partitions_def=partitions_def)
    def _observable():
        if partitions_def is None:
            return _data_version()
        else:
            # Partitioned source: report a version for every partition key.
            return dg.DataVersionsByPartition(
                {partition: _data_version() for partition in partitions_def.get_partition_keys()}
            )
    return _observable
def with_auto_materialize_policy(
    assets_defs: Sequence[dg.AssetsDefinition], auto_materialize_policy: AutoMaterializePolicy
) -> Sequence[dg.AssetsDefinition]:
    """Note: this should be implemented in core dagster at some point, and this implementation is
    a lazy hack.
    """
    return [
        single_def.with_attributes(
            automation_condition=auto_materialize_policy.to_automation_condition()
        )
        for single_def in assets_defs
    ]
def get_implicit_auto_materialize_policy(
    asset_key: AssetKey, asset_graph: BaseAssetGraph
) -> Optional[dg.AutoMaterializePolicy]:
    """For backcompat with pre-auto materialize policy graphs, assume a default scope of 1 day."""
    auto_materialize_policy = asset_graph.get(asset_key).auto_materialize_policy
    if auto_materialize_policy is None:
        time_partitions_def = get_time_partitions_def(asset_graph.get(asset_key).partitions_def)
        if time_partitions_def is None:
            max_materializations_per_minute = None
        elif time_partitions_def.schedule_type == ScheduleType.HOURLY:
            # NOTE(review): 24 presumably reflects the 1-day default scope
            # (24 hourly partitions) despite the "per_minute" name — confirm.
            max_materializations_per_minute = 24
        else:
            max_materializations_per_minute = 1
        rules = {
            AutoMaterializeRule.materialize_on_missing(),
            AutoMaterializeRule.materialize_on_required_for_freshness(),
            AutoMaterializeRule.skip_on_parent_outdated(),
            AutoMaterializeRule.skip_on_parent_missing(),
            AutoMaterializeRule.skip_on_required_but_nonexistent_parents(),
            AutoMaterializeRule.skip_on_backfill_in_progress(),
        }
        # Only add eager parent-update materialization when no downstream
        # legacy freshness policy applies to this asset.
        if not bool(asset_graph.get_downstream_legacy_freshness_policies(asset_key=asset_key)):
            rules.add(AutoMaterializeRule.materialize_on_parent_updated())
        return dg.AutoMaterializePolicy(
            rules=rules,
            max_materializations_per_minute=max_materializations_per_minute,
        )
    return auto_materialize_policy
def with_implicit_auto_materialize_policies(
    assets_defs: Sequence[Union[dg.SourceAsset, dg.AssetsDefinition]],
    asset_graph: BaseAssetGraph,
    targeted_assets: Optional[AbstractSet[dg.AssetKey]] = None,
) -> Sequence[dg.AssetsDefinition]:
    """Accepts a list of assets, adding implied auto-materialize policies to targeted assets
    if policies do not exist.
    """
    result = []
    for current_def in assets_defs:
        needs_policy = (
            isinstance(current_def, dg.AssetsDefinition)
            and not current_def.auto_materialize_policies_by_key
        )
        if not needs_policy:
            # SourceAssets and assets that already carry policies pass through.
            result.append(current_def)
            continue
        if targeted_assets:
            keys_to_fill = current_def.keys & targeted_assets
        else:
            keys_to_fill = current_def.keys
        conditions = {}
        for key in keys_to_fill:
            implied = get_implicit_auto_materialize_policy(key, asset_graph)
            if implied:
                conditions[key] = implied.to_automation_condition()
        result.append(current_def.with_attributes(automation_condition=conditions))
    return result
|
AssetReconciliationScenario
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1388406,
"end": 1392125
}
|
class ____(sgqlc.types.Type, Node):
    """Represents a Git reference."""
    # NOTE(review): appears to be sgqlc-generated code for the GitHub GraphQL
    # schema (fields mirror the schema's camelCase names) — presumably emitted
    # by a codegen step; confirm before hand-editing.
    __schema__ = github_schema
    __field_names__ = (
        "associated_pull_requests",
        "branch_protection_rule",
        "compare",
        "name",
        "prefix",
        "ref_update_rule",
        "repository",
        "target",
    )
    associated_pull_requests = sgqlc.types.Field(
        sgqlc.types.non_null(PullRequestConnection),
        graphql_name="associatedPullRequests",
        args=sgqlc.types.ArgDict(
            (
                (
                    "states",
                    sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None),
                ),
                ("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
                ("head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None)),
                ("base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None)),
                ("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
                ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
                ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
                ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
                ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
            )
        ),
    )
    """A list of pull requests with this ref as the head ref.
    Arguments:
    * `states` (`[PullRequestState!]`): A list of states to filter the
    pull requests by.
    * `labels` (`[String!]`): A list of label names to filter the pull
    requests by.
    * `head_ref_name` (`String`): The head ref name to filter the pull
    requests by.
    * `base_ref_name` (`String`): The base ref name to filter the pull
    requests by.
    * `order_by` (`IssueOrder`): Ordering options for pull requests
    returned from the connection.
    * `after` (`String`): Returns the elements in the list that come
    after the specified cursor.
    * `before` (`String`): Returns the elements in the list that come
    before the specified cursor.
    * `first` (`Int`): Returns the first _n_ elements from the list.
    * `last` (`Int`): Returns the last _n_ elements from the list.
    """
    branch_protection_rule = sgqlc.types.Field(BranchProtectionRule, graphql_name="branchProtectionRule")
    """Branch protection rules for this ref"""
    compare = sgqlc.types.Field(
        Comparison,
        graphql_name="compare",
        args=sgqlc.types.ArgDict((("head_ref", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="headRef", default=None)),)),
    )
    """Compares the current ref as a base ref to another head ref, if the
    comparison can be made.
    Arguments:
    * `head_ref` (`String!`): The head ref to compare against.
    """
    name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
    """The ref name."""
    prefix = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="prefix")
    """The ref's prefix, such as `refs/heads/` or `refs/tags/`."""
    ref_update_rule = sgqlc.types.Field(RefUpdateRule, graphql_name="refUpdateRule")
    """Branch protection rules that are viewable by non-admins"""
    repository = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="repository")
    """The repository the ref belongs to."""
    target = sgqlc.types.Field(GitObject, graphql_name="target")
    """The object the ref points to. Returns null when object does not
    exist.
    """
|
Ref
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/op_def_library_test.py
|
{
"start": 1544,
"end": 55117
}
|
class ____(test_util.TensorFlowTestCase):
def Tensor(self, t, name="in"):
return op_def_library.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
op_def_library.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testSimple(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = op_def_library.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = op_def_library.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = op_def_library.apply_op(
"Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple", a="Bad string")
self.assertIn(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead.", str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertIn(
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32.", str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple", a=6, extra="bogus")
self.assertIn("Simple got unexpected keyword arguments: extra",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertIn(
"Simple got unexpected keyword arguments: extra1, "
"extra2", str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple")
self.assertIn("No argument for input a", str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple", wrong=7)
self.assertIn("No argument for input a", str(cm.exception))
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Simple", a={"label": 1})
self.assertIn(
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead.", str(cm.exception))
def testReservedInput(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = op_def_library.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = op_def_library.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(
str(cm.exception),
"Should not specify value for inferred attr 'T' for "
"Polymorphic.")
def testPolymorphicOut(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = op_def_library.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("PolymorphicOut")
self.assertEqual(
str(cm.exception), "No argument found for attr T for PolymorphicOut")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = op_def_library.apply_op(
"PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = op_def_library.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError):
op_def_library.apply_op("Binary", a="left", b=12)
with self.assertRaises(TypeError):
op_def_library.apply_op(
"Binary", a=self.Tensor(dtypes.string), b=self.Tensor(dtypes.int32))
def testRestrict(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = op_def_library.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = op_def_library.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = op_def_library.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"TypeListTwice", a=["foo", True], b=["bar", False], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = op_def_library.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
with ops.Graph().as_default():
out, = op_def_library.apply_op(
"OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = op_def_library.apply_op(
"OutTypeList", T=[dtypes.int32, dtypes.bool], name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = op_def_library.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(
str(cm.exception), "Expected list for attr T, obtained "
"DType instead.")
def testTypeListRestrict(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
with ops.Graph().as_default():
out1, out2 = op_def_library.apply_op(
"OutTypeListRestrict", t=[dtypes.bool, dtypes.string], name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"OutTypeListRestrict", t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = op_def_library.apply_op(
"Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("Attr")
self.assertEqual(
str(cm.exception), "No argument found for attr a for "
"Attr")
def testAttrFloat(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = op_def_library.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrFunc(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn(x):
return 2 + x
op = op_def_library.apply_op("FuncAttr", f=fn, name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncAttr' attr { key: 'f'
value { func { name: 'MyFn' } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("FuncAttr", f=3)
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrFuncWithFuncWithAttrs(self):
with ops.Graph().as_default():
@def_function.function(
input_signature=(tensor.TensorSpec(None, dtypes.float32),),
autograph=False,
experimental_attributes={"_implements": 15})
def fn(x):
return 2 + x
concrete_fn = fn.get_concrete_function()
op = op_def_library.apply_op("FuncAttr", f=concrete_fn, name="t")
self.assertEqual(15, op.node_def.attr["f"].func.attr["_implements"].i)
self.assertEqual(
compat.as_str(concrete_fn.name), op.node_def.attr["f"].func.name
)
def testAttrFuncList(self):
with ops.Graph().as_default():
@function.Defun(dtypes.float32, func_name="MyFn")
def fn1(x):
return 2 + x
@function.Defun(dtypes.int32, dtypes.float32, func_name="MyFn2")
def fn2(x, y):
return 2 + x, y * 3
@function.Defun(dtypes.int32, func_name="MyFn3")
def fn3(y):
return 2 + y
op = op_def_library.apply_op("FuncListAttr", f=[fn1, fn2, fn3], name="t")
self.assertProtoEquals("""
name: 't' op: 'FuncListAttr'
attr { key: 'f' value { list { func { name: 'MyFn' }
func { name: 'MyFn2' }
func { name: 'MyFn3' } } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("FuncListAttr", f=[fn1, 3, fn2])
self.assertEqual(str(cm.exception),
"Don't know how to convert 3 to a func for argument f")
def testAttrBool(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = op_def_library.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = op_def_library.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op(
"AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = op_def_library.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = op_def_library.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = op_def_library.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = op_def_library.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to
# shapes.
# with self.assertRaises(TypeError) as cm:
# op_def_library.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
op_def_library.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = op_def_library.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = op_def_library.apply_op(
"AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = op_def_library.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = op_def_library.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = op_def_library.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# op_def_library.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
op_def_library.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = op_def_library.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = op_def_library.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = op_def_library.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = op_def_library.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = op_def_library.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = op_def_library.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
with ops.Graph().as_default():
# Give an input whose type has no obvious output type.
op = op_def_library.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
# Give an input whose type can be inferred as different
# than the default.
op = op_def_library.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
with ops.Graph().as_default():
# Give an input whose type can be inferred as different
# than the default.
op = op_def_library.apply_op(
"AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("NIntsIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NIntsIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'N' value { i: 5 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NIntsIn", a=["foo", "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[string, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NIntsIn",
a=[self.Tensor(dtypes.string),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have "
"types [string, string] that do not match expected type "
"int32.")
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NIntsIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NIntsIn' Op "
"with length 1 shorter than "
"minimum length 2.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NIntsIn", a=[38, "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[int32, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NIntsIn",
a=[self.Tensor(dtypes.int32),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op "
"have types [int32, string] that do not match expected "
"type int32.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NIntsIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
with ops.Graph().as_default():
op = op_def_library.apply_op("NPolymorphicIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 5 } }
""", op.node_def)
op = op_def_library.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NPolymorphicIn",
a=[1, self.Tensor(dtypes.float32, name="x")],
name="q")
self.assertProtoEquals("""
name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NPolymorphicIn",
a=[
self.Tensor(dtypes.float32, name="y"),
self.Tensor(dtypes.float32_ref, name="z")
],
name="r")
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NPolymorphicIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NPolymorphicIn' Op with length 1 "
"shorter than minimum length 2.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicIn", a=[38, None])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
"don't all match.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NPolymorphicIn", a=["abcd", self.Tensor(dtypes.int32)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [string, int32] that don't all match.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"NPolymorphicRestrictIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NPolymorphicRestrictIn", a=[False, True, False], name="b")
self.assertProtoEquals("""
name: 'b' op: 'NPolymorphicRestrictIn'
input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicRestrictIn", a=[1, 2])
self.assertEqual(
str(cm.exception),
"Value passed to parameter 'a' has DataType int32 not in "
"list of allowed values: string, bool")
def testNInTwice(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"NInTwice", a=[1, 2], b=["one", "two"], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op("NInTwice", a=[], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwice' Op "
"with length 1 must match "
"length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"NInPolymorphicTwice", a=[1, 2], b=[3, 4], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInPolymorphicTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInPolymorphicTwice' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
"Op have types [string, string] that do not match type "
"int32 inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"NInPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of "
"'NInPolymorphicTwice' Op have types [string] that do "
"not match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"NInTwoTypeVariables", a=[1, 2], b=[True, False], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwoTypeVariables'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NInTwoTypeVariables", a=[1, 2], b=[3, 4], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwoTypeVariables'
input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"NInTwoTypeVariables",
a=[self.Tensor(dtypes.int32, name="q")],
b=[self.Tensor(dtypes.string, name="r")],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwoTypeVariables' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
with ops.Graph().as_default():
op = op_def_library.apply_op(
"InPolymorphicTwice", a=[8], b=[3, 4, 5], name="n")
self.assertProtoEquals("""
name: 'n' op: 'InPolymorphicTwice'
input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 3 } }
""", op.node_def)
op = op_def_library.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
op = op_def_library.apply_op(
"InPolymorphicTwice", a=[], b=[3, 4], name="p")
self.assertProtoEquals("""
name: 'p' op: 'InPolymorphicTwice' input: 'p/b_0' input: 'p/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 0 } }
attr { key: 'M' value { i: 2 } }
""", op.node_def)
op = op_def_library.apply_op(
"InPolymorphicTwice", a=[], b=[3.0, 4.0], name="q")
self.assertProtoEquals("""
name: 'q' op: 'InPolymorphicTwice' input: 'q/b_0' input: 'q/b_1'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 0 } }
attr { key: 'M' value { i: 2 } }
""", op.node_def)
# Empty input lists: assume default type for T.
op = op_def_library.apply_op(
"InPolymorphicTwice", a=[], b=[], name="r")
self.assertProtoEquals("""
name: 'r' op: 'InPolymorphicTwice'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 0 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"InPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
"have types [string, string] that do not match type int32 "
"inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op(
"InPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' "
"Op have types [string] that do not match type int32 "
"inferred from earlier arguments.")
def testNIntsOut(self):
with ops.Graph().as_default():
out1, out2 = op_def_library.apply_op("NIntsOut", N=2, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3, out4, out5 = op_def_library.apply_op(
"NIntsOut", N=5, name="o")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertEqual(dtypes.int32, out4.dtype)
self.assertEqual(dtypes.int32, out5.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
""", out5.op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NIntsOut", N=1)
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NIntsOut", N=[3])
self.assertEqual(str(cm.exception),
"Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
with ops.Graph().as_default():
out1, out2, out3 = op_def_library.apply_op(
"NIntsOutDefault", N=None, name="z")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = op_def_library.apply_op("NIntsOutDefault", N=2, name="y")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
""", out2.op.node_def)
def testNPolymorphicOut(self):
with ops.Graph().as_default():
out1, out2 = op_def_library.apply_op(
"NPolymorphicOut", N=2, T=dtypes.int32, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = op_def_library.apply_op(
"NPolymorphicOut", T=dtypes.string, N=3, name="o")
self.assertEqual(dtypes.string, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertEqual(dtypes.string, out3.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 3 } }
""", out3.op.node_def)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NPolymorphicOut' Op "
"passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
self.assertEqual(
str(cm.exception),
"Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
with ops.Graph().as_default():
out1, out2 = op_def_library.apply_op(
"NPolymorphicOutDefault", N=None, T=None, name="r")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = op_def_library.apply_op(
"NPolymorphicOutDefault", N=3, T=None, name="s")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 's' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = op_def_library.apply_op(
"NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 't' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = op_def_library.apply_op(
"NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
def testNPolymorphicRestrictOut(self):
with ops.Graph().as_default():
out1, out2, out3 = op_def_library.apply_op(
"NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicRestrictOut'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'T' has DataType int32 "
"not in list of allowed values: string, bool")
def testRef(self):
with ops.Graph().as_default():
out = op_def_library.apply_op("RefOut", T=dtypes.bool, name="o")
self.assertEqual(dtypes.bool_ref, out.dtype)
self.assertProtoEquals("""
name: 'o' op: 'RefOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
op = op_def_library.apply_op("RefIn", a=out, name="i")
self.assertProtoEquals("""
name: 'i' op: 'RefIn' input: 'o'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: "_class" value { list { s: "loc:@o" } } }
""", op.node_def)
# Can pass ref to non-ref input.
out = op_def_library.apply_op("RefOut", T=dtypes.int32, name="r")
out = op_def_library.apply_op("Simple", a=out, name="s")
self.assertProtoEquals("""
name: 's' op: 'Simple' input: 'r'
""", out.op.node_def)
# Can't pass non-ref to ref input.
with self.assertRaises(TypeError) as cm:
op_def_library.apply_op("RefIn", a=2)
self.assertEqual(
str(cm.exception),
"'RefIn' Op requires that input 'a' be a mutable tensor " +
"(e.g.: a tf.Variable)")
input_a = op_def_library.apply_op("RefOut", T=dtypes.int32, name="t")
input_b = op_def_library.apply_op("RefOut", T=dtypes.int32, name="u")
op = op_def_library.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
# NOTE(mrry): The order of colocation constraints is an implementation
# detail.
self.assertProtoEquals("""
name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
""", op.node_def)
def testSpecifyDevice(self):
graph = ops.Graph()
with graph.as_default():
with graph.device("/job:ADevice"):
op_def_library.apply_op("Simple", a=3)
# We look at the whole graph here to make sure the Const op is also given
# the specified device.
graph_def = graph.as_graph_def()
self.assertEqual(len(graph_def.node), 2)
for node in graph_def.node:
self.assertDeviceEqual(node.device, "/job:ADevice")
def testStructuredOutputSingleList(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a = op_def_library.apply_op("SimpleStruct", n_a=n_a)
self.assertIsInstance(a, list)
self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a, b = op_def_library.apply_op("MixedStruct", n_a=n_a)
self.assertIsInstance(a, list)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertIsInstance(b, tensor.Tensor)
self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
for n_b in [0, 1, 3]:
for t_c in [[],
[dtypes.int32],
[dtypes.int32, dtypes.float32]]:
a, b, c = op_def_library.apply_op(
"ComplexStruct", n_a=n_a, n_b=n_b, t_c=t_c)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertEqual(n_b, len(b))
self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
self.assertEqual(t_c, [x.dtype for x in c])
@test_util.add_graph_building_optimization_tests
|
OpDefLibraryTest
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_spans_fields.py
|
{
"start": 7189,
"end": 8637
}
|
class ____(ABC):
PROJECT_SLUG_KEYS = {"project", "project.name"}
PROJECT_ID_KEYS = {"project.id"}
def __init__(
self,
organization: Organization,
snuba_params: SnubaParams,
key: str,
query: str | None,
max_span_tag_values: int,
):
self.organization = organization
self.snuba_params = snuba_params
self.key = key
self.query = query or ""
self.max_span_tag_values = max_span_tag_values
@abstractmethod
def execute(self) -> list[TagValue]:
raise NotImplementedError
def project_id_autocomplete_function(self) -> list[TagValue]:
return [
TagValue(
key=self.key,
value=str(project.id),
times_seen=None,
first_seen=None,
last_seen=None,
)
for project in self.snuba_params.projects
if not self.query or self.query in str(project.id)
]
def project_slug_autocomplete_function(self) -> list[TagValue]:
return [
TagValue(
key=self.key,
value=project.slug,
times_seen=None,
first_seen=None,
last_seen=None,
)
for project in self.snuba_params.projects
if not self.query or self.query in project.slug
]
|
BaseSpanFieldValuesAutocompletionExecutor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-zendesk-sunshine/components.py
|
{
"start": 341,
"end": 706
}
|
class ____(DeclarativeAuthenticator):
config: Mapping[str, Any]
basic_auth: BasicHttpAuthenticator
oauth2: BearerAuthenticator
def __new__(cls, basic_auth, oauth2, config, *args, **kwargs):
if config["credentials"]["auth_method"] == "api_token":
return basic_auth
else:
return oauth2
|
AuthenticatorZendeskSunshine
|
python
|
jina-ai__jina
|
jina/helper.py
|
{
"start": 21036,
"end": 29656
}
|
class ____:
"""Helper function for argparse.Namespace object."""
@staticmethod
def kwargs2list(kwargs: Dict) -> List[str]:
"""
Convert dict to an argparse-friendly list.
:param kwargs: dictionary of key-values to be converted
:return: argument list
"""
args = []
from jina.serve.executors import BaseExecutor
from jina.serve.runtimes.gateway.gateway import BaseGateway
for k, v in kwargs.items():
k = k.replace('_', '-')
if v is not None:
if isinstance(v, bool):
if v:
args.append(f'--{k}')
elif isinstance(v, list): # for nargs
args.extend([f'--{k}', *(str(vv) for vv in v)])
elif isinstance(v, dict):
args.extend([f'--{k}', json.dumps(v)])
elif isinstance(v, type) and issubclass(v, BaseExecutor):
args.extend([f'--{k}', v.__name__])
elif isinstance(v, type) and issubclass(v, BaseGateway):
args.extend([f'--{k}', v.__name__])
else:
args.extend([f'--{k}', str(v)])
return args
@staticmethod
def kwargs2namespace(
kwargs: Dict[str, Union[str, int, bool]],
parser: ArgumentParser,
warn_unknown: bool = False,
fallback_parsers: Optional[List[ArgumentParser]] = None,
positional_args: Optional[Tuple[str, ...]] = None,
) -> Namespace:
"""
Convert dict to a namespace.
:param kwargs: dictionary of key-values to be converted
:param parser: the parser for building kwargs into a namespace
:param warn_unknown: True, if unknown arguments should be logged
:param fallback_parsers: a list of parsers to help resolving the args
:param positional_args: some parser requires positional arguments to be presented
:return: argument list
"""
args = ArgNamespace.kwargs2list(kwargs)
if positional_args:
args += positional_args
p_args, unknown_args = parser.parse_known_args(args)
unknown_args = list(filter(lambda x: x.startswith('--'), unknown_args))
if '--jcloud' in unknown_args:
unknown_args.remove('--jcloud')
if warn_unknown and unknown_args:
_leftovers = set(unknown_args)
if fallback_parsers:
for p in fallback_parsers:
_, _unk_args = p.parse_known_args(args)
_leftovers = _leftovers.intersection(_unk_args)
if not _leftovers:
# all args have been resolved
break
warn_unknown_args(_leftovers)
return p_args
@staticmethod
def get_non_defaults_args(
args: Namespace, parser: ArgumentParser, taboo: Optional[Set[str]] = None
) -> Dict:
"""
Get non-default args in a dict.
:param args: the namespace to parse
:param parser: the parser for referring the default values
:param taboo: exclude keys in the final result
:return: non defaults
"""
if taboo is None:
taboo = set()
non_defaults = {}
_defaults = vars(parser.parse_args([]))
for k, v in vars(args).items():
if k in _defaults and k not in taboo and _defaults[k] != v:
non_defaults[k] = v
return non_defaults
@staticmethod
def flatten_to_dict(
args: Union[Dict[str, 'Namespace'], 'Namespace']
) -> Dict[str, Any]:
"""Convert argparse.Namespace to dict to be uploaded via REST.
:param args: namespace or dict or namespace to dict.
:return: pod args
"""
if isinstance(args, Namespace):
return vars(args)
elif isinstance(args, dict):
pod_args = {}
for k, v in args.items():
if isinstance(v, Namespace):
pod_args[k] = vars(v)
elif isinstance(v, list):
pod_args[k] = [vars(_) for _ in v]
else:
pod_args[k] = v
return pod_args
def is_valid_local_config_source(path: str) -> bool:
"""
Check if the path is valid.
:param path: Local file path.
:return: True if the path is valid else False.
"""
try:
from jina.jaml import parse_config_source
parse_config_source(path)
return True
except FileNotFoundError:
return False
def get_full_version() -> Optional[Tuple[Dict, Dict]]:
"""
Get the version of libraries used in Jina and environment variables.
:return: Version information and environment variables
"""
import os
import platform
from uuid import getnode
import google.protobuf
import grpc
import yaml
from google.protobuf.internal import api_implementation
from grpc import _grpcio_metadata
try:
from hubble import __version__ as __hubble_version__
except:
__hubble_version__ = 'not-available'
try:
from jcloud import __version__ as __jcloud_version__
except:
__jcloud_version__ = 'not-available'
from jina import __docarray_version__, __proto_version__, __version__
from jina.constants import __jina_env__, __unset_msg__, __uptime__
from jina.logging.predefined import default_logger
try:
info = {
'jina': __version__,
'docarray': __docarray_version__,
'jcloud': __jcloud_version__,
'jina-hubble-sdk': __hubble_version__,
'jina-proto': __proto_version__,
'protobuf': google.protobuf.__version__,
'proto-backend': api_implementation.Type(),
'grpcio': getattr(grpc, '__version__', _grpcio_metadata.__version__),
'pyyaml': yaml.__version__,
'python': platform.python_version(),
'platform': platform.system(),
'platform-release': platform.release(),
'platform-version': platform.version(),
'architecture': platform.machine(),
'processor': platform.processor(),
'uid': getnode(),
'session-id': str(random_uuid(use_uuid1=True)),
'uptime': __uptime__,
'ci-vendor': get_ci_vendor() or __unset_msg__,
'internal': 'jina-ai'
in os.getenv('GITHUB_ACTION_REPOSITORY', __unset_msg__),
}
env_info = {k: os.getenv(k, __unset_msg__) for k in __jina_env__}
full_version = info, env_info
except Exception as e:
default_logger.error(str(e))
full_version = None
return full_version
def format_full_version_info(info: Dict, env_info: Dict) -> str:
"""
Format the version information.
:param info: Version information of Jina libraries.
:param env_info: The Jina environment variables.
:return: Formatted version information.
"""
version_info = '\n'.join(f'- {k:30s}{v}' for k, v in info.items())
env_info = '\n'.join(f'* {k:30s}{v}' for k, v in env_info.items())
return version_info + '\n' + env_info
def _update_policy():
if __windows__:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
elif 'JINA_DISABLE_UVLOOP' in os.environ:
return
else:
try:
import uvloop
if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
warnings.warn(
'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
)
def get_or_reuse_loop():
"""
Get a new eventloop or reuse the current opened eventloop.
:return: A new eventloop or reuse the current opened eventloop.
"""
_update_policy()
try:
loop = asyncio.get_event_loop()
if loop.is_closed():
raise RuntimeError
except RuntimeError:
# no event loop
# create a new loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop
def typename(obj):
"""
Get the typename of object.
:param obj: Target object.
:return: Typename of the obj.
"""
if not isinstance(obj, type):
obj = obj.__class__
try:
return f'{obj.__module__}.{obj.__name__}'
except AttributeError:
return str(obj)
|
ArgNamespace
|
python
|
mkdocs__mkdocs
|
mkdocs/config/config_options.py
|
{
"start": 31440,
"end": 32174
}
|
class ____(Config):
"""An extra script to be added to the page. The `extra_javascript` config is a list of these."""
path = Type(str)
"""The value of the `src` tag of the script."""
type = Type(str, default='')
"""The value of the `type` tag of the script."""
defer = Type(bool, default=False)
"""Whether to add the `defer` tag to the script."""
async_ = Type(bool, default=False)
"""Whether to add the `async` tag to the script."""
def __init__(self, path: str = '', config_file_path=None):
super().__init__(config_file_path=config_file_path)
self.path = path
def __str__(self):
return self.path
def __fspath__(self):
return self.path
|
ExtraScriptValue
|
python
|
pytorch__pytorch
|
test/test_tensor_creation_ops.py
|
{
"start": 184707,
"end": 192736
}
|
class ____(TestCase):
def _run_test(self, shape, dtype, count=-1, first=0, offset=None, **kwargs):
numpy_dtype = torch_to_numpy_dtype_dict[dtype]
if offset is None:
offset = first * get_dtype_size(dtype)
numpy_original = make_tensor(shape, dtype=dtype, device="cpu").numpy()
original = memoryview(numpy_original)
# First call PyTorch's version in case of errors.
# If this call exits successfully, the NumPy version must also do so.
torch_frombuffer = torch.frombuffer(original, dtype=dtype, count=count, offset=offset, **kwargs)
numpy_frombuffer = np.frombuffer(original, dtype=numpy_dtype, count=count, offset=offset)
self.assertEqual(numpy_frombuffer, torch_frombuffer)
self.assertEqual(numpy_frombuffer.__array_interface__["data"][0], torch_frombuffer.data_ptr())
return (numpy_original, torch_frombuffer)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_same_type(self, device, dtype):
self._run_test((), dtype)
self._run_test((4,), dtype)
self._run_test((10, 10), dtype)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_requires_grad(self, device, dtype):
def _run_test_and_check_grad(requires_grad, *args, **kwargs):
kwargs["requires_grad"] = requires_grad
_, tensor = self._run_test(*args, **kwargs)
self.assertTrue(tensor.requires_grad == requires_grad)
requires_grad = may_require_grad(dtype)
_run_test_and_check_grad(requires_grad, (), dtype)
_run_test_and_check_grad(requires_grad, (4,), dtype)
_run_test_and_check_grad(requires_grad, (10, 10), dtype)
_run_test_and_check_grad(False, (), dtype)
_run_test_and_check_grad(False, (4,), dtype)
_run_test_and_check_grad(False, (10, 10), dtype)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_with_offset(self, device, dtype):
# Offset should be valid whenever there is, at least,
# one remaining element
for i in range(SIZE):
self._run_test(SHAPE, dtype, first=i)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_with_count(self, device, dtype):
# Count should be valid for any valid in the interval
# [-1, len(input)], except for 0
for i in range(-1, SIZE + 1):
if i != 0:
self._run_test(SHAPE, dtype, count=i)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_with_count_and_offset(self, device, dtype):
# Explicit default count [-1, 1, 2, ..., len]
for i in range(-1, SIZE + 1):
if i != 0:
self._run_test(SHAPE, dtype, count=i)
# Explicit default offset [0, 1, ..., len - 1]
for i in range(SIZE):
self._run_test(SHAPE, dtype, first=i)
# All possible combinations of count and dtype aligned
# offset for 'input'
# count:[1, 2, ..., len - 1] x first:[0, 1, ..., len - count]
for i in range(1, SIZE):
for j in range(SIZE - i + 1):
self._run_test(SHAPE, dtype, count=i, first=j)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_invalid_positional_args(self, device, dtype):
bytes = get_dtype_size(dtype)
in_bytes = SIZE * bytes
# Empty array
with self.assertRaisesRegex(ValueError,
r"both buffer length \(0\) and count"):
empty = np.array([])
torch.frombuffer(empty, dtype=dtype)
# Count equals 0
with self.assertRaisesRegex(ValueError,
r"both buffer length .* and count \(0\)"):
self._run_test(SHAPE, dtype, count=0)
# Offset negative and bigger than total length
with self.assertRaisesRegex(ValueError,
rf"offset \(-{bytes} bytes\) must be"):
self._run_test(SHAPE, dtype, first=-1)
with self.assertRaisesRegex(ValueError,
rf"offset \({in_bytes} bytes\) must be .* "
rf"buffer length \({in_bytes} bytes\)"):
self._run_test(SHAPE, dtype, first=SIZE)
# Non-multiple offset with all elements
if bytes > 1:
offset = bytes - 1
with self.assertRaisesRegex(ValueError,
rf"buffer length \({in_bytes - offset} bytes\) after "
rf"offset \({offset} bytes\) must be"):
self._run_test(SHAPE, dtype, offset=bytes - 1)
# Count too big for each good first element
for first in range(SIZE):
count = SIZE - first + 1
with self.assertRaisesRegex(ValueError,
rf"requested buffer length \({count} \* {bytes} bytes\) "
rf"after offset \({first * bytes} bytes\) must .*"
rf"buffer length \({in_bytes} bytes\)"):
self._run_test(SHAPE, dtype, count=count, first=first)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_shared_buffer(self, device, dtype):
x = make_tensor((1,), dtype=dtype, device=device)
# Modify the whole tensor
arr, tensor = self._run_test(SHAPE, dtype)
tensor[:] = x
self.assertEqual(arr, tensor)
self.assertTrue((tensor == x).all().item())
# Modify the whole tensor from all valid offsets, given
# a count value
for count in range(-1, SIZE + 1):
if count == 0:
continue
actual_count = count if count > 0 else SIZE
for first in range(SIZE - actual_count):
last = first + actual_count
arr, tensor = self._run_test(SHAPE, dtype, first=first, count=count)
tensor[:] = x
self.assertEqual(arr[first:last], tensor)
self.assertTrue((tensor == x).all().item())
# Modify the first value in the array
arr[first] = x.item() - 1
self.assertEqual(arr[first:last], tensor)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_not_a_buffer(self, device, dtype):
with self.assertRaisesRegex(ValueError,
r"object does not implement Python buffer protocol."):
torch.frombuffer([1, 2, 3, 4], dtype=dtype)
@dtypes(*set(numpy_to_torch_dtype_dict.values()))
def test_non_writable_buffer(self, device, dtype):
numpy_arr = make_tensor((1,), dtype=dtype, device=device).numpy()
byte_arr = numpy_arr.tobytes()
with self.assertWarnsOnceRegex(UserWarning,
r"The given buffer is not writable."):
torch.frombuffer(byte_arr, dtype=dtype)
def test_byte_to_int(self):
byte_array = np.array([-1, 0, 0, 0, -1, 0, 0, 0], dtype=np.byte) if sys.byteorder == 'little' \
else np.array([0, 0, 0, -1, 0, 0, 0, -1], dtype=np.byte)
tensor = torch.frombuffer(byte_array, dtype=torch.int32)
self.assertEqual(tensor.numel(), 2)
self.assertSequenceEqual(tensor, [255, 255])
# Tests for the `asarray` function:
# Constructs tensors from a Python object that has one of the following
# characteristics:
# 1. is a Tensor
# 2. is a DLPack capsule
# 3. implements the Python Buffer protocol
# 4. is an arbitrary list
# The implementation itself is based on the Python Array API:
# https://data-apis.org/array-api/latest/API_specification/creation_functions.html
def get_another_device(device):
return "cuda" if torch.device(device).type == "cpu" else "cpu"
def identity(tensor):
return tensor
def to_numpy(tensor):
return tensor.numpy()
def to_memview(tensor):
return memoryview(to_numpy(tensor))
|
TestBufferProtocol
|
python
|
FactoryBoy__factory_boy
|
tests/test_declarations.py
|
{
"start": 5569,
"end": 5775
}
|
class ____(unittest.TestCase):
def test_transform(self):
t = declarations.Transformer('foo', transform=str.upper)
self.assertEqual("FOO", utils.evaluate_declaration(t))
|
TransformerTestCase
|
python
|
numba__numba
|
numba/core/ir.py
|
{
"start": 25614,
"end": 26384
}
|
class ____(Stmt):
"""A raise statement inside a try-block.
Similar to ``DynamicRaise`` but does not terminate.
"""
def __init__(self, exc_class, exc_args, loc):
assert exc_class is None or isinstance(exc_class, type)
assert isinstance(loc, Loc)
assert exc_args is None or isinstance(exc_args, tuple)
self.exc_class = exc_class
self.exc_args = exc_args
self.loc = loc
def __str__(self):
if self.exc_class is None:
return f"dynamic_try_raise"
elif self.exc_args is None:
return f"dynamic_try_raise {self.exc_class}"
else:
args = ", ".join(map(repr, self.exc_args))
return f"dynamic_try_raise {self.exc_class}({args})"
|
DynamicTryRaise
|
python
|
gevent__gevent
|
src/gevent/tests/test__makefile_ref.py
|
{
"start": 8653,
"end": 16827
}
|
class ____(Test):
def _ssl_connect_task(self, connector, port, accepted_event):
connector.connect((DEFAULT_CONNECT, port))
try:
# Note: We get ResourceWarning about 'x'
# on Python 3 if we don't join the spawned thread
x = ssl.SSLContext().wrap_socket(connector)
# Wait to be fully accepted. We could otherwise raise ahead
# of the server and close ourself before it's ready to read.
accepted_event.wait()
except socket.error:
# Observed on Windows with PyPy2 5.9.0 and libuv:
# if we don't switch in a timely enough fashion,
# the server side runs ahead of us and closes
# our socket first, so this fails.
pass
else:
x.close()
def _make_ssl_connect_task(self, connector, port):
accepted_event = threading.Event()
t = threading.Thread(target=self._ssl_connect_task,
args=(connector, port, accepted_event))
t.daemon = True
t.accepted_event = accepted_event
return t
def test_simple_close(self):
with Closing() as closer:
s = closer(self.make_open_socket())
fileno = s.fileno()
s = closer(ssl.SSLContext().wrap_socket(s))
fileno = s.fileno()
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def test_makefile1(self):
with Closing() as closer:
raw_s = closer(self.make_open_socket())
s = closer(ssl.SSLContext().wrap_socket(raw_s))
fileno = s.fileno()
self.assert_open(s, fileno)
f = closer(s.makefile())
self.assert_open(s, fileno)
s.close()
self.assert_open(s, fileno)
f.close()
raw_s.close()
self.assert_closed(s, fileno)
def test_makefile2(self):
with Closing() as closer:
s = closer(self.make_open_socket())
fileno = s.fileno()
s = closer(ssl.SSLContext().wrap_socket(s))
fileno = s.fileno()
self.assert_open(s, fileno)
f = closer(s.makefile())
self.assert_open(s, fileno)
f.close()
# closing fileobject does not close the socket
self.assert_open(s, fileno)
s.close()
self.assert_closed(s, fileno)
def _wrap_socket(self, sock, *, keyfile, certfile, server_side=False):
context = ssl.SSLContext()
context.load_cert_chain(certfile=certfile, keyfile=keyfile)
return context.wrap_socket(sock, server_side=server_side)
def test_server_simple(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
t = self._make_ssl_connect_task(connector, port)
closer.running_task(t)
client_socket = closer.accept(listener)
t.accepted_event.set()
client_socket = closer(
self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
server_side=True))
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
def test_server_makefile1(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
t = self._make_ssl_connect_task(connector, port)
closer.running_task(t)
client_socket = closer.accept(listener)
t.accepted_event.set()
client_socket = closer(
self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
server_side=True))
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
def test_server_makefile2(self):
with Closing() as closer:
listener = closer(tcp_listener(backlog=1))
port = listener.getsockname()[1]
connector = closer(socket.socket())
t = self._make_ssl_connect_task(connector, port)
closer.running_task(t)
t.accepted_event.set()
client_socket = closer.accept(listener)
client_socket = closer(
self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
server_side=True))
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
# Closing fileobject does not close SSLObject
f.close()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
def test_serverssl_makefile1(self):
raw_listener = tcp_listener(backlog=1)
fileno = raw_listener.fileno()
port = raw_listener.getsockname()[1]
listener = self._wrap_socket(raw_listener, keyfile=CERTFILE, certfile=CERTFILE)
connector = socket.socket()
t = self._make_ssl_connect_task(connector, port)
t.start()
with CleaningUp(t, listener, raw_listener, connector) as client_socket:
t.accepted_event.set()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_open(client_socket, fileno)
f.close()
self.assert_closed(client_socket, fileno)
def test_serverssl_makefile2(self):
raw_listener = tcp_listener(backlog=1)
port = raw_listener.getsockname()[1]
listener = self._wrap_socket(raw_listener, keyfile=CERTFILE, certfile=CERTFILE)
accepted_event = threading.Event()
read_event = threading.Event()
def connect(connector=socket.socket()):
try:
connector.connect((DEFAULT_CONNECT, port))
s = ssl.SSLContext().wrap_socket(connector)
accepted_event.wait()
s.sendall(b'test_serverssl_makefile2')
s.shutdown(socket.SHUT_RDWR)
read_event.wait()
s.close()
finally:
connector.close()
t = threading.Thread(target=connect)
t.daemon = True
t.start()
client_socket = None
with CleaningUp(t, listener, raw_listener) as client_socket:
accepted_event.set()
fileno = client_socket.fileno()
self.assert_open(client_socket, fileno)
f = client_socket.makefile()
self.assert_open(client_socket, fileno)
self.assertEqual(f.read(), 'test_serverssl_makefile2')
# Closing the other end of the socket
# before we finish reading can result in us
# getting EPIPE from OpenSSL 3.5 on certain
# systems, notably the manylinux images on Github
# actions. This hasn't shown up anywhere else.
read_event.set()
self.assertEqual(f.read(), '')
# Closing file object does not close the socket.
f.close()
if WIN and psutil:
# Hmm?
self.extra_allowed_open_states = (psutil.CONN_CLOSE_WAIT,)
self.assert_open(client_socket, fileno)
client_socket.close()
self.assert_closed(client_socket, fileno)
|
TestSSL
|
python
|
tiangolo__fastapi
|
docs_src/schema_extra_example/tutorial004.py
|
{
"start": 110,
"end": 824
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item = Body(
examples=[
{
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
{
"name": "Bar",
"price": "35.4",
},
{
"name": "Baz",
"price": "thirty five point four",
},
],
),
):
results = {"item_id": item_id, "item": item}
return results
|
Item
|
python
|
networkx__networkx
|
networkx/algorithms/isomorphism/tests/test_ismags.py
|
{
"start": 19400,
"end": 25325
}
|
class ____:
def test_wikipedia_graph(self):
edges1 = [
(1, 5),
(1, 2),
(1, 4),
(3, 2),
(6, 2),
(3, 4),
(7, 3),
(4, 8),
(5, 8),
(6, 5),
(6, 7),
(7, 8),
]
mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"}
G1 = nx.DiGraph(edges1)
G2 = nx.relabel_nodes(G1, mapped)
result = next(nx.isomorphism.ISMAGS(G1, G2).find_isomorphisms())
assert result == mapped
# Change the direction of an edge
G1.remove_edge(1, 5)
G1.add_edge(5, 1)
result = list(nx.isomorphism.ISMAGS(G1, G2).find_isomorphisms())
assert result == []
def test_non_isomorphic_same_degree_sequence(self):
r"""
G1 G2
x--------------x x--------------x
| \ | | \ |
| x-------x | | x-------x |
| | | | | | | |
| x-------x | | x-------x |
| / | | \ |
x--------------x x--------------x
"""
edges1 = [
(1, 5),
(1, 2),
(4, 1),
(3, 2),
(3, 4),
(4, 8),
(5, 8),
(6, 5),
(6, 7),
(7, 8),
]
edges2 = [
(1, 5),
(1, 2),
(4, 1),
(3, 2),
(4, 3),
(5, 8),
(6, 5),
(6, 7),
(3, 7),
(8, 7),
]
G1 = nx.DiGraph(edges1)
G2 = nx.DiGraph(edges2)
assert not is_isomorphic(G1, G2)
def test_is_isomorphic(self):
G1 = nx.Graph([[1, 2], [1, 3], [1, 5], [2, 3]])
G2 = nx.Graph([[10, 20], [20, 30], [10, 30], [10, 50]])
G4 = nx.Graph([[1, 2], [1, 3], [1, 5], [2, 4]])
assert is_isomorphic(G1, G2)
assert not is_isomorphic(G1, G4)
assert is_isomorphic(G1.to_directed(), G2.to_directed())
assert not is_isomorphic(G1.to_directed(), G4.to_directed())
with pytest.raises(
ValueError, match="Directed and undirected graphs cannot be compared."
):
is_isomorphic(G1.to_directed(), G1)
@pytest.mark.parametrize("graph_class", graph_classes)
def test_simple_node_match(graph_class):
g1 = graph_class([(0, 0), (0, 1), (1, 0)])
g2 = g1.copy()
nm = iso.numerical_node_match("size", 1)
assert is_isomorphic(g1, g2, node_match=nm)
g2.nodes[0]["size"] = 3
assert not is_isomorphic(g1, g2, node_match=nm)
@pytest.mark.parametrize("graph_class", graph_classes)
def test_simple_node_and_edge_match(graph_class):
g1 = graph_class()
g1.add_weighted_edges_from([(0, 0, 1.2), (0, 1, 1.4), (1, 0, 1.6)])
g2 = g1.copy()
nm = iso.numerical_node_match("size", 1)
if g1.is_multigraph():
em = iso.numerical_multiedge_match("weight", 1)
else:
em = iso.numerical_edge_match("weight", 1)
assert is_isomorphic(g1, g2, node_match=nm, edge_match=em)
g2.nodes[0]["size"] = 3
assert not is_isomorphic(g1, g2, node_match=nm, edge_match=em)
g2 = g1.copy()
if g1.is_multigraph():
g2.edges[0, 1, 0]["weight"] = 2.1
else:
g2.edges[0, 1]["weight"] = 2.1
assert not is_isomorphic(g1, g2, node_match=nm, edge_match=em)
g2 = g1.copy()
g2.nodes[0]["size"] = 3
if g1.is_multigraph():
g2.edges[0, 1, 0]["weight"] = 2.1
else:
g2.edges[0, 1]["weight"] = 2.1
assert not is_isomorphic(g1, g2, node_match=nm, edge_match=em)
@pytest.mark.parametrize("graph_class", graph_classes)
def test_simple_edge_match(graph_class):
# 16 simple tests
w = "weight"
edges = [(0, 0, 1), (0, 0, 1.5), (0, 1, 2), (1, 0, 3)]
g1 = graph_class()
g1.add_weighted_edges_from(edges)
g2 = g1.copy()
if g1.is_multigraph():
em = iso.numerical_multiedge_match("weight", 1)
else:
em = iso.numerical_edge_match("weight", 1)
assert is_isomorphic(g1, g2, edge_match=em)
for mod1, mod2 in [(False, True), (True, False), (True, True)]:
# mod1 tests a regular edge weight difference
# mod2 tests a selfloop weight difference
if g1.is_multigraph():
if mod1:
data1 = {0: {"weight": 10}}
if mod2:
data2 = {0: {"weight": 1}, 1: {"weight": 2.5}}
else:
if mod1:
data1 = {"weight": 10}
if mod2:
data2 = {"weight": 2.5}
g2 = g1.copy()
if mod1:
if not g1.is_directed():
g2._adj[1][0] = data1
g2._adj[0][1] = data1
else:
g2._succ[1][0] = data1
g2._pred[0][1] = data1
if mod2:
if not g1.is_directed():
g2._adj[0][0] = data2
else:
g2._succ[0][0] = data2
g2._pred[0][0] = data2
assert not is_isomorphic(g1, g2, edge_match=em)
@pytest.mark.parametrize("graph_class", graph_classes)
def test_weightkey(graph_class):
g1 = graph_class()
g2 = graph_class()
if g1.is_multigraph():
edge_match = iso.numerical_multiedge_match
else:
edge_match = iso.numerical_edge_match
g1.add_edge("A", "B", weight=1)
g2.add_edge("C", "D", weight=0)
assert nx.is_isomorphic(g1, g2)
em = edge_match("nonexistent attribute", 1)
assert nx.is_isomorphic(g1, g2, edge_match=em)
em = edge_match("weight", 1)
assert not nx.is_isomorphic(g1, g2, edge_match=em)
g2 = graph_class()
g2.add_edge("C", "D")
assert nx.is_isomorphic(g1, g2, edge_match=em)
|
TestDiGraphISO
|
python
|
pydantic__pydantic
|
pydantic/warnings.py
|
{
"start": 3925,
"end": 4154
}
|
class ____(Warning):
"""A Pydantic specific experimental functionality warning.
It is raised to warn users that the functionality may change or be removed in future versions of Pydantic.
"""
|
PydanticExperimentalWarning
|
python
|
Pylons__pyramid
|
tests/test_request.py
|
{
"start": 16739,
"end": 18391
}
|
class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _callFUT(self, request, extensions=None):
from pyramid.request import apply_request_extensions
return apply_request_extensions(request, extensions=extensions)
def test_it_with_registry(self):
from pyramid.interfaces import IRequestExtensions
extensions = Dummy()
extensions.methods = {'foo': lambda x, y: y}
extensions.descriptors = {'bar': property(lambda x: 'bar')}
self.config.registry.registerUtility(extensions, IRequestExtensions)
request = DummyRequest()
request.registry = self.config.registry
self._callFUT(request)
self.assertEqual(request.bar, 'bar')
self.assertEqual(request.foo('abc'), 'abc')
def test_it_override_extensions(self):
from pyramid.interfaces import IRequestExtensions
ignore = Dummy()
ignore.methods = {'x': lambda x, y, z: 'asdf'}
ignore.descriptors = {'bar': property(lambda x: 'asdf')}
self.config.registry.registerUtility(ignore, IRequestExtensions)
request = DummyRequest()
request.registry = self.config.registry
extensions = Dummy()
extensions.methods = {'foo': lambda x, y: y}
extensions.descriptors = {'bar': property(lambda x: 'bar')}
self._callFUT(request, extensions=extensions)
self.assertRaises(AttributeError, lambda: request.x)
self.assertEqual(request.bar, 'bar')
self.assertEqual(request.foo('abc'), 'abc')
|
Test_apply_request_extensions
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_data_labels35.py
|
{
"start": 315,
"end": 1482
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels35.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [48498944, 48508928]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": True, "border": {"color": "red"}},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/ext.py
|
{
"start": 8361,
"end": 20786
}
|
class ____(Extension):
"""This extension adds gettext support to Jinja."""
tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment: Environment) -> None:
super().__init__(environment)
environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False,
)
def _install(
self, translations: "_SupportedTranslations", newstyle: t.Optional[bool] = None
) -> None:
# ugettext and ungettext are preferred in case the I18N library
# is providing compatibility with older Python versions.
gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
pgettext = getattr(translations, "pgettext", None)
npgettext = getattr(translations, "npgettext", None)
self._install_callables(
gettext, ngettext, newstyle=newstyle, pgettext=pgettext, npgettext=npgettext
)
def _install_null(self, newstyle: t.Optional[bool] = None) -> None:
import gettext
translations = gettext.NullTranslations()
if hasattr(translations, "pgettext"):
# Python < 3.8
pgettext = translations.pgettext # type: ignore
else:
def pgettext(c: str, s: str) -> str:
return s
if hasattr(translations, "npgettext"):
npgettext = translations.npgettext # type: ignore
else:
def npgettext(c: str, s: str, p: str, n: int) -> str:
return s if n == 1 else p
self._install_callables(
gettext=translations.gettext,
ngettext=translations.ngettext,
newstyle=newstyle,
pgettext=pgettext,
npgettext=npgettext,
)
def _install_callables(
self,
gettext: t.Callable[[str], str],
ngettext: t.Callable[[str, str, int], str],
newstyle: t.Optional[bool] = None,
pgettext: t.Optional[t.Callable[[str, str], str]] = None,
npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
) -> None:
if newstyle is not None:
self.environment.newstyle_gettext = newstyle # type: ignore
if self.environment.newstyle_gettext: # type: ignore
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
if pgettext is not None:
pgettext = _make_new_pgettext(pgettext)
if npgettext is not None:
npgettext = _make_new_npgettext(npgettext)
self.environment.globals.update(
gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
)
def _uninstall(self, translations: "_SupportedTranslations") -> None:
for key in ("gettext", "ngettext", "pgettext", "npgettext"):
self.environment.globals.pop(key, None)
def _extract(
self,
source: t.Union[str, nodes.Template],
gettext_functions: t.Sequence[str] = GETTEXT_FUNCTIONS,
) -> t.Iterator[
t.Tuple[int, str, t.Union[t.Optional[str], t.Tuple[t.Optional[str], ...]]]
]:
if isinstance(source, str):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser: "Parser") -> t.Union[nodes.Node, t.List[nodes.Node]]:
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr: t.Optional[nodes.Expr] = None
plural_expr_assignment: t.Optional[nodes.Assign] = None
variables: t.Dict[str, nodes.Expr] = {}
trimmed = None
while parser.stream.current.type != "block_end":
if variables:
parser.stream.expect("comma")
# skip colon for python compatibility
if parser.stream.skip_if("colon"):
break
token = parser.stream.expect("name")
if token.value in variables:
parser.fail(
f"translatable variable {token.value!r} defined twice.",
token.lineno,
exc=TemplateAssertionError,
)
# expressions
if parser.stream.current.type == "assign":
next(parser.stream)
variables[token.value] = var = parser.parse_expression()
elif trimmed is None and token.value in ("trimmed", "notrimmed"):
trimmed = token.value == "trimmed"
continue
else:
variables[token.value] = var = nodes.Name(token.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
plural_expr = nodes.Name("_trans", "load")
variables[token.value] = plural_expr
plural_expr_assignment = nodes.Assign(
nodes.Name("_trans", "store"), var
)
else:
plural_expr = var
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], "load")
num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
if parser.stream.current.type != "block_end":
token = parser.stream.expect("name")
if token.value not in variables:
parser.fail(
f"unknown variable {token.value!r} for pluralization",
token.lineno,
exc=TemplateAssertionError,
)
plural_expr = variables[token.value]
num_called_num = token.value == "num"
parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for name in referenced:
if name not in variables:
variables[name] = nodes.Name(name, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail("pluralize without variables", lineno)
if trimmed is None:
trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
node = self._make_node(
singular,
plural,
variables,
plural_expr,
bool(referenced),
num_called_num and have_plural,
)
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
def _trim_whitespace(self, string: str, _ws_re: t.Pattern[str] = _ws_re) -> str:
return _ws_re.sub(" ", string.strip())
def _parse_block(
self, parser: "Parser", allow_pluralize: bool
) -> t.Tuple[t.List[str], str]:
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while True:
if parser.stream.current.type == "data":
buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
elif parser.stream.current.type == "variable_begin":
next(parser.stream)
name = parser.stream.expect("name").value
referenced.append(name)
buf.append(f"%({name})s")
parser.stream.expect("variable_end")
elif parser.stream.current.type == "block_begin":
next(parser.stream)
if parser.stream.current.test("name:endtrans"):
break
elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
parser.fail(
"a translatable section can have only one pluralize section"
)
parser.fail(
"control structures in translatable sections are not allowed"
)
elif parser.stream.eos:
parser.fail("unclosed translation block")
else:
raise RuntimeError("internal parser error")
return referenced, concat(buf)
def _make_node(
self,
singular: str,
plural: t.Optional[str],
variables: t.Dict[str, nodes.Expr],
plural_expr: t.Optional[nodes.Expr],
vars_referenced: bool,
num_called_num: bool,
) -> nodes.Output:
"""Generates a useful node from the data provided."""
newstyle = self.environment.newstyle_gettext # type: ignore
node: nodes.Expr
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not newstyle:
singular = singular.replace("%%", "%")
if plural:
plural = plural.replace("%%", "%")
# singular only:
if plural_expr is None:
gettext = nodes.Name("gettext", "load")
node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
# singular and plural
else:
ngettext = nodes.Name("ngettext", "load")
node = nodes.Call(
ngettext,
[nodes.Const(singular), nodes.Const(plural), plural_expr],
[],
None,
None,
)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if newstyle:
for key, value in variables.items():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(
node,
nodes.Dict(
[
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]
),
)
return nodes.Output([node])
|
InternationalizationExtension
|
python
|
google__pytype
|
pytype/overlays/abc_overlay.py
|
{
"start": 1352,
"end": 1774
}
|
class ____(abstract.PyTDFunction):
"""Implements the @abc.abstractmethod decorator."""
@classmethod
def make(cls, ctx, module):
return super().make("abstractmethod", ctx, module)
def call(self, node, func, args, alias_map=None):
"""Marks that the given function is abstract."""
del func, alias_map # unused
self.match_args(node, args)
return node, _set_abstract(args, "funcobj")
|
AbstractMethod
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/cloud_memorystore.py
|
{
"start": 1617,
"end": 1877
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Memorystore Memcached List of Instances Link."""
name = "Memorystore Memcached List of Instances"
key = "memcached_instances"
format_str = MEMCACHED_LIST_LINK
|
MemcachedInstanceListLink
|
python
|
doocs__leetcode
|
lcof/面试题44. 数字序列中某一位的数字/Solution.py
|
{
"start": 0,
"end": 277
}
|
class ____:
def findNthDigit(self, n: int) -> int:
k, cnt = 1, 9
while k * cnt < n:
n -= k * cnt
k += 1
cnt *= 10
num = 10 ** (k - 1) + (n - 1) // k
idx = (n - 1) % k
return int(str(num)[idx])
|
Solution
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/test_events.py
|
{
"start": 1735,
"end": 13594
}
|
class ____:
def __init__(self, attributes=[]) -> None:
self.event_name = None
self.attributes = attributes
self.payload = {}
def __call__(self, event):
self.event_name = event.event_name
self.payload = {attr:getattr(event, attr) for attr in self.attributes}
def test_event_metaclass() -> None:
# All events currently in the namespace should be in the EVENT_CLASSES set
assert len(concrete_events - set(events._CONCRETE_EVENT_CLASSES.values())) == 0
def test_common_decode_json() -> None:
for event_name, event_cls in events._CONCRETE_EVENT_CLASSES.items():
if event_name is None:
continue # Skip abstract base class
legend_item = LegendItem()
if issubclass(event_cls, events.ButtonClick):
model = Button()
elif issubclass(event_cls, events.AxisClick):
model = LinearAxis()
elif issubclass(event_cls, events.LegendItemClick):
model = Legend(items=[legend_item])
elif issubclass(event_cls, events.ValueSubmit):
model = TextInput()
elif issubclass(event_cls, ClearInput):
model = FileInput()
else:
model = Plot()
entries = []
if issubclass(event_cls, events.ModelEvent):
entries.append(["model", model.ref])
if issubclass(event_cls, events.LegendItemClick):
entries.append(["item", legend_item.ref])
if issubclass(event_cls, events.ValueSubmit):
entries.append(["value", ""])
decoder = Deserializer(references=[model, legend_item])
event = decoder.decode(dict(
type="event",
name=event_cls.event_name,
values=dict(type="map", entries=entries),
))
assert isinstance(event, events.Event)
if isinstance(event, events.ModelEvent):
assert event.model == model
def test_pointevent_subclass_decode_json() -> None:
for event_cls in point_events:
if event_cls.event_name is None:
continue # Skip abstract base class
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=event_cls.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
["sx", 3],
["sy", -2],
["x", 10],
["y", 100],
],
),
))
assert event.model == model
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_panevent_decode_json() -> None:
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.Pan.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
["delta_x", 0.1],
["delta_y", 0.3],
["sx", 3],
["sy", -2],
["x", 10],
["y", 100],
],
),
))
assert event.model == model
assert event.delta_x == 0.1
assert event.delta_y == 0.3
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_mousewheelevent_decode_json() -> None:
"""
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events..event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
],
),
))
"""
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.MouseWheel.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
["delta", -0.1],
["sx", 3],
["sy", -2],
["x", 10],
["y", 100],
],
),
))
assert event.model == model
assert event.delta == -0.1
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_pinchevent_decode_json() -> None:
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.Pinch.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
["scale", 42],
["sx", 3],
["sy", -2],
["x", 10],
["y", 100],
],
),
))
assert event.model == model
assert event.scale == 42
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_event_constructor_button() -> None:
model = Button()
event = events.ModelEvent(model)
assert event.model == model
def test_event_constructor_div() -> None:
model = Div()
event = events.ModelEvent(model)
assert event.model == model
def test_event_constructor_plot() -> None:
model = Plot()
event = events.ModelEvent(model)
assert event.model == model
def test_buttonclick_constructor_button() -> None:
model = Button()
event = events.ButtonClick(model)
assert event.model == model
def test_buttonclick_constructor_div() -> None:
with pytest.raises(ValueError):
events.ButtonClick(Div())
def test_buttonclick_constructor_plot() -> None:
with pytest.raises(ValueError):
events.ButtonClick(Plot())
def test_lodstart_constructor_button() -> None:
with pytest.raises(ValueError):
events.LODStart(Button())
def test_lodstart_constructor_div() -> None:
with pytest.raises(ValueError):
events.LODStart(Div())
def test_lodstart_constructor_plot() -> None:
model = Plot()
event = events.LODStart(model)
assert event.model == model
def test_lodend_constructor_button() -> None:
with pytest.raises(ValueError):
events.LODEnd(Button())
def test_lodend_constructor_div() -> None:
with pytest.raises(ValueError):
events.LODEnd(Div())
def test_lodend_constructor_plot() -> None:
model = Plot()
event = events.LODEnd(model)
assert event.model == model
def test_plotevent_constructor_button() -> None:
with pytest.raises(ValueError):
events.PlotEvent(Button())
def test_plotevent_constructor_div() -> None:
with pytest.raises(ValueError):
events.PlotEvent(Div())
def test_plotevent_constructor_plot() -> None:
model = Plot()
event = events.PlotEvent(model)
assert event.model == model
def test_pointEvent_constructor_plot() -> None:
model = Plot()
event = events.PointEvent(model, sx=3, sy=-2, x=10, y=100)
assert event.model == model
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_pointevent_constructor_button() -> None:
with pytest.raises(ValueError):
events.PointEvent(Button(), sx=3, sy=-2, x=10, y=100)
def test_pointevent_constructor_div() -> None:
with pytest.raises(ValueError):
events.PointEvent(Div(), sx=3, sy=-2, x=10, y=100)
def test_pointevent_subclass_constructor_plot() -> None:
model = Plot()
for subcls in point_events:
event = subcls(model, sx=3, sy=-2, x=10, y=100)
assert event.model == model
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100
def test_pointevent_subclass_constructor_button() -> None:
model = Button()
for subcls in point_events:
with pytest.raises(ValueError):
subcls(model, sx=3, sy=-2, x=10, y=100)
def test_pointevent_subclass_constructor_div() -> None:
model = Div()
for subcls in point_events:
with pytest.raises(ValueError):
subcls(model, sx=3, sy=-2, x=10, y=100)
# Testing event callback invocation
def test_buttonclick_event_callbacks() -> None:
button = Button()
test_callback = EventCallback()
button.on_event(events.ButtonClick, test_callback)
assert test_callback.event_name is None
button._trigger_event(events.ButtonClick(button))
assert test_callback.event_name == events.ButtonClick.event_name
def test_atomic_plot_event_callbacks() -> None:
plot = Plot()
for event_cls in [events.LODStart, events.LODEnd]:
test_callback = EventCallback()
plot.on_event(event_cls, test_callback)
assert test_callback.event_name is None
plot._trigger_event(event_cls(plot))
assert test_callback.event_name == event_cls.event_name
def test_pointevent_callbacks() -> None:
plot = Plot()
payload = dict(sx=3, sy=-2, x=10, y=100)
for event_cls in point_events:
test_callback = EventCallback(['sx','sy','x','y'])
plot.on_event(event_cls, test_callback)
assert test_callback.event_name is None
plot._trigger_event(event_cls(plot, **payload))
assert test_callback.event_name == event_cls.event_name
assert test_callback.payload == payload
def test_mousewheel_callbacks() -> None:
plot = Plot()
payload = dict(sx=3, sy=-2, x=10, y=100, delta=5)
test_callback = EventCallback(['sx','sy','x','y', 'delta'])
plot.on_event(events.MouseWheel, test_callback)
assert test_callback.event_name is None
plot._trigger_event(events.MouseWheel(plot, **payload))
assert test_callback.event_name == events.MouseWheel.event_name
assert test_callback.payload == payload
def test_pan_callbacks() -> None:
plot = Plot()
payload = dict(sx=3, sy=-2, x=10, y=100, delta_x=2, delta_y=3.2)
test_callback = EventCallback(['sx','sy','x','y', 'delta_x', 'delta_y'])
plot.on_event(events.Pan, test_callback)
assert test_callback.event_name is None
plot._trigger_event(events.Pan(plot, **payload))
assert test_callback.event_name == events.Pan.event_name
assert test_callback.payload == payload
def test_pinch_callbacks() -> None:
plot = Plot()
payload = dict(sx=3, sy=-2, x=10, y=100, scale=42)
test_callback = EventCallback(['sx','sy','x','y', 'scale'])
plot.on_event(events.Pinch, test_callback)
assert test_callback.event_name is None
plot._trigger_event(events.Pinch(plot, **payload))
assert test_callback.event_name == events.Pinch.event_name
assert test_callback.payload == payload
def test_FileInput_clear() -> None:
file_input = FileInput()
doc = Document()
doc.add_root(file_input)
collected_events: list[DocumentChangedEvent] = []
def on_change(event: DocumentChangedEvent) -> None:
collected_events.append(event)
doc.on_change(on_change)
file_input.clear()
assert len(collected_events) == 1
[event] = collected_events
assert isinstance(event, MessageSentEvent)
assert event.msg_type == "bokeh_event"
assert isinstance(event.msg_data, ClearInput)
assert event.msg_data.model == file_input
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
EventCallback
|
python
|
tensorflow__tensorflow
|
tensorflow/core/function/trace_type/serialization_test.py
|
{
"start": 2162,
"end": 3206
}
|
class ____(serialization.Serializable):
@classmethod
def experimental_type_proto(cls):
return serialization_test_pb2.MyMultiClassRepresentation
@classmethod
def experimental_from_proto(cls, proto):
if proto.id == 1:
return SerializableFromSuperClassOne()
if proto.id == 2:
return SerializableFromSuperClassTwo()
if proto.id == 3:
return SerializableFromSuperClassThree()
raise NotImplementedError
def experimental_as_proto(self):
if isinstance(self, SerializableFromSuperClassOne):
return serialization_test_pb2.MyMultiClassRepresentation(id=1)
if isinstance(self, SerializableFromSuperClassTwo):
return serialization_test_pb2.MyMultiClassRepresentation(id=2)
if isinstance(self, SerializableFromSuperClassThree):
return serialization_test_pb2.MyMultiClassRepresentation(id=3)
raise NotImplementedError
def __eq__(self, other):
return type(self) is type(other)
serialization.register_serializable(MySerializableSuperClass)
|
MySerializableSuperClass
|
python
|
pypa__warehouse
|
warehouse/integrations/vulnerabilities/models.py
|
{
"start": 980,
"end": 2049
}
|
class ____(db.ModelBase):
__tablename__ = "vulnerabilities"
source: Mapped[str] = mapped_column(primary_key=True)
id: Mapped[str] = mapped_column(primary_key=True)
# The URL for the vulnerability report at the source
# e.g. "https://osv.dev/vulnerability/PYSEC-2021-314"
link: Mapped[str | None]
# Alternative IDs for this vulnerability
# e.g. "CVE-2021-12345"
aliases: Mapped[list[str] | None] = mapped_column(ARRAY(String))
# Details about the vulnerability
details: Mapped[str | None]
# A short, plaintext summary of the vulnerability
summary: Mapped[str | None]
# Events of introduced/fixed versions
fixed_in: Mapped[list[str] | None] = mapped_column(ARRAY(String))
# When the vulnerability was withdrawn, if it has been withdrawn.
withdrawn: Mapped[datetime.datetime | None]
releases: Mapped[list[Release]] = orm.relationship(
"Release",
back_populates="vulnerabilities",
secondary="release_vulnerabilities",
passive_deletes=True,
)
|
VulnerabilityRecord
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reach-end-of-array-with-max-score.py
|
{
"start": 38,
"end": 296
}
|
class ____(object):
def findMaximumScore(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = mx = 0
for x in nums:
result += mx
mx = max(mx, x)
return result
|
Solution
|
python
|
pypa__pip
|
src/pip/_vendor/resolvelib/resolvers/resolution.py
|
{
"start": 21515,
"end": 24212
}
|
class ____(AbstractResolver[RT, CT, KT]):
"""The thing that performs the actual resolution work."""
base_exception = ResolverException
def resolve( # type: ignore[override]
self,
requirements: Iterable[RT],
max_rounds: int = 100,
) -> Result[RT, CT, KT]:
"""Take a collection of constraints, spit out the resolution result.
The return value is a representation to the final resolution result. It
is a tuple subclass with three public members:
* `mapping`: A dict of resolved candidates. Each key is an identifier
of a requirement (as returned by the provider's `identify` method),
and the value is the resolved candidate.
* `graph`: A `DirectedGraph` instance representing the dependency tree.
The vertices are keys of `mapping`, and each edge represents *why*
a particular package is included. A special vertex `None` is
included to represent parents of user-supplied requirements.
* `criteria`: A dict of "criteria" that hold detailed information on
how edges in the graph are derived. Each key is an identifier of a
requirement, and the value is a `Criterion` instance.
The following exceptions may be raised if a resolution cannot be found:
* `ResolutionImpossible`: A resolution cannot be found for the given
combination of requirements. The `causes` attribute of the
exception is a list of (requirement, parent), giving the
requirements that could not be satisfied.
* `ResolutionTooDeep`: The dependency tree is too deeply nested and
the resolver gave up. This is usually caused by a circular
dependency, but you can try to resolve this by increasing the
`max_rounds` argument.
"""
resolution = Resolution(self.provider, self.reporter)
state = resolution.resolve(requirements, max_rounds=max_rounds)
return _build_result(state)
def _has_route_to_root(
criteria: Mapping[KT, Criterion[RT, CT]],
key: KT | None,
all_keys: dict[int, KT | None],
connected: set[KT | None],
) -> bool:
if key in connected:
return True
if key not in criteria:
return False
assert key is not None
for p in criteria[key].iter_parent():
try:
pkey = all_keys[id(p)]
except KeyError:
continue
if pkey in connected:
connected.add(key)
return True
if _has_route_to_root(criteria, pkey, all_keys, connected):
connected.add(key)
return True
return False
|
Resolver
|
python
|
ethereum__web3.py
|
web3/contract/async_contract.py
|
{
"start": 17617,
"end": 19466
}
|
class ____(BaseContractCaller):
# mypy types
w3: "AsyncWeb3[Any]"
def __init__(
self,
abi: ABI,
w3: "AsyncWeb3[Any]",
address: ChecksumAddress,
transaction: TxParams | None = None,
block_identifier: BlockIdentifier = None,
ccip_read_enabled: bool | None = None,
decode_tuples: bool | None = False,
contract_functions: AsyncContractFunctions | None = None,
) -> None:
super().__init__(abi, w3, address, decode_tuples=decode_tuples)
if self.abi:
if transaction is None:
transaction = {}
if contract_functions is None:
contract_functions = AsyncContractFunctions(
abi, w3, address, decode_tuples=decode_tuples
)
self._functions = contract_functions._functions
for fn in contract_functions.__iter__():
caller_method = partial(
self.call_function,
fn,
transaction=transaction,
block_identifier=block_identifier,
ccip_read_enabled=ccip_read_enabled,
)
setattr(self, str(fn.abi_element_identifier), caller_method)
def __call__(
self,
transaction: TxParams | None = None,
block_identifier: BlockIdentifier = None,
ccip_read_enabled: bool | None = None,
) -> "AsyncContractCaller":
if transaction is None:
transaction = {}
return type(self)(
self.abi,
self.w3,
self.address,
transaction=transaction,
block_identifier=block_identifier,
ccip_read_enabled=ccip_read_enabled,
decode_tuples=self.decode_tuples,
)
|
AsyncContractCaller
|
python
|
astropy__astropy
|
astropy/units/tests/test_quantity_ufuncs.py
|
{
"start": 40284,
"end": 40943
}
|
class ____:
"""Test the where argument in ufuncs."""
def test_where(self):
q = np.arange(4.0) << u.m
out = np.zeros(4) << u.m
result = np.add(q, 1 * u.km, out=out, where=[True, True, True, False])
assert result is out
assert_array_equal(result, [1000.0, 1001.0, 1002.0, 0.0] << u.m)
def test_exception_with_where_quantity(self):
a = np.ones(2)
where = np.ones(2, bool) << u.m
with pytest.raises(TypeError, match="all returned NotImplemented"):
np.add(a, a, out=a, where=where)
@pytest.mark.skipif(not hasattr(np_umath, "clip"), reason="no clip ufunc available")
|
TestWhere
|
python
|
h5py__h5py
|
h5py/tests/test_slicing.py
|
{
"start": 8951,
"end": 10573
}
|
class ____(BaseSlicing):
"""
Field names for read & write
"""
dt = np.dtype([('a', 'f'), ('b', 'i'), ('c', 'f4')])
data = np.ones((100,), dtype=dt)
def setUp(self):
BaseSlicing.setUp(self)
self.dset = self.f.create_dataset('x', (100,), dtype=self.dt)
self.dset[...] = self.data
def test_read(self):
""" Test read with field selections """
self.assertArrayEqual(self.dset['a'], self.data['a'])
def test_unicode_names(self):
""" Unicode field names for for read and write """
self.assertArrayEqual(self.dset['a'], self.data['a'])
data = self.data.copy()
dset = self.f.create_dataset(make_name(), data=data)
dset['a'] = 42
data['a'] = 42
self.assertArrayEqual(dset['a'], data['a'])
def test_write(self):
""" Test write with field selections """
data = self.data.copy()
dset = self.f.create_dataset(make_name(), data=data)
data['a'] *= 2
dset['a'] = data
self.assertTrue(np.all(dset[...] == data))
data['b'] *= 4
dset['b'] = data
self.assertTrue(np.all(dset[...] == data))
data['a'] *= 3
data['c'] *= 3
dset['a','c'] = data
self.assertTrue(np.all(dset[...] == data))
def test_write_noncompound(self):
""" Test write with non-compound source (single-field) """
data = self.data.copy()
dset = self.f.create_dataset(make_name(), data=data)
data['b'] = 1.0
dset['b'] = 1.0
self.assertTrue(np.all(dset[...] == data))
|
TestFieldNames
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/units.py
|
{
"start": 4266,
"end": 5027
}
|
class ____(ConversionInterface):
"""Converter for decimal.Decimal data to float."""
@staticmethod
def convert(value, unit, axis):
"""
Convert Decimals to floats.
The *unit* and *axis* arguments are not used.
Parameters
----------
value : decimal.Decimal or iterable
Decimal or list of Decimal need to be converted
"""
if isinstance(value, Decimal):
return float(value)
# value is Iterable[Decimal]
elif isinstance(value, ma.MaskedArray):
return ma.asarray(value, dtype=float)
else:
return np.asarray(value, dtype=float)
# axisinfo and default_units can be inherited as Decimals are Numbers.
|
DecimalConverter
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-code-hierarchy/tests/test_utility_methods.py
|
{
"start": 2687,
"end": 3044
}
|
class ____ {
exampleMethod() {
console.log("line1");
}
}
function baz() {
console.log("bbq");
}"""
(
indent_char,
count_per_indent,
first_indent_level,
) = CodeHierarchyNodeParser._get_indentation(text)
assert indent_char == " "
assert count_per_indent == 4
assert first_indent_level == 0
|
Example
|
python
|
ray-project__ray
|
python/ray/serve/handle.py
|
{
"start": 1245,
"end": 8827
}
|
class ____:
def __init__(
self,
deployment_name: str,
app_name: str,
*,
init_options: Optional[InitHandleOptionsBase] = None,
handle_options: Optional[DynamicHandleOptionsBase] = None,
_router: Optional[Router] = None,
_create_router: Optional[CreateRouterCallable] = None,
_request_counter: Optional[metrics.Counter] = None,
_handle_id: Optional[str] = None,
):
self.deployment_id = DeploymentID(name=deployment_name, app_name=app_name)
self.init_options: Optional[InitHandleOptionsBase] = init_options
self.handle_options: DynamicHandleOptionsBase = (
handle_options or create_dynamic_handle_options()
)
# Handle ID is shared among handles that are returned by
# `handle.options` or `handle.method`
self.handle_id = _handle_id or get_random_string()
self.request_counter = _request_counter or self._create_request_counter(
app_name, deployment_name, self.handle_id
)
self._router: Optional[Router] = _router
if _create_router is None:
self._create_router = create_router
else:
self._create_router = _create_router
@staticmethod
def _gen_handle_tag(app_name: str, deployment_name: str, handle_id: str):
if app_name:
return f"{app_name}#{deployment_name}#{handle_id}"
else:
return f"{deployment_name}#{handle_id}"
@classmethod
def _create_request_counter(
cls, app_name: str, deployment_name: str, handle_id: str
):
return metrics.Counter(
"serve_handle_request_counter",
description=(
"The number of handle.remote() calls that have been "
"made on this handle."
),
tag_keys=("handle", "deployment", "route", "application"),
).set_default_tags(
{
"handle": cls._gen_handle_tag(
app_name, deployment_name, handle_id=handle_id
),
"deployment": deployment_name,
"application": app_name,
}
)
def running_replicas_populated(self) -> bool:
if self._router is None:
return False
return self._router.running_replicas_populated()
@property
def deployment_name(self) -> str:
return self.deployment_id.name
@property
def app_name(self) -> str:
return self.deployment_id.app_name
@property
def is_initialized(self) -> bool:
return self._router is not None
def _init(self, **kwargs):
"""Initialize this handle with arguments.
A handle can only be initialized once. A handle is implicitly
initialized when `.options()` or `.remote()` is called. Therefore
to initialize a handle with custom init options, you must do it
before calling `.options()` or `.remote()`.
"""
if self._router is not None:
raise RuntimeError(
"Handle has already been initialized. Note that a handle is implicitly "
"initialized when you call `.options()` or `.remote()`. You either "
"tried to call `._init()` twice or called `._init()` after calling "
"`.options()` or `.remote()`. If you want to modify the init options, "
"please do so before calling `.options()` or `.remote()`. This handle "
f"was initialized with {self.init_options}."
)
init_options = create_init_handle_options(**kwargs)
self._router = self._create_router(
handle_id=self.handle_id,
deployment_id=self.deployment_id,
handle_options=init_options,
)
self.init_options = init_options
logger.info(
f"Initialized DeploymentHandle {self.handle_id} for {self.deployment_id}.",
extra={"log_to_stderr": False},
)
# Record handle api telemetry when not in the proxy
if (
self.init_options._source != DeploymentHandleSource.PROXY
and self.__class__ == DeploymentHandle
):
ServeUsageTag.DEPLOYMENT_HANDLE_API_USED.record("1")
def _is_router_running_in_separate_loop(self) -> bool:
return self.init_options._run_router_in_separate_loop
    def _options(self, _prefer_local_routing=DEFAULT.VALUE, **kwargs):
        """Return a new handle with updated per-request handle options.

        Lazily initializes this handle if needed. The returned handle shares
        this handle's router, request counter, and handle id.
        """
        if kwargs.get("stream") is True and inside_ray_client_context():
            raise RuntimeError(
                "Streaming DeploymentHandles are not currently supported when "
                "connected to a remote Ray cluster using Ray Client."
            )
        new_handle_options = self.handle_options.copy_and_update(**kwargs)
        # TODO(zcin): remove when _prefer_local_routing is removed from options() path
        # NOTE(review): `_init` raises if the handle was already initialized, so
        # passing `_prefer_local_routing` after first use would error — confirm
        # this is the intended behavior.
        if _prefer_local_routing != DEFAULT.VALUE:
            self._init(_prefer_local_routing=_prefer_local_routing)
        if not self.is_initialized:
            self._init()
        return DeploymentHandle(
            self.deployment_name,
            self.app_name,
            init_options=self.init_options,
            handle_options=new_handle_options,
            _router=self._router,
            _create_router=self._create_router,
            _request_counter=self.request_counter,
            _handle_id=self.handle_id,
        )
    def _remote(
        self,
        args: Tuple[Any],
        kwargs: Dict[str, Any],
    ) -> Tuple[concurrent.futures.Future, RequestMetadata]:
        """Assign a request to the router and return `(future, metadata)`.

        Lazily initializes the handle on first use and increments the
        per-handle request counter, tagged with route and application.
        """
        if not self.is_initialized:
            self._init()
        metadata = serve._private.default_impl.get_request_metadata(
            self.init_options, self.handle_options
        )
        self.request_counter.inc(
            tags={
                "route": metadata.route,
                "application": metadata.app_name,
            }
        )
        return self._router.assign_request(metadata, *args, **kwargs), metadata
    def __getattr__(self, name):
        """Interpret unknown attribute access as selecting a method by name.

        Only called for attributes not found normally, so real attributes and
        methods are unaffected.
        """
        return self.options(method_name=name)
def shutdown(self):
if self._router:
shutdown_future = self._router.shutdown()
if self._is_router_running_in_separate_loop():
shutdown_future.result()
else:
logger.warning(
"Synchronously shutting down a router that's running in the same "
"event loop can only be done best effort. Please use "
"`shutdown_async` instead."
)
async def shutdown_async(self):
if self._router:
shutdown_future: Union[
asyncio.Future, concurrent.futures.Future
] = self._router.shutdown()
if self._is_router_running_in_separate_loop:
await asyncio.wrap_future(shutdown_future)
else:
await shutdown_future
def __repr__(self):
return f"{self.__class__.__name__}" f"(deployment='{self.deployment_name}')"
    @classmethod
    def _deserialize(cls, kwargs):
        """Required for this class's __reduce__ method to be picklable."""
        return cls(**kwargs)
def __reduce__(self):
serialized_constructor_args = {
"deployment_name": self.deployment_name,
"app_name": self.app_name,
"handle_options": self.handle_options,
}
return self.__class__._deserialize, (serialized_constructor_args,)
|
_DeploymentHandleBase
|
python
|
huggingface__transformers
|
src/transformers/integrations/integration_utils.py
|
{
"start": 62864,
"end": 74980
}
|
class ____(TrainerCallback):
    """TrainerCallback that sends the logs to [Neptune](https://app.neptune.ai).

    Args:
        api_token (`str`, *optional*): Neptune API token obtained upon registration.
            You can leave this argument out if you have saved your token to the `NEPTUNE_API_TOKEN` environment
            variable (strongly recommended). See full setup instructions in the
            [docs](https://docs.neptune.ai/setup/installation).
        project (`str`, *optional*): Name of an existing Neptune project, in the form "workspace-name/project-name".
            You can find and copy the name in Neptune from the project settings -> Properties. If None (default), the
            value of the `NEPTUNE_PROJECT` environment variable is used.
        name (`str`, *optional*): Custom name for the run.
        base_namespace (`str`, *optional*, defaults to "finetuning"): In the Neptune run, the root namespace
            that will contain all of the metadata logged by the callback.
        log_parameters (`bool`, *optional*, defaults to `True`):
            If True, logs all Trainer arguments and model parameters provided by the Trainer.
        log_checkpoints (`str`, *optional*): If "same", uploads checkpoints whenever they are saved by the Trainer.
            If "last", uploads only the most recently saved checkpoint. If "best", uploads the best checkpoint (among
            the ones saved by the Trainer). If `None`, does not upload checkpoints.
        run (`Run`, *optional*): Pass a Neptune run object if you want to continue logging to an existing run.
            Read more about resuming runs in the [docs](https://docs.neptune.ai/logging/to_existing_object).
        **neptune_run_kwargs (*optional*):
            Additional keyword arguments to be passed directly to the
            [`neptune.init_run()`](https://docs.neptune.ai/api/neptune#init_run) function when a new run is created.

    For instructions and examples, see the [Transformers integration
    guide](https://docs.neptune.ai/integrations/transformers) in the Neptune documentation.
    """

    # Namespace keys used inside the Neptune run.
    integration_version_key = "source_code/integrations/transformers"
    model_parameters_key = "model_parameters"
    trial_name_key = "trial"
    trial_params_key = "trial_params"
    trainer_parameters_key = "trainer_parameters"
    # Metric names logged as single values rather than step series.
    flat_metrics = {"train/epoch"}

    def __init__(
        self,
        *,
        api_token: str | None = None,
        project: str | None = None,
        name: str | None = None,
        base_namespace: str = "finetuning",
        run=None,
        log_parameters: bool = True,
        log_checkpoints: str | None = None,
        **neptune_run_kwargs,
    ):
        if not is_neptune_available():
            raise ValueError(
                "NeptuneCallback requires the Neptune client library to be installed. "
                "To install the library, run `pip install neptune`."
            )
        # Support both the current `neptune` package layout and the legacy
        # `neptune.new` layout.
        try:
            from neptune import Run
            from neptune.internal.utils import verify_type
        except ImportError:
            from neptune.new.internal.utils import verify_type
            from neptune.new.metadata_containers.run import Run

        verify_type("api_token", api_token, (str, type(None)))
        verify_type("project", project, (str, type(None)))
        verify_type("name", name, (str, type(None)))
        verify_type("base_namespace", base_namespace, str)
        verify_type("run", run, (Run, type(None)))
        verify_type("log_parameters", log_parameters, bool)
        verify_type("log_checkpoints", log_checkpoints, (str, type(None)))

        self._base_namespace_path = base_namespace
        self._log_parameters = log_parameters
        self._log_checkpoints = log_checkpoints
        self._initial_run: Run | None = run
        self._run = None
        self._is_monitoring_run = False
        self._run_id = None
        self._force_reset_monitoring_run = False
        self._init_run_kwargs = {"api_token": api_token, "project": project, "name": name, **neptune_run_kwargs}

        self._volatile_checkpoints_dir = None
        self._should_upload_checkpoint = self._log_checkpoints is not None
        self._recent_checkpoint_path = None

        # "last"/"best" keep only a single checkpoint in the run, so the
        # previously uploaded one must be deleted after each new upload.
        if self._log_checkpoints in {"last", "best"}:
            self._target_checkpoints_namespace = f"checkpoints/{self._log_checkpoints}"
            self._should_clean_recently_uploaded_checkpoint = True
        else:
            self._target_checkpoints_namespace = "checkpoints"
            self._should_clean_recently_uploaded_checkpoint = False

    def _stop_run_if_exists(self):
        """Stop and drop the active Neptune run, if any."""
        if self._run:
            self._run.stop()
            del self._run
            self._run = None

    def _initialize_run(self, **additional_neptune_kwargs):
        """Create a fresh Neptune run, replacing any currently active one."""
        try:
            from neptune import init_run
            from neptune.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException
        except ImportError:
            from neptune.new import init_run
            from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException

        self._stop_run_if_exists()

        try:
            run_params = additional_neptune_kwargs.copy()
            run_params.update(self._init_run_kwargs)
            self._run = init_run(**run_params)
            self._run_id = self._run["sys/id"].fetch()
        except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e:
            raise NeptuneMissingConfiguration() from e

    def _use_initial_run(self):
        """Adopt the user-supplied run (one-shot; consumed on first use)."""
        self._run = self._initial_run
        self._is_monitoring_run = True
        self._run_id = self._run["sys/id"].fetch()
        self._initial_run = None

    def _ensure_run_with_monitoring(self):
        """Guarantee an active run that captures hardware/stdout monitoring."""
        if self._initial_run is not None:
            self._use_initial_run()
        else:
            if not self._force_reset_monitoring_run and self._is_monitoring_run:
                return

            if self._run and not self._is_monitoring_run and not self._force_reset_monitoring_run:
                # Re-open the same run (by id) with monitoring enabled.
                self._initialize_run(with_id=self._run_id)
                self._is_monitoring_run = True
            else:
                self._initialize_run()
                self._force_reset_monitoring_run = False

    def _ensure_at_least_run_without_monitoring(self):
        """Guarantee an active run, creating a monitoring-free one if needed."""
        if self._initial_run is not None:
            self._use_initial_run()
        else:
            if not self._run:
                self._initialize_run(
                    with_id=self._run_id,
                    capture_stdout=False,
                    capture_stderr=False,
                    capture_hardware_metrics=False,
                    capture_traceback=False,
                )
                self._is_monitoring_run = False

    @property
    def run(self):
        """The active Neptune run, lazily created without monitoring."""
        if self._run is None:
            self._ensure_at_least_run_without_monitoring()
        return self._run

    @property
    def _metadata_namespace(self):
        # All callback metadata lives under the configured base namespace.
        return self.run[self._base_namespace_path]

    def _log_integration_version(self):
        self.run[NeptuneCallback.integration_version_key] = version

    def _log_trainer_parameters(self, args):
        self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict()

    def _log_model_parameters(self, model):
        from neptune.utils import stringify_unsupported

        if model and hasattr(model, "config") and model.config is not None:
            self._metadata_namespace[NeptuneCallback.model_parameters_key] = stringify_unsupported(
                model.config.to_dict()
            )

    def _log_hyper_param_search_parameters(self, state):
        if state and hasattr(state, "trial_name"):
            self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name
        if state and hasattr(state, "trial_params") and state.trial_params is not None:
            self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params

    def _log_model_checkpoint(self, source_directory: str, checkpoint: str):
        """Upload one checkpoint directory, optionally via a stable copy.

        When `save_total_limit` is set the Trainer may delete checkpoints while
        they upload, so a copy is made into the volatile dir first.
        """
        target_path = relative_path = os.path.join(source_directory, checkpoint)

        if self._volatile_checkpoints_dir is not None:
            consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint)
            try:
                # Remove leading ../ from a relative path.
                cpkt_path = relative_path.replace("..", "").lstrip(os.path.sep)
                copy_path = os.path.join(consistent_checkpoint_path, cpkt_path)
                shutil.copytree(relative_path, copy_path)
                target_path = consistent_checkpoint_path
            except OSError as e:
                # Fixed typo in the warning message ("made" -> "make").
                logger.warning(
                    f"NeptuneCallback was unable to make a copy of checkpoint due to I/O exception: '{e}'. "
                    "Could fail trying to upload."
                )

        self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path)

        if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None:
            self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path)

        self._recent_checkpoint_path = relative_path

    def on_init_end(self, args, state, control, **kwargs):
        self._volatile_checkpoints_dir = None
        if self._log_checkpoints and args.save_total_limit is not None:
            self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name

        if self._log_checkpoints == "best" and not args.load_best_model_at_end:
            raise ValueError("To save the best model checkpoint, the load_best_model_at_end argument must be enabled.")

    def on_train_begin(self, args, state, control, model=None, **kwargs):
        if not state.is_world_process_zero:
            return

        self._ensure_run_with_monitoring()
        self._force_reset_monitoring_run = True

        self._log_integration_version()
        if self._log_parameters:
            self._log_trainer_parameters(args)
            self._log_model_parameters(model)

        if state.is_hyper_param_search:
            self._log_hyper_param_search_parameters(state)

    def on_train_end(self, args, state, control, **kwargs):
        self._stop_run_if_exists()

    def __del__(self):
        if self._volatile_checkpoints_dir is not None:
            shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True)

        self._stop_run_if_exists()

    def on_save(self, args, state, control, **kwargs):
        if self._should_upload_checkpoint:
            self._log_model_checkpoint(args.output_dir, f"checkpoint-{state.global_step}")

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self._log_checkpoints == "best":
            best_metric_name = args.metric_for_best_model
            if not best_metric_name.startswith("eval_"):
                best_metric_name = f"eval_{best_metric_name}"

            metric_value = metrics.get(best_metric_name)

            operator = np.greater if args.greater_is_better else np.less

            # Only upload the next checkpoint if it improves the tracked metric.
            self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric)

    @classmethod
    def get_run(cls, trainer):
        """Return the Neptune run of the callback registered on *trainer*."""
        for callback in trainer.callback_handler.callbacks:
            if isinstance(callback, cls):
                return callback.run

        raise Exception("The trainer doesn't have a NeptuneCallback configured.")

    def on_log(self, args, state, control, logs: dict[str, float] | None = None, **kwargs):
        if not state.is_world_process_zero:
            return

        if logs is not None:
            for name, value in rewrite_logs(logs).items():
                if isinstance(value, (int, float)):
                    if name in NeptuneCallback.flat_metrics:
                        self._metadata_namespace[name] = value
                    else:
                        self._metadata_namespace[name].log(value, step=state.global_step)
|
NeptuneCallback
|
python
|
redis__redis-py
|
redis/asyncio/client.py
|
{
"start": 49892,
"end": 50002
}
|
class ____(Protocol):
    """Structural type for a handler invoked with a raised exception and the
    `PubSub` instance on which it occurred."""

    def __call__(self, e: BaseException, pubsub: PubSub): ...
|
PubsubWorkerExceptionHandler
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/_serialization.py
|
{
"start": 1947,
"end": 2006
}
|
class ____(KeySerializationEncryption):
    """Marker `KeySerializationEncryption` subclass with no behavior of its
    own; presumably signals that a serialized key is not encrypted — confirm
    against the serializer that consumes it."""

    pass
|
NoEncryption
|
python
|
getsentry__sentry
|
src/sentry/relay/config/metric_extraction.py
|
{
"start": 2408,
"end": 34652
}
|
class ____(TypedDict):
    """Configuration for generic extraction of metrics from all data categories."""

    # Schema version of this config payload (_METRIC_EXTRACTION_VERSION).
    version: int
    # Metric specs to extract.
    metrics: list[MetricSpec]
def get_max_widget_specs(organization: Organization) -> int:
    """Return the maximum number of widget metric specs for *organization*.

    Organizations in the extended-spec allowlist with a truthy extended limit
    get that limit; everyone else gets the default.
    """
    # Consistency fix: mirror `get_max_alert_specs` — bind the extended limit
    # once with a walrus instead of reading the option twice.
    if organization.id in options.get("on_demand.extended_widget_spec_orgs") and (
        extended_max_specs := options.get("on_demand.extended_max_widget_specs")
    ):
        return extended_max_specs

    return options.get("on_demand.max_widget_specs")
def get_max_alert_specs(organization: Organization) -> int:
    """Return the maximum number of alert metric specs for *organization*.

    Organizations in the extended allowlist with a truthy extended limit get
    that limit; everyone else gets the default.
    """
    extended_orgs = options.get("on_demand.extended_alert_spec_orgs")
    if organization.id in extended_orgs:
        extended_limit = options.get("on_demand.extended_max_alert_specs")
        if extended_limit:
            return extended_limit

    return options.get("on_demand.max_alert_specs")
@metrics.wraps("on_demand_metrics.get_metric_extraction_config")
def get_metric_extraction_config(project: Project) -> MetricExtractionConfig | None:
    """
    Returns generic metric extraction config for the given project.

    This requires respective feature flags to be enabled. At the moment, metrics
    for the following models are extracted:
    - Performance alert rules with advanced filter expressions.
    - On-demand metrics widgets.

    Returns None when no metric specs apply to the project.
    """
    # For efficiency purposes, we fetch the flags in batch and propagate them downstream.
    sentry_sdk.set_tag("organization_id", project.organization_id)

    with sentry_sdk.start_span(op="get_on_demand_metric_specs"):
        # build_safe_config may return None (failure); fall back to empty lists.
        alert_specs, widget_specs = build_safe_config(
            "on_demand_metric_specs", get_on_demand_metric_specs, project
        ) or ([], [])
    with sentry_sdk.start_span(op="merge_metric_specs"):
        metric_specs = _merge_metric_specs(alert_specs, widget_specs)

    if not metric_specs:
        return None

    rv: MetricExtractionConfig = {
        "version": _METRIC_EXTRACTION_VERSION,
        "metrics": metric_specs,
    }

    return rv
def get_on_demand_metric_specs(
    timeout: TimeChecker, project: Project
) -> tuple[list[HashedMetricSpec], list[HashedMetricSpec]]:
    """Compute the on-demand metric specs for a project.

    Returns `(alert_specs, widget_specs)`. `timeout.check()` is called between
    phases so the caller's deadline can abort long-running computation.
    """
    with sentry_sdk.start_span(op="on_demand_metrics_feature_flags"):
        enabled_features = on_demand_metrics_feature_flags(project.organization)
    timeout.check()

    prefilling = "organizations:on-demand-metrics-prefill" in enabled_features
    prefilling_for_deprecation = (
        "organizations:on-demand-gen-metrics-deprecation-prefill" in enabled_features
    )

    with sentry_sdk.start_span(op="get_alert_metric_specs"):
        alert_specs = _get_alert_metric_specs(
            project,
            enabled_features,
            prefilling,
            prefilling_for_deprecation=prefilling_for_deprecation,
        )
    timeout.check()
    with sentry_sdk.start_span(op="get_widget_metric_specs"):
        widget_specs = _get_widget_metric_specs(project, enabled_features, prefilling)
    timeout.check()

    return (alert_specs, widget_specs)
def on_demand_metrics_feature_flags(organization: Organization) -> set[str]:
    """Return the subset of on-demand-metrics feature flags enabled for *organization*."""
    feature_names = (
        "organizations:on-demand-metrics-extraction",
        "organizations:on-demand-metrics-extraction-widgets",  # Controls extraction for widgets
        "organizations:on-demand-metrics-extraction-experimental",
        "organizations:on-demand-metrics-prefill",
        "organizations:on-demand-gen-metrics-deprecation-prefill",
    )
    return {
        feature
        for feature in feature_names
        if features.has(feature, organization=organization)
    }
def get_all_alert_metric_specs(
    project: Project,
    enabled_features: set[str],
    prefilling: bool,
    prefilling_for_deprecation: bool,
) -> list[HashedMetricSpec]:
    """Convert every pending performance alert rule on the project into specs.

    Returns an empty list unless on-demand extraction or one of the prefill
    modes is enabled.
    """
    if not (
        "organizations:on-demand-metrics-extraction" in enabled_features
        or prefilling
        or prefilling_for_deprecation
    ):
        return []

    metrics.incr(
        "on_demand_metrics.get_alerts",
        tags={"prefilling": prefilling},
    )

    # While prefilling, transaction-dataset alerts are considered as well.
    datasets = [Dataset.PerformanceMetrics.value]
    if prefilling:
        datasets.append(Dataset.Transactions.value)

    alert_rules = (
        AlertRule.objects.fetch_for_project(project)
        .filter(
            organization=project.organization,
            status=AlertRuleStatus.PENDING.value,
            snuba_query__dataset__in=datasets,
        )
        .select_related("snuba_query")
    )

    specs = []
    with metrics.timer("on_demand_metrics.alert_spec_convert"):
        for alert in alert_rules:
            alert_snuba_query = alert.snuba_query
            metrics.incr(
                "on_demand_metrics.before_alert_spec_generation",
                tags={"prefilling": prefilling, "dataset": alert_snuba_query.dataset},
            )

            # An alert may yield zero specs (not an on-demand query) or several.
            if results := _convert_snuba_query_to_metrics(
                project,
                alert_snuba_query,
                prefilling,
                prefilling_for_deprecation=prefilling_for_deprecation,
            ):
                for spec in results:
                    metrics.incr(
                        "on_demand_metrics.on_demand_spec.for_alert",
                        tags={"prefilling": prefilling},
                    )
                    specs.append(spec)

    return specs
def get_default_version_alert_metric_specs(
    project: Project,
    enabled_features: set[str],
    prefilling: bool,
    prefilling_for_deprecation: bool,
) -> list[HashedMetricSpec]:
    """Return only the alert specs whose version matches the default spec version."""
    all_specs = get_all_alert_metric_specs(
        project, enabled_features, prefilling, prefilling_for_deprecation=prefilling_for_deprecation
    )
    default_version = OnDemandMetricSpecVersioning.get_default_spec_version().version
    return get_specs_per_version(all_specs).get(default_version, [])
@metrics.wraps("on_demand_metrics._get_alert_metric_specs")
def _get_alert_metric_specs(
    project: Project,
    enabled_features: set[str],
    prefilling: bool,
    prefilling_for_deprecation: bool,
) -> list[HashedMetricSpec]:
    """Build all alert metric specs for the project, trimmed to the per-org maximum."""
    all_specs = get_all_alert_metric_specs(
        project, enabled_features, prefilling, prefilling_for_deprecation=prefilling_for_deprecation
    )
    limit = get_max_alert_specs(project.organization)
    accepted, _ = _trim_if_above_limit(all_specs, limit, project, "alerts")
    return accepted
def _bulk_cache_query_key(project: Project, chunk: int) -> str:
return f"on-demand.bulk-query-cache.{chunk}.{project.organization.id}"
def _get_bulk_cached_query(project: Project) -> tuple[dict[int, dict[str, bool]], list[int]]:
    """Load every chunk of the org's widget-query decision cache.

    Returns the per-chunk cache contents plus the indices of chunks that were
    cold (missing) and therefore need recomputing and re-writing.
    """
    chunk_contents: dict[int, dict[str, bool]] = {}
    cold_chunks: list[int] = []
    for chunk in range(WIDGET_QUERY_CACHE_MAX_CHUNKS):
        cached = cache.get(_bulk_cache_query_key(project, chunk), None)
        if cached is None:
            cold_chunks.append(chunk)
        sentry_sdk.set_tag(f"on_demand_metrics.query_cache.{chunk}", cached is None)
        chunk_contents[chunk] = cached or {}
    sentry_sdk.set_extra("cold_cache_chunks", cold_chunks)
    metrics.incr("on_demand_metrics.query_cache_cold_keys", amount=len(cold_chunks))
    return chunk_contents, cold_chunks
def _set_bulk_cached_query_chunk(
    project: Project, chunk_cache: dict[str, bool], chunk: int
) -> None:
    """Write one chunk of the query cache with a chunk-dependent TTL.

    The prime-number jitter (137s per chunk index) staggers expiry so all
    chunks turn over between roughly 15 and 25 minutes rather than at once.
    """
    ttl = 900 + (137 * chunk)
    cache.set(_bulk_cache_query_key(project, chunk), chunk_cache, timeout=ttl)
def _set_bulk_cached_query(
    project: Project, query_cache: dict[int, dict[str, bool]], cold_cache_chunks: list[int]
) -> None:
    """Persist only the chunks that were cold when the cache was read."""
    for chunk in cold_cache_chunks:
        _set_bulk_cached_query_chunk(project, query_cache[chunk], chunk)
@metrics.wraps("on_demand_metrics._get_widget_metric_specs")
def _get_widget_metric_specs(
    project: Project, enabled_features: set[str], prefilling: bool
) -> list[HashedMetricSpec]:
    """Build metric specs for all on-demand dashboard widget queries in the org.

    Widgets can be excluded wholesale (all their specs dropped) either because
    stateful extraction is disabled for one of their queries, or — on the
    legacy path — because a query is too high-cardinality. The surviving specs
    are then trimmed to the per-org maximum.
    """
    if "organizations:on-demand-metrics-extraction-widgets" not in enabled_features:
        metrics.incr("on_demand_metrics.get_widget_metric_specs.extraction_feature_disabled")
        return []

    metrics.incr(
        "on_demand_metrics.get_widgets",
        tags={"prefilling": prefilling},
    )

    # fetch all queries of all on demand metrics widgets of this organization
    widget_queries = (
        DashboardWidgetQuery.objects.filter(
            widget__dashboard__organization=project.organization,
            widget__widget_type__in=[
                DashboardWidgetTypes.DISCOVER,
                DashboardWidgetTypes.TRANSACTION_LIKE,
            ],
        )
        .prefetch_related("dashboardwidgetqueryondemand_set", "widget")
        .order_by("-widget__dashboard__last_visited", "widget__order")
    )

    metrics.incr(
        "on_demand_metrics.widgets_to_process", amount=len(widget_queries), sample_rate=1.0
    )

    organization_bulk_query_cache, cold_bulk_cache_chunks = _get_bulk_cached_query(project)

    # Widget ids whose specs must all be dropped.
    ignored_widget_ids: dict[int, bool] = {}
    specs_for_widget: dict[int, list[HashedMetricSpec]] = defaultdict(list)
    # Maps a spec hash back to the query that produced it (for state updates).
    widget_query_for_spec_hash: dict[str, DashboardWidgetQuery] = {}
    specs: list[HashedMetricSpec] = []

    total_spec_count = 0

    with metrics.timer("on_demand_metrics.widget_spec_convert"):
        for widget_query in widget_queries:
            widget_specs = convert_widget_query_to_metric(
                project, widget_query, prefilling, organization_bulk_query_cache
            )

            if not widget_specs:
                # Skip checking any widget queries that don't have specs,
                # they don't affect decisions about the widget.
                continue

            total_spec_count += 1
            specs_for_widget[widget_query.widget.id] += widget_specs
            for spec in widget_specs:
                widget_query_for_spec_hash[spec[0]] = widget_query

            can_widget_query_use_stateful_extraction = _can_widget_query_use_stateful_extraction(
                widget_query, widget_specs
            )

            if options.get("on_demand_metrics.widgets.use_stateful_extraction"):
                if can_widget_query_use_stateful_extraction:
                    extraction_enabled = _widget_query_stateful_extraction_enabled(widget_query)
                    if not extraction_enabled:
                        # Return no specs if any extraction is blocked for a widget that should have specs.
                        ignored_widget_ids[widget_query.widget.id] = True
                    metrics.incr(
                        "on_demand_metrics.widgets.can_use_stateful_extraction", sample_rate=1.0
                    )
                else:
                    # Stateful extraction cannot be used in some cases (eg. newly created or recently modified widgets).
                    # We skip cardinality checks for those cases, however, and assume extraction is allowed temporarily.
                    metrics.incr(
                        "on_demand_metrics.widgets.cannot_use_stateful_extraction", sample_rate=1.0
                    )
                    continue
            else:
                # TODO: Remove this cardinality check after above option is enabled permanently.
                if not _is_widget_query_low_cardinality(widget_query, project):
                    metrics.incr("on_demand_metrics.widget_query.high_cardinality", sample_rate=1.0)
                    ignored_widget_ids[widget_query.widget.id] = True

    metrics.incr("on_demand_metrics.widget_query_specs.pre_trim", amount=total_spec_count)
    specs = _trim_disabled_widgets(ignored_widget_ids, specs_for_widget)
    metrics.incr("on_demand_metrics.widget_query_specs.post_disabled_trim", amount=len(specs))
    max_widget_specs = get_max_widget_specs(project.organization)
    (specs, trimmed_specs) = _trim_if_above_limit(specs, max_widget_specs, project, "widgets")

    _update_state_with_spec_limit(trimmed_specs, widget_query_for_spec_hash)
    metrics.incr("on_demand_metrics.widget_query_specs", amount=len(specs))
    if in_random_rollout("on_demand_metrics.cache_should_use_on_demand"):
        _set_bulk_cached_query(project, organization_bulk_query_cache, cold_bulk_cache_chunks)
    return specs
def _trim_disabled_widgets(
ignored_widgets: dict[int, bool], specs_for_widget: dict[int, list[HashedMetricSpec]]
) -> list[HashedMetricSpec]:
"""Specifically remove only widget specs that share a widget (spec limit, cardinality limit)."""
enabled_specs: list[HashedMetricSpec] = []
for widget_id, specs in specs_for_widget.items():
if not ignored_widgets.get(widget_id, None):
enabled_specs.extend(specs)
return enabled_specs
def _trim_if_above_limit(
specs: Sequence[HashedMetricSpec],
max_specs: int,
project: Project,
spec_type: Literal["alerts", "widgets", "span_attributes"],
) -> tuple[list[HashedMetricSpec], list[HashedMetricSpec]]:
"""Trim specs per version if above max limit, returns the accepted specs and the trimmed specs in a tuple"""
return_specs = []
trimmed_specs = []
specs_per_version: dict[int, dict[str, HashedMetricSpec]] = {}
for hash, spec, spec_version in specs:
specs_per_version.setdefault(spec_version.version, {})
specs_per_version[spec_version.version][hash] = (hash, spec, spec_version)
for version, _specs_for_version in specs_per_version.items():
specs_for_version = _specs_for_version.values()
if len(specs_for_version) > max_specs:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("project_id", project.id)
scope.set_context("specs", {"values": [spec[0] for spec in specs_for_version]})
sentry_sdk.capture_exception(
Exception(
f"Spec version {version}: Too many ({len(specs_for_version)}) on demand metric {spec_type} for org {project.organization.slug}"
)
)
return_specs += list(specs_for_version)[:max_specs]
trimmed_specs += list(specs_for_version)[max_specs:]
else:
return_specs += list(specs_for_version)
return return_specs, trimmed_specs
def _update_state_with_spec_limit(
    trimmed_specs: Sequence[HashedMetricSpec],
    widget_query_for_spec_hash: dict[str, DashboardWidgetQuery],
) -> None:
    """Persist a DISABLED_SPEC_LIMIT state for widget queries of trimmed specs.

    We don't want to pick randomly last-visited widgets to exclude for specs —
    with stateful extraction this keeps the accepted spec set stable across
    runs instead of churning.
    """
    queries_by_version: dict[int, set[DashboardWidgetQuery]] = {}
    for spec_hash, _, spec_version in trimmed_specs:
        query = widget_query_for_spec_hash[spec_hash]
        if query:
            queries_by_version.setdefault(spec_version.version, set()).add(query)

    for version, queries in queries_by_version.items():
        for query in queries:
            query.dashboardwidgetqueryondemand_set.filter(spec_version=version).update(
                extraction_state=OnDemandExtractionState.DISABLED_SPEC_LIMIT
            )

    return None
@metrics.wraps("on_demand_metrics._merge_metric_specs")
def _merge_metric_specs(
    alert_specs: list[HashedMetricSpec],
    widget_specs: list[HashedMetricSpec],
) -> list[MetricSpec]:
    """Merge widget and alert specs, deduplicating on the query hash.

    Specs sharing a hash but differing in content are logged and skipped
    (the first-seen spec wins).
    """
    merged: dict[str, MetricSpec] = {}
    conflict_count = 0
    for query_hash, spec, _ in widget_specs + alert_specs:
        existing = merged.get(query_hash)
        if existing and not are_specs_equal(existing, spec):
            logger.warning(
                "Duplicate metric spec found for hash %s with different specs.", query_hash
            )
            # Printing over two lines to prevent trimming
            logger.info("Spec 1: %s", existing)
            logger.info("Spec 2: %s", spec)
            conflict_count += 1
            continue
        merged[query_hash] = spec

    if conflict_count > 0:
        logger.error("%s metrics are duplicated. Check breadcrumbs for details.", conflict_count)
        metrics.incr("on_demand_metrics.duplicate_specs", amount=conflict_count)

    return list(merged.values())
def _convert_snuba_query_to_metrics(
    project: Project,
    snuba_query: SnubaQuery,
    prefilling: bool,
    prefilling_for_deprecation: bool,
) -> Sequence[HashedMetricSpec] | None:
    """
    If the passed snuba_query is a valid query for on-demand metric extraction,
    returns a tuple of (hash, MetricSpec) for the query. Otherwise, returns None.
    """
    if snuba_query.environment is None:
        environment = None
    else:
        environment = snuba_query.environment.name
    return _convert_aggregate_and_query_to_metrics(
        project,
        snuba_query.dataset,
        snuba_query.aggregate,
        snuba_query.query,
        environment,
        prefilling,
        prefilling_for_deprecation=prefilling_for_deprecation,
    )
def convert_widget_query_to_metric(
    project: Project,
    widget_query: DashboardWidgetQuery,
    prefilling: bool,
    organization_bulk_query_cache: dict[int, dict[str, bool]] | None = None,
) -> list[HashedMetricSpec]:
    """
    Converts a passed metrics widget query to one or more MetricSpecs.
    Widget query can result in multiple metric specs if it selects multiple fields
    """
    if not widget_query.aggregates:
        return []

    groupbys = widget_query.columns
    specs: list[HashedMetricSpec] = []
    for aggregate in widget_query.aggregates:
        specs.extend(
            _generate_metric_specs(
                aggregate,
                widget_query,
                project,
                prefilling,
                prefilling_for_deprecation=False,
                groupbys=groupbys,
                organization_bulk_query_cache=organization_bulk_query_cache,
            )
        )
    return specs
def _generate_metric_specs(
    aggregate: str,
    widget_query: DashboardWidgetQuery,
    project: Project,
    prefilling: bool,
    prefilling_for_deprecation: bool,
    groupbys: Sequence[str] | None = None,
    organization_bulk_query_cache: dict[int, dict[str, bool]] | None = None,
) -> list[HashedMetricSpec]:
    """Convert a single widget aggregate into zero or more hashed metric specs."""
    metrics.incr("on_demand_metrics.before_widget_spec_generation")
    converted = _convert_aggregate_and_query_to_metrics(
        project,
        # there is an internal check to make sure we extract metrics only for performance dataset
        # however widgets do not have a dataset field, so we need to pass it explicitly
        Dataset.PerformanceMetrics.value,
        aggregate,
        widget_query.conditions,
        None,
        prefilling,
        prefilling_for_deprecation,
        groupbys=groupbys,
        spec_type=MetricSpecType.DYNAMIC_QUERY,
        organization_bulk_query_cache=organization_bulk_query_cache,
    )
    if not converted:
        return []

    specs: list[HashedMetricSpec] = []
    for spec in converted:
        metrics.incr(
            "on_demand_metrics.on_demand_spec.for_widget",
            tags={"prefilling": prefilling},
        )
        specs.append(spec)
    return specs
def get_specs_per_version(specs: Sequence[HashedMetricSpec]) -> dict[int, list[HashedMetricSpec]]:
    """This splits a list of specs into versioned specs for per-version logic"""
    by_version: dict[int, list[HashedMetricSpec]] = {}
    for spec_hash, spec, spec_version in specs:
        by_version.setdefault(spec_version.version, []).append((spec_hash, spec, spec_version))
    return by_version
def _can_widget_query_use_stateful_extraction(
    widget_query: DashboardWidgetQuery, metrics_specs: Sequence[HashedMetricSpec]
) -> bool:
    """Stateful extraction for metrics is not always used, in cases where a query has been recently modified.
    Separated from enabled state check to allow us to skip cardinality checks on the vast majority of widget queries.

    Returns True only when exactly one up-to-date on-demand entry exists for
    the default spec version and its stored hashes match the computed specs.
    """
    specs_per_version = get_specs_per_version(metrics_specs)

    stateful_extraction_version = OnDemandMetricSpecVersioning.get_default_spec_version().version
    default_version_specs = specs_per_version.get(stateful_extraction_version, [])
    spec_hashes = [hashed_spec[0] for hashed_spec in default_version_specs]

    on_demand_entries = [
        entry
        for entry in widget_query.dashboardwidgetqueryondemand_set.all()
        if entry.spec_version == stateful_extraction_version
    ]

    if len(on_demand_entries) == 0:
        # 0 on-demand entries is expected, and happens when the on-demand task hasn't caught up yet for newly created widgets or widgets recently modified to have on-demand state.
        if widget_query.date_modified > timezone.now() - timedelta(days=1):
            metrics.incr(
                "on_demand_metrics.on_demand_spec.skip_recently_modified",
                amount=len(metrics_specs),
                sample_rate=1.0,
            )
        else:
            metrics.incr(
                "on_demand_metrics.on_demand_spec.older_widget_query",
                amount=len(metrics_specs),
                sample_rate=1.0,
            )
        return False
    elif len(on_demand_entries) > 1:
        # There should only be one on demand entry.
        with sentry_sdk.isolation_scope() as scope:
            scope.set_tag("widget_query", widget_query.id)
            sentry_sdk.capture_message(
                f"Wrong number of relations ({len(on_demand_entries)}) for widget_query: {widget_query.id}"
            )
        metrics.incr(
            "on_demand_metrics.on_demand_spec.failed_on_demand_relations",
            amount=len(metrics_specs),
            sample_rate=1.0,
        )
        return False

    on_demand_entry = on_demand_entries[0]
    on_demand_hashes = on_demand_entry.spec_hashes

    if on_demand_entry.date_modified < widget_query.date_modified:
        # On demand entry was updated before the widget_query got updated, meaning it's potentially out of date
        metrics.incr(
            "on_demand_metrics.on_demand_spec.out_of_date_on_demand",
            sample_rate=1.0,
        )
        return False

    if set(spec_hashes) != set(on_demand_hashes):
        # Spec hashes should match.
        metrics.incr(
            "on_demand_metrics.on_demand_spec.failed_on_demand_hashes",
            amount=len(metrics_specs),
            sample_rate=1.0,
        )
        return False

    return True
def _widget_query_stateful_extraction_enabled(widget_query: DashboardWidgetQuery) -> bool:
"""Separate from the check on whether to use stateful extracion in the first place,
this assumes stateful extraction can be used, and returns the enabled state."""
stateful_extraction_version = OnDemandMetricSpecVersioning.get_default_spec_version().version
on_demand_entries = [
entry
for entry in widget_query.dashboardwidgetqueryondemand_set.all()
if entry.spec_version == stateful_extraction_version
]
if len(on_demand_entries) != 1:
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("on_demand_entries", on_demand_entries)
scope.set_extra("spec_version", OnDemandMetricSpecVersioning.get_spec_versions())
sentry_sdk.capture_exception(
Exception("Skipped extraction due to mismatched on_demand entries")
)
# We default to allowed extraction if something unexpected occurs otherwise customers lose data.
return True
on_demand_entry = on_demand_entries[0]
return on_demand_entry.extraction_enabled()
def _get_widget_cardinality_query_ttl() -> int:
# Add ttl + 25% jitter to query so queries aren't all made at once.
return int(random.uniform(_WIDGET_QUERY_CARDINALITY_TTL, _WIDGET_QUERY_CARDINALITY_TTL * 1.5))
def _get_widget_cardinality_softdeadline_ttl() -> int:
# This is a much shorter deadline than the main cardinality TTL in the case softdeadline is hit
# We want to query again soon, but still avoid thundering herd problems.
return int(
random.uniform(
_WIDGET_QUERY_CARDINALITY_SOFT_DEADLINE_TTL,
_WIDGET_QUERY_CARDINALITY_SOFT_DEADLINE_TTL * 1.5,
)
)
def _is_widget_query_low_cardinality(widget_query: DashboardWidgetQuery, project: Project) -> bool:
"""
Checks cardinality of existing widget queries before allowing the metric spec, so that
group by clauses with high-cardinality tags are not added to the on_demand metric.
New queries will be checked upon creation and not allowed at that time.
"""
params: ParamsType = {
"statsPeriod": "30m",
"project_objects": [project],
"organization_id": project.organization_id, # Organization id has to be specified to not violate allocation policy.
}
start, end = get_date_range_from_params(params)
params["start"] = start
params["end"] = end
metrics.incr("on_demand_metrics.cardinality_check")
query_killswitch = options.get("on_demand.max_widget_cardinality.killswitch")
if query_killswitch:
return True
# No columns or only errors means no high-cardinality tags.
if not widget_query.columns or "event.type:error" in widget_query.conditions:
metrics.incr("on_demand_metrics.cardinality_check.not_applicable")
return True
max_cardinality_allowed = options.get("on_demand.max_widget_cardinality.count")
cache_key = f"check-widget-query-cardinality:{widget_query.id}"
cardinality_allowed = cache.get(cache_key)
if cardinality_allowed is not None:
metrics.incr(
"on_demand_metrics.cardinality_check.using_cache",
tags={"low_cardinality": cardinality_allowed},
)
return cardinality_allowed
unique_columns = [
f"count_unique({column})"
for column in widget_query.columns
if not fields.is_function(column)
]
query_builder = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params=params,
selected_columns=unique_columns,
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
with sentry_sdk.isolation_scope() as scope:
metrics.incr("on_demand_metrics.cardinality_check.query")
scope.set_tag("widget_query.widget_id", widget_query.id)
scope.set_tag("widget_query.org_id", project.organization_id)
scope.set_tag("widget_query.conditions", widget_query.conditions)
try:
results = query_builder.run_query(Referrer.METRIC_EXTRACTION_CARDINALITY_CHECK.value)
processed_results = query_builder.process_results(results)
except ProcessingDeadlineExceeded as error:
scope.set_tag("widget_soft_deadline", True)
sentry_sdk.capture_exception(error)
# We're setting a much shorter cache timeout here since this is essentially a permissive 'unknown' state
cache.set(cache_key, True, timeout=_get_widget_cardinality_softdeadline_ttl())
return True
except Exception as error:
sentry_sdk.capture_exception(error)
cache.set(cache_key, False, timeout=_get_widget_cardinality_query_ttl())
return False
try:
for index, column in enumerate(unique_columns):
count = processed_results["data"][0][unique_columns[index]]
if count > max_cardinality_allowed:
cache.set(cache_key, False, timeout=_get_widget_cardinality_query_ttl())
scope.set_tag("widget_query.column_name", column)
scope.set_extra("widget_query.column_count", count)
scope.set_extra("widget_query.id", widget_query.id)
raise HighCardinalityWidgetException()
except HighCardinalityWidgetException as error:
sentry_sdk.capture_message(str(error))
return False
cache.set(cache_key, True)
return True
def _convert_aggregate_and_query_to_metrics(
project: Project,
dataset: str,
aggregate: str,
query: str,
environment: str | None,
prefilling: bool,
prefilling_for_deprecation: bool,
spec_type: MetricSpecType = MetricSpecType.SIMPLE_QUERY,
groupbys: Sequence[str] | None = None,
organization_bulk_query_cache: dict[int, dict[str, bool]] | None = None,
) -> Sequence[HashedMetricSpec] | None:
"""
Converts an aggregate and a query to a metric spec with its hash value.
Extra metric specs will be returned if we need to maintain various versions of it.
This makes it easier to maintain multiple spec versions when a mistake is made.
"""
# We can avoid injection of the environment in the query, since it's supported by standard, thus it won't change
# the supported state of a query, since if it's standard, and we added environment it will still be standard
# and if it's on demand, it will always be on demand irrespectively of what we add.
if not should_use_on_demand_metrics(
dataset,
aggregate,
query,
groupbys,
prefilling,
organization_bulk_query_cache,
prefilling_for_deprecation=prefilling_for_deprecation,
):
return None
metric_specs_and_hashes = []
extra = {
"dataset": dataset,
"aggregate": aggregate,
"query": query,
"groupbys": groupbys,
}
with sentry_sdk.start_span(op="converting_aggregate_and_query") as span:
span.set_data("widget_query_args", {"query": query, "aggregate": aggregate})
# Create as many specs as we support
for spec_version in OnDemandMetricSpecVersioning.get_spec_versions():
try:
on_demand_spec = OnDemandMetricSpec(
field=aggregate,
query=query,
environment=environment,
groupbys=groupbys,
spec_type=spec_type,
spec_version=spec_version,
)
metric_spec = on_demand_spec.to_metric_spec(project)
# TODO: switch to validate_rule_condition
if (condition := metric_spec.get("condition")) is not None:
validate_sampling_condition(json.dumps(condition))
else:
metrics.incr(
"on_demand_metrics.missing_condition_spec", tags={"prefilling": prefilling}
)
metric_specs_and_hashes.append(
(on_demand_spec.query_hash, metric_spec, spec_version)
)
except ValueError:
# raised by validate_sampling_condition or metric_spec lacking "condition"
metrics.incr(
"on_demand_metrics.invalid_metric_spec", tags={"prefilling": prefilling}
)
logger.exception("Invalid on-demand metric spec", extra=extra)
except OnDemandMetricSpecError:
metrics.incr("on_demand_metrics.invalid_metric_spec.other")
logger.warning(
"Failed on-demand metric spec creation due to specification error.", extra=extra
)
except Exception:
# Since prefilling might include several non-ondemand-compatible alerts, we want to not trigger errors in the
metrics.incr("on_demand_metrics.invalid_metric_spec.other")
logger.exception("Failed on-demand metric spec creation.", extra=extra)
return metric_specs_and_hashes
# CONDITIONAL TAGGING
|
MetricExtractionConfig
|
python
|
openai__openai-python
|
src/openai/types/video_download_content_params.py
|
{
"start": 216,
"end": 405
}
|
class ____(TypedDict, total=False):
variant: Literal["video", "thumbnail", "spritesheet"]
"""Which downloadable asset to return. Defaults to the MP4 video."""
|
VideoDownloadContentParams
|
python
|
kamyu104__LeetCode-Solutions
|
Python/intersection-of-two-arrays-ii.py
|
{
"start": 1459,
"end": 2512
}
|
class ____(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
if len(nums1) > len(nums2):
return self.intersect(nums2, nums1)
def binary_search(compare, nums, left, right, target):
while left < right:
mid = left + (right - left) / 2
if compare(nums[mid], target):
right = mid
else:
left = mid + 1
return left
nums1.sort(), nums2.sort() # Make sure it is sorted, doesn't count in time.
res = []
left = 0
for i in nums1:
left = binary_search(lambda x, y: x >= y, nums2, left, len(nums2), i)
if left != len(nums2) and nums2[left] == i:
res += i,
left += 1
return res
# If the given array is already sorted, and the memory is limited or m ~ n.
# Time: O(m + n)
# Space: O(1)
# Two pointers solution.
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 152745,
"end": 153866
}
|
class ____(sgqlc.types.Input):
"""The filters that are available when fetching check runs."""
__schema__ = github_schema
__field_names__ = ("check_type", "app_id", "check_name", "status", "statuses", "conclusions")
check_type = sgqlc.types.Field(CheckRunType, graphql_name="checkType")
"""Filters the check runs by this type."""
app_id = sgqlc.types.Field(Int, graphql_name="appId")
"""Filters the check runs created by this application ID."""
check_name = sgqlc.types.Field(String, graphql_name="checkName")
"""Filters the check runs by this name."""
status = sgqlc.types.Field(CheckStatusState, graphql_name="status")
"""Filters the check runs by this status. Superceded by statuses."""
statuses = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(CheckStatusState)), graphql_name="statuses")
"""Filters the check runs by this status. Overrides status."""
conclusions = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(CheckConclusionState)), graphql_name="conclusions")
"""Filters the check runs by these conclusions."""
|
CheckRunFilter
|
python
|
getsentry__sentry
|
src/sentry/models/pullrequest.py
|
{
"start": 6548,
"end": 6913
}
|
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
pull_request = FlexibleForeignKey("sentry.PullRequest")
commit = FlexibleForeignKey("sentry.Commit", db_constraint=False)
class Meta:
app_label = "sentry"
db_table = "sentry_pullrequest_commit"
unique_together = (("pull_request", "commit"),)
|
PullRequestCommit
|
python
|
django__django
|
tests/generic_relations/models.py
|
{
"start": 2832,
"end": 2965
}
|
class ____(models.Model):
has_tail = models.BooleanField(default=False)
objects = GeckoManager()
# To test fix for #11263
|
Gecko
|
python
|
getsentry__sentry
|
src/sentry/preprod/api/endpoints/project_preprod_artifact_delete.py
|
{
"start": 751,
"end": 4614
}
|
class ____(PreprodArtifactEndpoint):
owner = ApiOwner.EMERGE_TOOLS
publish_status = {
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
def delete(
self,
request: Request,
project: Project,
head_artifact_id: int,
head_artifact: PreprodArtifact,
) -> Response:
"""Delete a preprod artifact and all associated data"""
if not features.has(
"organizations:preprod-frontend-routes", project.organization, actor=request.user
):
return Response({"error": "Feature not enabled"}, status=403)
analytics.record(
PreprodArtifactApiDeleteEvent(
organization_id=project.organization_id,
project_id=project.id,
user_id=request.user.id,
artifact_id=str(head_artifact_id),
)
)
try:
with transaction.atomic(using=router.db_for_write(PreprodArtifact)):
# Delete dependent files, these do not have cascade deletes so have to do manually
files_deleted_count = self._delete_artifact_files(head_artifact)
# Delete the actual artifact record (this will cascade delete size metrics and installable artifacts)
deleted_count, deleted_models = head_artifact.delete()
size_metrics_count = deleted_models.get("preprod.PreprodArtifactSizeMetrics", 0)
installable_count = deleted_models.get("preprod.InstallablePreprodArtifact", 0)
logger.info(
"preprod_artifact.deleted",
extra={
"artifact_id": int(head_artifact_id),
"user_id": request.user.id,
"files_count": files_deleted_count,
"deleted_count": deleted_count,
"size_metrics_count": size_metrics_count,
"installable_count": installable_count,
},
)
return Response(
{
"success": True,
"message": f"Artifact {head_artifact_id} deleted successfully.",
"artifact_id": str(head_artifact_id),
"files_deleted_count": files_deleted_count,
"size_metrics_deleted": size_metrics_count,
"installable_artifacts_deleted": installable_count,
}
)
except Exception:
logger.exception(
"preprod_artifact.delete_failed",
extra={"artifact_id": int(head_artifact_id), "user_id": request.user.id},
)
return Response(
{
"success": False,
"error": "Internal error deleting artifact.",
},
status=500,
)
def _delete_artifact_files(self, preprod_artifact: PreprodArtifact) -> int:
file_ids_to_delete = []
# Collect all file IDs to delete first so we can batch delete them
if preprod_artifact.file_id:
file_ids_to_delete.append(preprod_artifact.file_id)
if preprod_artifact.installable_app_file_id:
file_ids_to_delete.append(preprod_artifact.installable_app_file_id)
# Collect size analysis file IDs
size_metrics = PreprodArtifactSizeMetrics.objects.filter(preprod_artifact=preprod_artifact)
for size_metric in size_metrics:
if size_metric.analysis_file_id:
file_ids_to_delete.append(size_metric.analysis_file_id)
# Batch delete all files
files_deleted_count = 0
if file_ids_to_delete:
files_deleted_count, _ = File.objects.filter(id__in=file_ids_to_delete).delete()
return files_deleted_count
|
ProjectPreprodArtifactDeleteEndpoint
|
python
|
psf__black
|
tests/data/cases/dummy_implementations.py
|
{
"start": 981,
"end": 1137
}
|
class ____:
async def async_method(self):
...
async def async_function(self):
...
@decorated
async def async_function(self):
...
|
AsyncCls
|
python
|
openai__openai-python
|
src/openai/types/shared/custom_tool_input_format.py
|
{
"start": 299,
"end": 402
}
|
class ____(BaseModel):
type: Literal["text"]
"""Unconstrained text format. Always `text`."""
|
Text
|
python
|
apache__airflow
|
airflow-ctl/tests/airflow_ctl/api/test_operations.py
|
{
"start": 39251,
"end": 40337
}
|
class ____:
job_response = JobResponse(
id=1,
dag_id="dag_id",
state="state",
job_type="job_type",
start_date=datetime.datetime(2024, 12, 31, 23, 59, 59),
end_date=datetime.datetime(2025, 1, 1, 0, 0, 0),
latest_heartbeat=datetime.datetime(2025, 1, 1, 0, 0, 0),
executor_class="LocalExecutor",
hostname="hostname",
unixname="unixname",
)
job_collection_response = JobCollectionResponse(
jobs=[job_response],
total_entries=1,
)
def test_list(self):
def handle_request(request: httpx.Request) -> httpx.Response:
assert request.url.path == "/api/v2/jobs"
return httpx.Response(200, json=json.loads(self.job_collection_response.model_dump_json()))
client = make_api_client(transport=httpx.MockTransport(handle_request))
response = client.jobs.list(
job_type="job_type",
hostname="hostname",
is_alive=True,
)
assert response == self.job_collection_response
|
TestJobsOperations
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/tuple7.py
|
{
"start": 1071,
"end": 1154
}
|
class ____(Generic[_T]):
def __init__(self):
self._x: tuple[_T, ...] = ()
|
X
|
python
|
spack__spack
|
lib/spack/spack/error.py
|
{
"start": 6400,
"end": 6513
}
|
class ____(SpackError):
"""Raised if something goes wrong when probing or querying a compiler."""
|
CompilerError
|
python
|
squidfunk__mkdocs-material
|
material/plugins/tags/config.py
|
{
"start": 1713,
"end": 3859
}
|
class ____(Config):
enabled = Type(bool, default = True)
# Settings for filtering
filters = SubConfig(FilterConfig)
# Settings for tags
tags = Type(bool, default = True)
tags_slugify = Type(Callable, default = slugify(case = "lower"))
tags_slugify_separator = Type(str, default = "-")
tags_slugify_format = Type(str, default = "tag:{slug}")
tags_hierarchy = Type(bool, default = False)
tags_hierarchy_separator = Type(str, default = "/")
tags_sort_by = Type(Callable, default = tag_name)
tags_sort_reverse = Type(bool, default = False)
tags_name_property = Type(str, default = "tags")
tags_name_variable = Type(str, default = "tags")
tags_allowed = TagSet()
# Settings for listings
listings = Type(bool, default = True)
listings_map = DictOfItems(SubConfig(ListingConfig), default = {})
listings_sort_by = Type(Callable, default = item_title)
listings_sort_reverse = Type(bool, default = False)
listings_tags_sort_by = Type(Callable, default = tag_name)
listings_tags_sort_reverse = Type(bool, default = False)
listings_directive = Type(str, default = "material/tags")
listings_layout = Type(str, default = "default")
listings_toc = Type(bool, default = True)
# Settings for shadow tags
shadow = Type(bool, default = False)
shadow_on_serve = Type(bool, default = True)
shadow_tags = TagSet()
shadow_tags_prefix = Type(str, default = "")
shadow_tags_suffix = Type(str, default = "")
# Settings for export
export = Type(bool, default = True)
export_file = Type(str, default = "tags.json")
export_only = Type(bool, default = False)
# Deprecated settings
tags_compare = Deprecated(moved_to = "tags_sort_by")
tags_compare_reverse = Deprecated(moved_to = "tags_sort_reverse")
tags_pages_compare = Deprecated(moved_to = "listings_sort_by")
tags_pages_compare_reverse = Deprecated(moved_to = "listings_sort_reverse")
tags_file = Deprecated(option_type = Type(str))
tags_extra_files = Deprecated(
option_type = DictOfItems(ListOfItems(Type(str)), default = {})
)
|
TagsConfig
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.